Below is a MATLAB function that implements the triangulation method as described in Multiple View Geometry. It takes the 8 corner points of a cube (OriginalPoints) and projects them onto two screens. The screen points then end up in CameraPoints and ProjectorPoints. The goal is to reconstruct the original points from these two views using the Direct Linear Transform. Unfortunately, the result is gibberish.
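For reference, the system the code below sets up for each point is the standard DLT formulation from Multiple View Geometry: with a (normalized) image point (x, y), a camera matrix P with rows P(1,:), P(2,:), P(3,:), and the unknown homogeneous point X, each view contributes the two rows x*P(3,:) - P(1,:) and y*P(3,:) - P(2,:) of a matrix A with A*X = 0. Stacking both views gives a 4x4 system, and X is taken as the right singular vector of A belonging to the smallest singular value.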
There is another topic here on SO from which the normalization of the rows of A is taken, but that did not improve the result.
Three general questions:
% Demos the triangulation method using the direct linear transform
% algorithm
function testTriangulation()
close all;
camWidth = 800;
camHeight = 600;
projectorWidth = 5080;
projectorHeight = 760;
% camera matrices
CameraMatrix = [
-1.81066 0.00000 0.00000 0.00000;
0.00000 2.41421 0.00000 0.00000;
0.00000 0.00000 1.00000 1.50000
];
ProjectorMatrix = [
-0.36118 0.00000 0.00000 0.00000
0.00000 2.00875 1.33916 0.00000
0.00000 -0.55470 0.83205 1.80278
];
% generating original points and displaying them
OriginalPoints = [
-0.2 -0.2 -0.2 -0.2 0.2 0.2 0.2 0.2;
-0.2 -0.2 0.2 0.2 -0.2 -0.2 0.2 0.2;
-0.2 0.2 -0.2 0.2 -0.2 0.2 -0.2 0.2;
1.0 1.0 1.0 1.0 1.0 1.0 1.0 1.0
];
scatter3(OriginalPoints(1, :), OriginalPoints(2, :), OriginalPoints(3,:));
title('Original Points');
% projecting and normalizing camera points
CameraPoints = CameraMatrix * OriginalPoints;
for i = 1:3
CameraPoints(i, :) = CameraPoints(i, :) ./ CameraPoints(3,:);
end
% figure();
% scatter(CameraPoints(1,:), CameraPoints(2,:));
% title('Projected Camera Points');
% transforming camera points to screen coordinates
camXOffset = camWidth / 2;
camYOffset = camHeight / 2;
CameraPoints(1,:) = CameraPoints(1,:) * camXOffset + camXOffset;
CameraPoints(2,:) = CameraPoints(2,:) * camYOffset + camYOffset;
figure();
scatter(CameraPoints(1,:), CameraPoints(2,:));
title('Projected Camera Points in Screen Coordinates');
% projecting and normalizing projector points
ProjectorPoints = ProjectorMatrix * OriginalPoints;
for i = 1:3
ProjectorPoints(i, :) = ProjectorPoints(i, :) ./ ProjectorPoints(3,:);
end
% figure();
% scatter(ProjectorPoints(1,:), ProjectorPoints(2,:));
% title('Projected Projector Points');
% transforming projector points to screen coordinates
projectorXOffset = projectorWidth / 2;
projectorYOffset = projectorHeight / 2;
ProjectorPoints(1,:) = ProjectorPoints(1,:) * projectorXOffset + projectorXOffset;
ProjectorPoints(2,:) = ProjectorPoints(2,:) * projectorYOffset + projectorYOffset;
figure();
scatter(ProjectorPoints(1,:), ProjectorPoints(2,:));
title('Projected Projector Points in Screen Coordinates');
% normalizing camera points (i.e. origin is
% the centroid and average distance is sqrt(2)).
camCenter = mean(CameraPoints, 2);
CameraPoints(1,:) = CameraPoints(1,:) - camCenter(1);
CameraPoints(2,:) = CameraPoints(2,:) - camCenter(2);
avgDistance = 0.0;
for i = 1:length(CameraPoints(1,:))
avgDistance = avgDistance + norm(CameraPoints(:, i));
end
avgDistance = avgDistance / length(CameraPoints(1, :));
scaleFactor = sqrt(2) / avgDistance;
CameraPoints(1:2, :) = CameraPoints(1:2, :) * scaleFactor;
% normalizing projector points (i.e. origin is
% the centroid and average distance is sqrt(2)).
projectorCenter = mean(ProjectorPoints, 2);
ProjectorPoints(1,:) = ProjectorPoints(1,:) - projectorCenter(1);
ProjectorPoints(2,:) = ProjectorPoints(2,:) - projectorCenter(2);
avgDistance = 0.0;
for i = 1:length(ProjectorPoints(1,:))
avgDistance = avgDistance + norm(ProjectorPoints(:, i));
end
avgDistance = avgDistance / length(ProjectorPoints(1, :));
scaleFactor = sqrt(2) / avgDistance;
ProjectorPoints(1:2, :) = ProjectorPoints(1:2, :) * scaleFactor;
% triangulating points
TriangulatedPoints = zeros(size(OriginalPoints));
for i = 1:length(OriginalPoints(1, :))
A = [
CameraPoints(1, i) * CameraMatrix(3, :) - CameraMatrix(1, :);
CameraPoints(2, i) * CameraMatrix(3, :) - CameraMatrix(2, :);
ProjectorPoints(1, i) * ProjectorMatrix(3,:) - ProjectorMatrix(1,:);
ProjectorPoints(2, i) * ProjectorMatrix(3,:) - ProjectorMatrix(2,:)
];
% normalizing rows of A, as suggested in a topic on StackOverflow
A(1,:) = A(1,:)/norm(A(1,:));
A(2,:) = A(2,:)/norm(A(2,:));
A(3,:) = A(3,:)/norm(A(3,:));
A(4,:) = A(4,:)/norm(A(4,:));
[U S V] = svd(A);
TriangulatedPoints(:, i) = V(:, end);
end
for i = 1:4
TriangulatedPoints(i, :) = TriangulatedPoints(i, :) ./ TriangulatedPoints(4, :);
end
figure()
scatter3(TriangulatedPoints(1, :), TriangulatedPoints(2, :), TriangulatedPoints(3, :));
title('Triangulated Points');
What was missing from the code above is that the camera matrices also need to be transformed by the normalizing transformation. Below is an example that works. Note also the difference in the structure of the normalizing transformations: in the question's example the scaling factor is put in the last element, whereas below it is multiplied into all the elements so that the last element is 1.
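The reason the cameras must be transformed as well follows directly from the projection equation: if x ~ P*X and T is the normalizing transform, then T*x ~ T*(P*X) = (T*P)*X, so the normalized image points only correspond to the transformed matrices T*P and T*Q. That is why P = tP * P and Q = tQ * Q appear below before the triangulation loop.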
function testTriangulationDavid()
close all
clear all
P = [
-1.81066 0.00000 0.00000 0.00000;
0.00000 2.41421 0.00000 0.00000;
0.00000 0.00000 1.00000 1.50000
];
Q = [
-0.36118 0.00000 0.00000 0.00000
0.00000 2.00875 1.33916 0.00000
0.00000 -0.55470 0.83205 1.80278
];
% homogeneous 3D coordinates
Pts = [
-0.2 -0.2 -0.2 -0.2 0.2 0.2 0.2 0.2;
-0.2 -0.2 0.2 0.2 -0.2 -0.2 0.2 0.2;
-0.2 0.2 -0.2 0.2 -0.2 0.2 -0.2 0.2;
1.0 1.0 1.0 1.0 1.0 1.0 1.0 1.0
];
numPts = length(Pts(1,:));
% project points
PPtsHom = P*Pts;
QPtsHom = Q*Pts;
% normalize for homogeneous scaling
for i = 1:3
PPtsHom(i, :) = PPtsHom(i, :) ./ PPtsHom(3, :);
QPtsHom(i, :) = QPtsHom(i, :) ./ QPtsHom(3, :);
end
% 2D cartesian coordinates
PPtsCart = PPtsHom(1:2,:);
QPtsCart = QPtsHom(1:2,:);
% compute normalizing transform
% calc centroid
centroidPPtsCart = mean(PPtsCart,2);
centroidQPtsCart = mean(QPtsCart,2);
% calc mean distance to centroid
normsPPtsCart = zeros(1, numPts);
normsQPtsCart = zeros(1, numPts);
for i = 1:numPts
normsPPtsCart(1,i) = norm(PPtsCart(:,i) - centroidPPtsCart);
normsQPtsCart(1,i) = norm(QPtsCart(:,i) - centroidQPtsCart);
end
mdcPPtsCart = mean(normsPPtsCart);
mdcQPtsCart = mean(normsQPtsCart);
% setup transformation
scaleP = sqrt(2)/mdcPPtsCart;
scaleQ = sqrt(2)/mdcQPtsCart;
tP = [ scaleP 0 -scaleP*centroidPPtsCart(1);
0 scaleP -scaleP*centroidPPtsCart(2);
0 0 1];
tQ = [ scaleQ 0 -scaleQ*centroidQPtsCart(1);
0 scaleQ -scaleQ*centroidQPtsCart(2);
0 0 1];
% transform points
PPtsHom = tP * PPtsHom;
QPtsHom = tQ * QPtsHom;
% normalize for homogeneous scaling
for i = 1:3
PPtsHom(i, :) = PPtsHom(i, :) ./ PPtsHom(3, :);
QPtsHom(i, :) = QPtsHom(i, :) ./ QPtsHom(3, :);
end
% 2D cartesian coordinates
PPtsCart = PPtsHom(1:2,:);
QPtsCart = QPtsHom(1:2,:);
% transform cameras
P = tP * P;
Q = tQ * Q;
% triangulating points
TriangulatedPoints = zeros(4,numPts);
for i = 1:numPts
A = [
PPtsCart(1, i) * P(3, :) - P(1, :);
PPtsCart(2, i) * P(3, :) - P(2, :);
QPtsCart(1, i) * Q(3,:) - Q(1,:);
QPtsCart(2, i) * Q(3,:) - Q(2,:)
];
[U S V] = svd(A);
TriangulatedPoints(:, i) = V(:, end);
end
for i = 1:4
TriangulatedPoints(i, :) = TriangulatedPoints(i, :) ./ TriangulatedPoints(4, :);
end
figure()
scatter3(TriangulatedPoints(1, :), TriangulatedPoints(2, :), TriangulatedPoints(3, :));
title('Triangulated Points');
axis equal;
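As a quick numerical sanity check (a sketch, meant to be appended at the end of the function while TriangulatedPoints and Pts are still in scope), the reconstruction can be compared against the original cube; with this noise-free synthetic data the error should be close to machine precision:
% sanity check: compare the dehomogenized reconstruction with the original points
reconstructionError = max(abs(TriangulatedPoints(:) - Pts(:)));
fprintf('maximum reconstruction error: %g\n', reconstructionError);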