Reputation: 79
I've made a simulation of fish-eye distortion.
I want to develop a reverse program that can convert the distorted image back to a normal image.
I've tried to use the undistortPoints() function, but I couldn't understand its input (the distortion coefficients).
cv.UndistortPoints(distorted, undistorted, intrinsics, dist_coeffs)
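For reference, a minimal sketch of how the C++ API expects those arguments (the intrinsics fx, fy, cx, cy and the coefficients k1, k2, p1, p2, k3 below are placeholder values, not real calibration data):

#include <opencv2/opencv.hpp>
#include <vector>

int main()
{
    // 3x3 camera matrix K: fx, fy are focal lengths in pixels, (cx, cy) the principal point.
    cv::Mat K = (cv::Mat_<double>(3, 3) << 500,   0, 320,
                                             0, 500, 240,
                                             0,   0,   1);
    // Distortion coefficients in OpenCV order: k1, k2, p1, p2, k3 (placeholders).
    cv::Mat dist = (cv::Mat_<double>(1, 5) << -0.2, 0.05, 0, 0, 0);

    std::vector<cv::Point2f> distorted = { cv::Point2f(100.f, 150.f) };
    std::vector<cv::Point2f> normalized;

    // Output is in normalized image coordinates unless a new projection matrix P is passed.
    cv::undistortPoints(distorted, normalized, K, dist);
    return 0;
}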
My code for fish eye distortion:
#include "stdio.h"
#include <cv.h>
#include <highgui.h>
#include <math.h>
#include <iostream>
void sampleImage(const IplImage* arr, float idx0, float idx1, CvScalar& res)
{
if(idx0<0 || idx1<0 || idx0>(cvGetSize(arr).height-1) || idx1>(cvGetSize(arr).width-1))
{
res.val[0]=0;
res.val[1]=0;
res.val[2]=0;
res.val[3]=0;
return;
}
float idx0_fl=floor(idx0);
float idx0_cl=ceil(idx0);
float idx1_fl=floor(idx1);
float idx1_cl=ceil(idx1);
CvScalar s1=cvGet2D(arr,(int)idx0_fl,(int)idx1_fl);
CvScalar s2=cvGet2D(arr,(int)idx0_fl,(int)idx1_cl);
CvScalar s3=cvGet2D(arr,(int)idx0_cl,(int)idx1_cl);
CvScalar s4=cvGet2D(arr,(int)idx0_cl,(int)idx1_fl);
float x = idx0 - idx0_fl;
float y = idx1 - idx1_fl;
res.val[0]= s1.val[0]*(1-x)*(1-y) + s2.val[0]*(1-x)*y + s3.val[0]*x*y + s4.val[0]*x*(1-y);
res.val[1]= s1.val[1]*(1-x)*(1-y) + s2.val[1]*(1-x)*y + s3.val[1]*x*y + s4.val[1]*x*(1-y);
res.val[2]= s1.val[2]*(1-x)*(1-y) + s2.val[2]*(1-x)*y + s3.val[2]*x*y + s4.val[2]*x*(1-y);
res.val[3]= s1.val[3]*(1-x)*(1-y) + s2.val[3]*(1-x)*y + s3.val[3]*x*y + s4.val[3]*x*(1-y);
}
// Globals set in main(): scale/shift that keep the distorted image inside the frame.
float xscale;
float yscale;
float xshift;
float yshift;

// Radial distortion of the x coordinate around the centre (cx, cy) with strength k.
float getRadialX(float x, float y, float cx, float cy, float k)
{
    x = (x*xscale + xshift);
    y = (y*yscale + yshift);
    float res = x + ((x-cx)*k*((x-cx)*(x-cx) + (y-cy)*(y-cy)));
    return res;
}

// Radial distortion of the y coordinate.
float getRadialY(float x, float y, float cx, float cy, float k)
{
    x = (x*xscale + xshift);
    y = (y*yscale + yshift);
    float res = y + ((y-cy)*k*((x-cx)*(x-cx) + (y-cy)*(y-cy)));
    return res;
}
float thresh = 1;

// Bisection search for the shift that maps coordinate 0 close to zero under the distortion.
float calc_shift(float x1, float x2, float cx, float k)
{
    float x3 = x1 + (x2-x1)*0.5;
    float res1 = x1 + ((x1-cx)*k*((x1-cx)*(x1-cx)));
    float res3 = x3 + ((x3-cx)*k*((x3-cx)*(x3-cx)));

    // std::cerr<<"x1: "<<x1<<" - "<<res1<<" x3: "<<x3<<" - "<<res3<<std::endl;

    if (res1 > -thresh && res1 < thresh)
        return x1;

    if (res3 < 0)
        return calc_shift(x3, x2, cx, k);
    else
        return calc_shift(x1, x3, cx, k);
}
int main(int argc, char** argv)
{
    IplImage* src = cvLoadImage("D:\\2012 Projects\\FishEye\\Debug\\images\\grid1.bmp", 1);
    IplImage* dst = cvCreateImage(cvGetSize(src), src->depth, src->nChannels);
    IplImage* dst2 = cvCreateImage(cvGetSize(src), src->depth, src->nChannels);

    float K = 0.002;                         // distortion strength
    float centerX = (float)(src->width/2);
    float centerY = (float)(src->height/2);
    int width = cvGetSize(src).width;
    int height = cvGetSize(src).height;

    // Compute the shift/scale that keep the distorted result within the image bounds.
    xshift = calc_shift(0, centerX-1, centerX, K);
    float newcenterX = width - centerX;
    float xshift_2 = calc_shift(0, newcenterX-1, newcenterX, K);

    yshift = calc_shift(0, centerY-1, centerY, K);
    float newcenterY = height - centerY;
    float yshift_2 = calc_shift(0, newcenterY-1, newcenterY, K);

    // scale = (centerX-xshift)/centerX;
    xscale = (width - xshift - xshift_2) / width;
    yscale = (height - yshift - yshift_2) / height;

    std::cerr << xshift << " " << yshift << " " << xscale << " " << yscale << std::endl;
    std::cerr << cvGetSize(src).height << std::endl;
    std::cerr << cvGetSize(src).width << std::endl;

    // For every destination pixel, find its radially distorted source position and sample it.
    for (int j = 0; j < cvGetSize(dst).height; j++)
    {
        for (int i = 0; i < cvGetSize(dst).width; i++)
        {
            CvScalar s;
            float x = getRadialX((float)i, (float)j, centerX, centerY, K);
            float y = getRadialY((float)i, (float)j, centerX, centerY, K);
            sampleImage(src, y, x, s);
            cvSet2D(dst, j, i, s);
        }
    }

#if 0
    cvNamedWindow("Source1", 1);
    cvShowImage("Source1", dst);
    cvWaitKey(0);
#endif

    cvSaveImage("D:\\2012 Projects\\FishEye\\Debug\\images\\grid3.bmp", dst, 0);

    cvNamedWindow("Source1", 1);
    cvShowImage("Source1", src);
    cvWaitKey(0);

    cvNamedWindow("Distortion", 2);
    cvShowImage("Distortion", dst);
    cvWaitKey(0);

#if 0
    for (int j = 0; j < cvGetSize(src).height; j++)
    {
        for (int i = 0; i < cvGetSize(src).width; i++)
        {
            CvScalar s;
            sampleImage(src, j+0.25, i+0.25, s);
            cvSet2D(dst, j, i, s);
        }
    }
    cvNamedWindow("Source1", 1);
    cvShowImage("Source1", src);
    cvWaitKey(0);
#endif
    return 0;
}
Upvotes: 0
Views: 2133
Reputation: 8980
Actually, my original answer was about the undistortion algorithm for individual points. If you want to undistort a complete image, there is a much simpler technique, explained in this other thread:
Understanding of openCV undistortion
The outline of the algorithm (which is the one used in the OpenCV function undistort()) is as follows. For each pixel of the destination lens-corrected image:
1. Convert the pixel coordinates (u_dst, v_dst) to normalized coordinates (x', y') using the inverse of the calibration matrix K.
2. Apply your lens-distortion model to obtain the distorted normalized coordinates (x'', y'').
3. Convert (x'', y'') to distorted pixel coordinates (u_src, v_src) using the calibration matrix K.
4. Use the interpolation method of your choice to find the intensity/depth associated with the pixel coordinates (u_src, v_src) in the source image, and assign this intensity/depth to the current destination pixel (u_dst, v_dst).
A minimal sketch of this per-pixel loop follows the list.
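The sketch below assumes a purely radial model x'' = x'·(1 + k1·r²) and an 8-bit BGR image; fx, fy, cx, cy and k1 are placeholders, not values taken from the question:

#include <opencv2/opencv.hpp>

cv::Mat undistortSimple(const cv::Mat &src, double fx, double fy,
                        double cx, double cy, double k1)
{
    cv::Mat dst(src.size(), src.type());
    for (int v = 0; v < dst.rows; ++v) {
        for (int u = 0; u < dst.cols; ++u) {
            // 1. pixel -> normalized coordinates (inverse of K)
            double x = (u - cx) / fx;
            double y = (v - cy) / fy;
            // 2. apply the distortion model to the normalized point
            double r2 = x*x + y*y;
            double xd = x * (1 + k1*r2);
            double yd = y * (1 + k1*r2);
            // 3. normalized -> distorted pixel coordinates (apply K)
            double us = fx*xd + cx;
            double vs = fy*yd + cy;
            // 4. nearest-neighbour lookup in the source (use bilinear for better quality)
            int ui = cvRound(us), vi = cvRound(vs);
            if (ui >= 0 && ui < src.cols && vi >= 0 && vi < src.rows)
                dst.at<cv::Vec3b>(v, u) = src.at<cv::Vec3b>(vi, ui);
            else
                dst.at<cv::Vec3b>(v, u) = cv::Vec3b(0, 0, 0);
        }
    }
    return dst;
}

In practice you would compute the source coordinates once into two maps and call cv::remap(), which is essentially what undistort() does internally.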
Original answer:
Here is the undistortion algorithm extracted from the OpenCV function undistortPoints():
// Extracted (and simplified) from undistortPoints(). Kfx, Kfy, Kcx, Kcy are the camera
// intrinsics and Dk1..Dk3, Dp1, Dp2 the distortion coefficients, stored as members of
// the enclosing class.
void dist2norm(const cv::Point2d &pt_dist, cv::Point2d &pt_norm) const {
    // Initial guess: apply the inverse calibration matrix only.
    pt_norm.x = (pt_dist.x - Kcx) / Kfx;
    pt_norm.y = (pt_dist.y - Kcy) / Kfy;

    // Iteratively remove the distortion (a few fixed-point iterations are enough).
    int niters = (Dk1 != 0. ? 5 : 0);
    double x0 = pt_norm.x, y0 = pt_norm.y;
    for (int i = 0; i < niters; ++i) {
        double x2 = pt_norm.x*pt_norm.x,
               y2 = pt_norm.y*pt_norm.y,
               xy = pt_norm.x*pt_norm.y,
               r2 = x2 + y2;
        double icdist = 1./(1 + ((Dk3*r2 + Dk2)*r2 + Dk1)*r2);
        double deltaX = 2*Dp1*xy + Dp2*(r2 + 2*x2);
        double deltaY = Dp1*(r2 + 2*y2) + 2*Dp2*xy;
        pt_norm.x = (x0 - deltaX)*icdist;
        pt_norm.y = (y0 - deltaY)*icdist;
    }
}
If you provide the coordinates of a point in the distorted image in the argument pt_dist, it will calculate the normalized coordinates of the associated point and return them in pt_norm. You can then obtain the coordinates of the associated point in the undistorted image as
pt_undist = K . [pt_norm.x; pt_norm.y; 1]
where K is the camera matrix.
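For completeness, a small usage sketch (assuming the same enclosing class and member names as above) that maps a distorted pixel straight to its undistorted pixel position:

// Distorted pixel -> undistorted pixel, reusing dist2norm() and the camera matrix K.
cv::Point2d dist2undist_pixel(const cv::Point2d &pt_dist) const {
    cv::Point2d pt_norm;
    dist2norm(pt_dist, pt_norm);               // distorted pixel -> normalized coordinates
    return cv::Point2d(Kfx * pt_norm.x + Kcx,  // u = fx*x + cx
                       Kfy * pt_norm.y + Kcy); // v = fy*y + cy
}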
The standard lens distortion model used by OpenCV is explained at the beginning of this page, where the distortion coefficients are (k1, k2, p1, p2, k3, k4, k5, k6) (most often we use k4 = k5 = k6 = 0).
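For reference, that model maps a normalized point (x', y') with r² = x'² + y'² to
x'' = x'·(1 + k1·r² + k2·r⁴ + k3·r⁶)/(1 + k4·r² + k5·r⁴ + k6·r⁶) + 2·p1·x'·y' + p2·(r² + 2·x'²)
y'' = y'·(1 + k1·r² + k2·r⁴ + k3·r⁶)/(1 + k4·r² + k5·r⁴ + k6·r⁶) + p1·(r² + 2·y'²) + 2·p2·x'·y'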
I don't know what your model for fish-eye distortion is, but you can surely adapt the above algorithm to your case. Otherwise, you may use a non-linear optimization algorithm (e.g. Levenberg-Marquardt or any other) to recover the undistorted coordinates from the distorted ones.
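For the radial part of the model in your question (ignoring the scale/shift normalization), i.e. x_d = cx + (x_u - cx)·(1 + k·r_u²) and similarly for y, a simple fixed-point iteration in the spirit of the loop above should already work. Here cx, cy, k correspond to centerX, centerY, K in your code; the helper name is mine:

// Invert the question's radial model by fixed-point iteration.
void invertRadial(float xd, float yd, float cx, float cy, float k,
                  float &xu, float &yu, int iters = 10)
{
    xu = xd; yu = yd;                        // initial guess: the distorted point itself
    for (int i = 0; i < iters; ++i) {
        float dx = xu - cx, dy = yu - cy;
        float r2 = dx*dx + dy*dy;            // squared radius of the current estimate
        xu = cx + (xd - cx) / (1 + k*r2);    // from x_d - cx = (x_u - cx)*(1 + k*r_u^2)
        yu = cy + (yd - cy) / (1 + k*r2);
    }
}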
Upvotes: 1