Reputation: 1469
Hey, I'm working on a project to stabilize a video sequence using optical flow. The optical flow itself is working well so far, but I have two possible routes in front of me:

1. After getting the optical flow, I compute the average image displacement and subtract that average from the features of the second frame. My question is: what do I do next?

2. Alternatively, I could use the OpenCV functions to stabilize the image: I calculate the transformation matrix and then call cvPerspectiveTransform followed by cvWarpPerspective, but I'm getting a "bad flag" error.

You can see the code below. What do I need to do to stabilize the image? Any solution you can provide is welcome.
#include <stdio.h>
#include <stdlib.h>
//#include "/usr/include/opencv/cv.h"
#include <cv.h>
#include <cvaux.h>
#include <highgui.h>
#include <math.h>
#include <iostream>
#define PI 3.1415926535898
double rads(double degs)
{
    return (PI/180 * degs);
}
CvCapture *cap;
IplImage *img;
IplImage *frame;
IplImage *frame1;
IplImage *frame3;
IplImage *frame2;
IplImage *temp_image1;
IplImage *temp_image2;
IplImage *frame1_1C;
IplImage *frame2_1C;
IplImage *eig_image;
IplImage *temp_image;
IplImage *pyramid1 = NULL;
IplImage *pyramid2 = NULL;
char * mapx;
char * mapy;
int h;
int corner_count;
CvMat* M = cvCreateMat(3,3,CV_32FC1);
CvPoint p,q,l,s;
double hypotenuse;
double angle;
int line_thickness = 1, line_valid = 1, pos = 0;
CvScalar line_color;
CvScalar target_color[4] = { // in BGR order
    {{   0,   0, 255, 0 }}, // red
    {{   0, 255,   0, 0 }}, // green
    {{ 255,   0,   0, 0 }}, // blue
    {{   0, 255, 255, 0 }}  // yellow
};
inline static double square(int a)
{
    return a * a;
}
char* IntToChar(int num) { return NULL; }
/*
char* IntToChar(int num)
{
    char* retstr = static_cast<char*>(calloc(12, sizeof(char)));
    if (sprintf(retstr, "%i", num) > 0)
    {
        return retstr;
    }
    else
    {
        return NULL;
    }
}
*/
inline static void allocateOnDemand( IplImage **img, CvSize size, int depth, int channels )
{
    if ( *img != NULL )
        return;
    *img = cvCreateImage( size, depth, channels );
    if ( *img == NULL )
    {
        fprintf(stderr, "Error: Couldn't allocate image. Out of memory?\n");
        exit(-1);
    }
}
void clearImage (IplImage *img)
{
    for (int i = 0; i < img->imageSize; i++)
        img->imageData[i] = (char) 0;
}
int main()
{
    cap = cvCaptureFromCAM(0);
    //cap = cvCaptureFromAVI("/home/saif/Desktop/NAO.. the project/jj/Test3.avi");
    CvSize frame_size;
    // Reading the video's frame size
    frame_size.height = (int) cvGetCaptureProperty( cap, CV_CAP_PROP_FRAME_HEIGHT );
    frame_size.width  = (int) cvGetCaptureProperty( cap, CV_CAP_PROP_FRAME_WIDTH );
    cvNamedWindow("Optical Flow", CV_WINDOW_AUTOSIZE);
    while (true)
    {
        // Get the first frame of the pair.
        frame = cvQueryFrame( cap );
        if (frame == NULL)
        {
            fprintf(stderr, "Error: Hmm. The end came sooner than we thought.\n");
            return -1;
        }
        // Allocating the working images if they are not allocated already.
        allocateOnDemand( &frame1_1C, frame_size, IPL_DEPTH_8U, 1 );
        cvConvertImage(frame, frame1_1C, 0);
        allocateOnDemand( &frame1, frame_size, IPL_DEPTH_8U, 3 );
        cvConvertImage(frame, frame1, 0);
        // Get the second frame of video.
        frame = cvQueryFrame( cap );
        if (frame == NULL)
        {
            fprintf(stderr, "Error: Hmm. The end came sooner than we thought.\n");
            return -1;
        }
        allocateOnDemand( &frame2_1C, frame_size, IPL_DEPTH_8U, 1 );
        cvConvertImage(frame, frame2_1C, 0);
        allocateOnDemand( &frame2, frame_size, IPL_DEPTH_8U, 3 );
        cvConvertImage(frame, frame2, 0);
        CvSize optical_flow_window = cvSize(5, 5);
        // Allocate these once; creating them with cvCreateImage here would
        // leak two images on every pass through the loop.
        allocateOnDemand( &eig_image,  frame_size, IPL_DEPTH_32F, 1 );
        allocateOnDemand( &temp_image, frame_size, IPL_DEPTH_32F, 1 );
        CvTermCriteria optical_flow_termination_criteria = cvTermCriteria( CV_TERMCRIT_ITER | CV_TERMCRIT_EPS, 20, .3 );
        // Feature tracking
        CvPoint2D32f frame1_features[4];
        CvPoint2D32f frame2_features[4];
        //cvCornerEigenValsAndVecs(eig_image, temp_image, 1 );
        corner_count = 4;
        cvGoodFeaturesToTrack(frame1_1C, eig_image, temp_image, frame1_features, &corner_count, 0.1, .01, NULL, 5, 1);
        // zero_zone must be cvSize(-1,-1) to disable it; passing a real size
        // there would zero out the summation over the search window.
        cvFindCornerSubPix( frame1_1C, frame1_features, corner_count, optical_flow_window, cvSize(-1, -1), optical_flow_termination_criteria );
        if ( corner_count <= 0 )
            printf( "\nNo features detected.\n" );
        else
            printf( "\nNumber of features found = %d\n", corner_count );
        // Lucas-Kanade method.
        char optical_flow_found_feature[20];
        float optical_flow_feature_error[20];
        allocateOnDemand( &pyramid1, frame_size, IPL_DEPTH_8U, 1 );
        allocateOnDemand( &pyramid2, frame_size, IPL_DEPTH_8U, 1 );
        cvCalcOpticalFlowPyrLK(frame1_1C, frame2_1C, pyramid1, pyramid2, frame1_features, frame2_features, corner_count, optical_flow_window, 5, optical_flow_found_feature, optical_flow_feature_error, optical_flow_termination_criteria, 0);
        /*
        double sumOfDistancesX = 0;
        double sumOfDistancesY = 0;
        int debug = 0;
        CvFont font1, font2;
        CvScalar red, green, blue;
        IplImage* seg_in = NULL;
        IplImage *seg_out = NULL;
        allocateOnDemand( &seg_in,  frame_size, IPL_DEPTH_8U, 3 );
        allocateOnDemand( &seg_out, frame_size, IPL_DEPTH_8U, 3 );
        clearImage(seg_in);
        clearImage(seg_out);
        for( int i = 0; i < corner_count; i++ )
        {
            if ( optical_flow_found_feature[i] == 0 )
                continue;
            p.x = (int) frame1_features[i].x;
            p.y = (int) frame1_features[i].y;
            q.x = (int) frame2_features[i].x;
            q.y = (int) frame2_features[i].y;
            angle = atan2( (double) p.y - q.y, (double) p.x - q.x );
            sumOfDistancesX += q.x - p.x;
            sumOfDistancesY += q.y - p.y;
            //cvRemap(frame2, frame1, averageDistanceX, averageDistanceY, CV_INTER_LINEAR | CV_WARP_FILL_OUTLIERS, cvScalarAll(0));
        }
        int averageDistanceX = sumOfDistancesX / corner_count;
        int averageDistanceY = sumOfDistancesY / corner_count;
        l.x = averageDistanceX - q.x;
        s.y = averageDistanceY - q.y;
        */
        //CvMat* N = cvCreateMat(3,3,CV_32FC1);
        cvGetPerspectiveTransform(frame2_features, frame1_features, M);
        // These two calls are the source of the "bad flag" error:
        // cvPerspectiveTransform and cvWarpPerspective expect CvMat/IplImage
        // arguments, not raw CvPoint2D32f arrays.
        //cvPerspectiveTransform(frame1_features, frame2_features, M);
        //cvWarpPerspective( frame2_features, frame1_features, M, CV_INTER_LINEAR+CV_WARP_FILL_OUTLIERS, cvScalarAll(0) );
        // Warp the image (not the feature points) instead:
        allocateOnDemand( &frame3, frame_size, IPL_DEPTH_8U, 3 );
        cvWarpPerspective( frame2, frame3, M, CV_INTER_LINEAR + CV_WARP_FILL_OUTLIERS, cvScalarAll(0) );
        cvShowImage("Optical Flow", frame3);
        cvWaitKey(50);
    }
    cvReleaseCapture(&cap);
    cvReleaseMat(&M);
    return 0;
}
Upvotes: 2
Views: 3187
Reputation: 7148
You don't want to subtract the average displacement from the second image; you want to transform (move) the second image by the average displacement so that it "matches" the first. Which "displacement" you use depends on your situation.
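For example, if the motion between the two frames is close to a pure translation, you can average the per-feature displacements (dx, dy) from the optical flow and move the second frame with a 2x3 translation matrix and cvWarpAffine. A minimal sketch in the same legacy C API the question uses; the shiftFrame name and the pre-allocated dst image are my own assumptions, not part of OpenCV:

// Shift src by (-dx, -dy) so it lines up with the previous frame.
// dx, dy: average feature displacement measured by the optical flow.
// dst must be a pre-allocated image with the same size/depth as src.
void shiftFrame(IplImage* src, IplImage* dst, double dx, double dy)
{
    CvMat* T = cvCreateMat(2, 3, CV_32FC1);
    cvmSet(T, 0, 0, 1.0); cvmSet(T, 0, 1, 0.0); cvmSet(T, 0, 2, -dx);
    cvmSet(T, 1, 0, 0.0); cvmSet(T, 1, 1, 1.0); cvmSet(T, 1, 2, -dy);
    cvWarpAffine(src, dst, T, CV_INTER_LINEAR + CV_WARP_FILL_OUTLIERS, cvScalarAll(0));
    cvReleaseMat(&T);
}

If the camera also rotates or zooms, a translation is not enough and you'd estimate a full transform (e.g. the homography the question already computes with cvGetPerspectiveTransform) and warp with that instead.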
EDIT: What you basically need to do for option 2 is keep a smoothed estimate of the movement between frames over the last few frames. You could compute this in any number of ways, but I'd suggest using something like a Kalman filter. Then, for a new frame, you calculate the movement between it and the (corrected) previous frame, subtract the smoothed average movement from that, and move the new frame by the difference.
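A rough sketch of that per-frame step, using an exponential moving average as a simple stand-in for the Kalman filter. The names stabilizeStep, smoothX/smoothY and alpha are hypothetical ones of mine, and shiftFrame is the helper sketched above:

// One stabilization step: update the smoothed motion estimate, then cancel
// only the deviation (jitter) from that smooth path in the new frame.
void stabilizeStep(IplImage* newFrame, IplImage* outFrame,
                   double dx, double dy,             // measured inter-frame motion
                   double* smoothX, double* smoothY, // running smoothed motion
                   double alpha)                     // smoothing factor, e.g. 0.9
{
    *smoothX = alpha * (*smoothX) + (1.0 - alpha) * dx;
    *smoothY = alpha * (*smoothY) + (1.0 - alpha) * dy;
    double jitterX = dx - *smoothX;  // deviation from the smooth camera path
    double jitterY = dy - *smoothY;
    shiftFrame(newFrame, outFrame, jitterX, jitterY);
}

Note that this preserves the intentional (smooth) camera motion and removes only the high-frequency shake, which is what you usually want from online stabilization.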
Upvotes: 3