C.Radford

Reputation: 932

OpenCV C++ create reusable set of keypoints and descriptors for stitching multiple images

I have created a program that can stitch multiple images together and am now looking to improve its efficiency. Depending on the size of the stitched image, it eventually becomes so large and contains so many keypoints that the machine runs out of allocatable memory. To compensate for this, my goal is to store all the keypoints and descriptors as they are found, so that I only need to find them in the new image being stitched and don't need to find them again in the master stitched image. I had this process working in Python but haven't had the same luck in C++. In order to do this I need to perform a perspectiveTransform() on the keypoints, and therefore convert them from vector&lt;KeyPoint&gt; to vector&lt;Point2f&gt; and back to vector&lt;KeyPoint&gt;. I have been able to achieve this and can confirm it works (pic to follow). I am not sure whether the same process needs to be done to the descriptors (currently I have done it, but it's either wrong or not effective).
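For reference, the round trip boils down to this (a minimal sketch; kps and H are stand-ins for my actual keypoint vector and 3x3 homography):

vector<Point2f> pts;
KeyPoint::convert(kps, pts);        // vector<KeyPoint> -> vector<Point2f>
perspectiveTransform(pts, pts, H);  // warp the point coordinates in place
KeyPoint::convert(pts, kps);        // back again; note size/angle/octave
                                    // are reset to defaults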

Issue: When I run this, the keypoints and descriptors don't appear to work, and I hit an error I created myself: "Not enough matches found", even though I know at least the keypoints are making their way into the function.

Here is the code for the keypoint and descriptor transforms. The code first calculates the warpPerspective to be applied to image one, as the homography will warp the second image only. The rest of the code deals with the keypoints and descriptors.

tuple<Mat, vector<KeyPoint>, Mat>  stitchMatches(Mat image1,Mat image2, Mat homography, vector<KeyPoint> kp1, vector<KeyPoint> kp2 , Mat desc1, Mat desc2){
    Mat result, destination, descriptors_updated;
    vector<Point2f> fourPoint;
    vector<KeyPoint> keypoints_updated;

    //-Get the four corners of the first image (master)
    fourPoint.push_back(Point2f (0,0));
    fourPoint.push_back(Point2f (image1.size().width,0));
    fourPoint.push_back(Point2f (0, image1.size().height));
    fourPoint.push_back(Point2f (image1.size().width, image1.size().height));
    //perspectiveTransform(Mat(fourPoint), destination, homography);


    //- Get points used to determine Htr
    double min_x, min_y, tam_x, tam_y;
    float min_x1, min_x2, min_y1, min_y2, max_x1, max_x2, max_y1, max_y2;
    min_x1 = min(fourPoint.at(0).x, fourPoint.at(1).x);
    min_x2 = min(fourPoint.at(2).x, fourPoint.at(3).x);
    min_y1 = min(fourPoint.at(0).y, fourPoint.at(1).y);
    min_y2 = min(fourPoint.at(2).y, fourPoint.at(3).y);
    max_x1 = max(fourPoint.at(0).x, fourPoint.at(1).x);
    max_x2 = max(fourPoint.at(2).x, fourPoint.at(3).x);
    max_y1 = max(fourPoint.at(0).y, fourPoint.at(1).y);
    max_y2 = max(fourPoint.at(2).y, fourPoint.at(3).y);
    min_x = min(min_x1, min_x2);
    min_y = min(min_y1, min_y2);
    tam_x = max(max_x1, max_x2);
    tam_y = max(max_y1, max_y2);

    //- Htr is used to map image one into the result, in line with the already warped image 1
    Mat Htr = Mat::eye(3,3,CV_64F);
    if (min_x < 0){
        tam_x = image2.size().width - min_x;
        Htr.at<double>(0,2)= -min_x;
    }
    if (min_y < 0){
        tam_y = image2.size().height - min_y;
        Htr.at<double>(1,2)= -min_y;
    }

    result = Mat(Size(tam_x*2,tam_y*2), CV_8UC3,cv::Scalar(0,0,0));
    warpPerspective(image2, result, Htr, result.size(), INTER_LINEAR, BORDER_TRANSPARENT,   0);
    warpPerspective(image1, result, (Htr*homography), result.size(), INTER_LINEAR, BORDER_TRANSPARENT,0);



    //-- Variables to hold the keypoints at the respective stages
    vector<Point2f> kp1Local,kp2Local;
    vector<KeyPoint> kp1updated, kp2updated;


    //Localize the keypoints to allow for perspective change
    KeyPoint::convert(kp1, kp1Local);
    KeyPoint::convert(kp2, kp2Local);

    //perform perspective transform on the keypoints of type vector<Point2f>
    perspectiveTransform(kp1Local, kp1Local, (Htr));
    perspectiveTransform(kp2Local, kp2Local, (Htr*homography));


    //convert keypoints back to type vector<KeyPoint>
    for( size_t i = 0; i < kp1Local.size(); i++ ) {
        kp1updated.push_back(KeyPoint(kp1Local[i], 1.f));
    }
    for( size_t i = 0; i < kp2Local.size(); i++ ) {
        kp2updated.push_back(KeyPoint(kp2Local[i], 1.f));
    }

    //Add to master list of keypoints to be passed along during the next iteration
    keypoints_updated.reserve(kp1updated.size() + kp2updated.size());
    keypoints_updated.insert(keypoints_updated.end(),kp1updated.begin(),kp1updated.end());
    keypoints_updated.insert(keypoints_updated.end(),kp2updated.begin(),kp2updated.end());

    //warpPerspective the descriptors to match the images and corresponding keypoints
    Mat desc1New, desc2New;
    warpPerspective(desc2, desc2New, Htr, result.size(), INTER_LINEAR, BORDER_TRANSPARENT,   0);
    warpPerspective(desc1, desc1New, (Htr*homography), result.size(), INTER_LINEAR, BORDER_TRANSPARENT,0);

    //create a new Mat including the descriptors from desc1 and desc2
    descriptors_updated.push_back(desc1New);
    descriptors_updated.push_back(desc2New);


    //------------TEST to see if keypoints have moved

    Mat img_keypoints;
    drawKeypoints( result, keypoints_updated, img_keypoints, Scalar::all(-1), DrawMatchesFlags::DEFAULT );

    imshow("Keypoints 1", img_keypoints );
    waitKey();
    destroyAllWindows();



    return {result, keypoints_updated, descriptors_updated};
}
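(A note on the descriptor question above: descriptors encode local appearance rather than position, so an alternative to warping them, sketched here under that assumption, is to simply stack the two Mats row-wise so their rows stay aligned with keypoints_updated:)

//stack descriptors instead of warping them; row order must match
//keypoints_updated (all kp1 rows first, then all kp2 rows)
Mat descriptors_stacked;
vconcat(desc1, desc2, descriptors_stacked);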

The following code is my master stitching program that does the actual stitching.

tuple<Mat,vector<KeyPoint>,Mat> stitch(Mat img1,Mat img2 ,vector<KeyPoint> keypoints, Mat descriptors, String featureDetection,String featureExtractor,String keypointsMatcher,String showMatches){

    Mat desc, desc1, desc2, homography, result, croppedResult,descriptors_updated;
    std::vector<KeyPoint> keypoints_updated, kp1, kp2;
    std::vector<DMatch> matches;
    //-Base Case[2]
    if (keypoints.empty()){

        //-Detect Keypoints and their descriptors
        tie(kp1,desc1) = KeyPointDescriptor(img1, featureDetection,featureExtractor);
        tie(kp2,desc2) = KeyPointDescriptor(img2, featureDetection,featureExtractor);

        //Find matches and calculated homography based on keypoints and descriptors
        std::tie(matches,homography) = matchFeatures(kp1,  desc1,kp2, desc2, keypointsMatcher);
        //draw matches if requested
        if(showMatches == "true"){
            drawMatchedImages( img1, kp1, img2, kp2, matches);
        }
        //stitch the images and update the keypoint and descriptors
        std::tie(result,keypoints_updated,descriptors_updated) = stitchMatches(img1, img2, homography,kp1,kp2,desc1,desc2);
        //crop function using created cropping function
        croppedResult = crop(result);
        return {croppedResult,keypoints_updated,descriptors_updated};

    }

    //base case[3:n]
    else{

        //Get keypoints and descriptors of new image and add to respective lists
        tie(kp2,desc2) = KeyPointDescriptor(img2, featureDetection,featureExtractor);

        //find matches and determine homography
        std::tie(matches,homography) = matchFeatures(keypoints_updated,descriptors_updated,kp2,desc2, keypointsMatcher);
        //draw matches if requested
        if(showMatches == "true")
            drawMatchedImages( img1, keypoints, img2, kp2, matches);

        //stitch the images and update the keypoint and descriptors
        tie(result,keypoints_updated,descriptors_updated) = stitchMatches(img1, img2, homography,keypoints,kp2,descriptors,desc2);
        //crop function using created cropping function
        croppedResult = crop(result);
        return {croppedResult,keypoints_updated,descriptors_updated};
        }
}

Lastly, here is an image of the keypoints being transformed onto the stitched image. Any help is greatly appreciated!

[image: transformed keypoints drawn on the stitched result]

Upvotes: 0

Views: 965

Answers (1)

C.Radford

Reputation: 932

After combing through the code I just happened to find I was using the wrong variable at one point! :)
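In case it helps anyone else hitting the same "Not enough matches found" error: judging from the code as posted (so treat this as a reading, not a certainty), the suspect line is in the else branch of stitch(), which matches against the freshly declared, still-empty locals instead of the keypoints and descriptors parameters that were passed in:

//match against the accumulated keypoints/descriptors passed in as parameters,
//not the empty locals keypoints_updated/descriptors_updated
std::tie(matches, homography) = matchFeatures(keypoints, descriptors, kp2, desc2, keypointsMatcher);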

Upvotes: 1
