4/11/2014

Video stabilization example source code (using the cvFindHomography and cvWarpPerspective functions in OpenCV)

Video stabilization example source code.

The principle is as follows:



Firstly, obtain 2 adjacent frames.
Extract good features to track from them.
Match the features between the 2 frames (here, with pyramidal Lucas-Kanade optical flow).
Estimate the homography matrix H from the matched points.
Warp the current frame toward the old frame using H.
Note that H is multiplied cumulatively frame after frame, so the warp is always relative to the first frame (see the short sketch below).
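
If H_k denotes the homography that maps frame k onto frame k-1, the accumulated matrix H_global = H_1 * H_2 * ... * H_k maps the current frame all the way back onto the first frame, and that accumulated matrix is what the warp uses. A minimal sketch of the accumulation step (variable names follow the full listing below):

double gH[9] = {1,0,0, 0,1,0, 0,0,1};   //accumulated homography, starts as the identity
CvMat gmxH = cvMat(3, 3, CV_64F, gH);
//every frame, after estimating the frame-to-frame homography mxH:
//  gmxH = gmxH * mxH;                  //accumulate (cvMatMul)
//  cvWarpPerspective(current_Img, WarpImg, &gmxH);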

The optical flow part of the source code is based on http://feelmare.blogspot.kr/2012/10/optical-flow-sample-source-code-using.html

This is the result of the video stabilization.
This is the example source code. ---
#include <stdio.h>
#include <opencv2/opencv.hpp>

#define MAX_COUNT 250
#define DELAY_T 3
#define PI 3.1415


int main()   
{   

 //////////////////////////////////////////////////////////////////////////   
 //image class         
 IplImage* image = 0;   

 //T, T-1 image   
 IplImage* current_Img = 0;   
 IplImage* Old_Img = 0;   

 //Optical Image   
 IplImage * imgA=0;   
 IplImage * imgB=0;   


 //Video Load   
 CvCapture * capture = cvCreateFileCapture("cam1.wmv"); //cvCaptureFromCAM(0); //cvCreateFileCapture("1.avi");   

 //Window   
 cvNamedWindow( "Origin OpticalFlow" , WINDOW_NORMAL);
 //cvNamedWindow( "RealOrigin" , WINDOW_NORMAL);
 //////////////////////////////////////////////////////////////////////////   


 //////////////////////////////////////////////////////////////////////////    
 //Optical Flow Variables    
 IplImage * eig_image=0;
 IplImage * tmp_image=0;
 int corner_count = MAX_COUNT;   
 CvPoint2D32f* cornersA = new CvPoint2D32f[ MAX_COUNT ];   
 CvPoint2D32f * cornersB = new CvPoint2D32f[ MAX_COUNT ];   

 CvSize img_sz;   
 int win_size=20;   

 IplImage* pyrA=0;   
 IplImage* pyrB=0;   

 char features_found[ MAX_COUNT ];   
 float feature_errors[ MAX_COUNT ];   
 //////////////////////////////////////////////////////////////////////////   


 //////////////////////////////////////////////////////////////////////////   
 //Variables for processing every DELAY_T-th frame   
 int one_zero=0;   
 //int t_delay=0;   

 //accumulated (global) homography, initialized to the identity
 double gH[9]={1,0,0, 0,1,0, 0,0,1};
 CvMat gmxH = cvMat(3, 3, CV_64F, gH);



 //Routine Start   
 while(1) {      


  //capture a frame from the video      
  if( cvGrabFrame( capture ) == 0 )   
   break;   
  //image = cvRetrieveFrame( capture );   
  //cvShowImage("RealOrigin", image );


  //Image Create   
  if(Old_Img == 0)      
  {      
   image = cvRetrieveFrame( capture );   
   current_Img = cvCreateImage(cvSize(image->width, image->height), image->depth, image->nChannels);
   memcpy(current_Img->imageData, image->imageData, sizeof(char)*image->imageSize );
   Old_Img  = cvCreateImage(cvSize(image->width, image->height), image->depth, image->nChannels);
   one_zero=1;
  }   



  if(one_zero == 0 )   
  {   
   if(eig_image == 0)
   {
    //cvGoodFeaturesToTrack expects single-channel 32-bit float temporary images
    eig_image = cvCreateImage(cvSize(image->width, image->height), IPL_DEPTH_32F, 1);
    tmp_image = cvCreateImage(cvSize(image->width, image->height), IPL_DEPTH_32F, 1);
   }

   //copy to image class   
   memcpy(Old_Img->imageData, current_Img->imageData, sizeof(char)*image->imageSize );   
   image = cvRetrieveFrame( capture );   
   memcpy(current_Img->imageData, image->imageData, sizeof(char)*image->imageSize );   

   //////////////////////////////////////////////////////////////////////////   
   //Create image for Optical flow   
   if(imgA == 0)   
   {   
    imgA = cvCreateImage( cvSize(image->width, image->height), IPL_DEPTH_8U, 1);   
    imgB = cvCreateImage( cvSize(image->width, image->height), IPL_DEPTH_8U, 1);       
   }      

   //RGB to Gray for Optical Flow   
   cvCvtColor(current_Img, imgA, CV_BGR2GRAY);   
   cvCvtColor(Old_Img, imgB, CV_BGR2GRAY);      

    //extract features (corner_count is an in/out parameter, so reset it every frame)
    corner_count = MAX_COUNT;
    cvGoodFeaturesToTrack(imgA, eig_image, tmp_image, cornersA, &corner_count, 0.01, 5.0, 0, 3, 0, 0.04);   
   cvFindCornerSubPix(imgA, cornersA, corner_count, cvSize(win_size, win_size), cvSize(-1, -1), cvTermCriteria(CV_TERMCRIT_ITER|CV_TERMCRIT_EPS, 20, 0.03));      


   CvSize pyr_sz = cvSize( imgA->width+8, imgB->height/3 );   
   if( pyrA == 0)   
   {    
    pyrA = cvCreateImage( pyr_sz, IPL_DEPTH_32F, 1);   
    pyrB = cvCreateImage( pyr_sz, IPL_DEPTH_32F, 1);   
   }   

   //Optical flow
   cvCalcOpticalFlowPyrLK( imgA, imgB, pyrA, pyrB, cornersA, cornersB, corner_count, cvSize(win_size, win_size), 5, features_found, feature_errors, cvTermCriteria( CV_TERMCRIT_ITER | CV_TERMCRIT_EPS, 20, 0.3), 0);   

   /////////////////////////////////////////////////////////////////////////      
   int fCount=0;
   for(int i=0; i < corner_count; ++i)
   {

     if( features_found[i] == 0 || feature_errors[i] > MAX_COUNT ) //skip lost features and large tracking errors
     continue;

    fCount++;
    //////////////////////////////////////////////////////////////////////////       
    //Vector Length   
    //float fVecLength = sqrt((float)((cornersA[i].x-cornersB[i].x)*(cornersA[i].x-cornersB[i].x)+(cornersA[i].y-cornersB[i].y)*(cornersA[i].y-cornersB[i].y)));   
    //Vector Angle   
    //float fVecSetha  = fabs( atan2((float)(cornersB[i].y-cornersA[i].y), (float)(cornersB[i].x-cornersA[i].x)) * 180/PI );   
    //cvLine( image, cvPoint(cornersA[i].x, cornersA[i].y), cvPoint(cornersB[i].x, cornersA[i].y), CV_RGB(0, 255, 0), 2);    
   }

   printf("%d \n", fCount);

   int inI=0;
   CvPoint2D32f* pt1 = new CvPoint2D32f[ fCount ];
   CvPoint2D32f * pt2 = new CvPoint2D32f[ fCount ];
   for(int i=0; i < corner_count; ++i)
   {
    if( features_found[i] == 0 || feature_errors[i] > MAX_COUNT )
     continue;
    pt1[inI] = cornersA[i];
    pt2[inI] = cornersB[i];
    
    cvLine( image, cvPoint(pt1[inI].x, pt1[inI].y), cvPoint(pt2[inI].x, pt2[inI].y), CV_RGB(0, 255, 0), 2);    
    inI++;
   }

   //FindHomography
   CvMat M1, M2;
   double H[9];
   CvMat mxH = cvMat(3, 3, CV_64F, H);
   M1 = cvMat(1, fCount, CV_32FC2, pt1);
   M2 = cvMat(1, fCount, CV_32FC2, pt2);

    //M2 = H*M1 , i.e. old = H*current
    if( !cvFindHomography(&M1, &M2, &mxH, CV_RANSAC, 2))
    {                 
     printf("Find Homography Fail!\n");
     cvSetIdentity(&mxH);  //fall back to the identity so the accumulated H is not corrupted
    }else{
     //printf(" %lf %lf %lf \n %lf %lf %lf \n %lf %lf %lf\n", H[0], H[1], H[2], H[3], H[4], H[5], H[6], H[7], H[8] );
    }

    delete [] pt1;
    delete [] pt2;    

   //warping by H
   //warpAffine(warped_2,warped_3,Transform_avg,Size( reSizeMat.cols, reSizeMat.rows));
   //warpPerspective(cameraFrame2, WarpImg, H, Size(WarpImg.cols, WarpImg.rows));   
    IplImage* WarpImg = cvCreateImage(cvSize(image->width, image->height), image->depth, image->nChannels);

   cvMatMul( &gmxH, &mxH, &gmxH);   // Ma*Mb   -> Mc
   printf(" %lf %lf %lf \n %lf %lf %lf \n %lf %lf %lf\n", H[0], H[1], H[2], H[3], H[4], H[5], H[6], H[7], H[8] );
   printf(" -----\n");
   printf(" %lf %lf %lf \n %lf %lf %lf \n %lf %lf %lf\n\n\n", gH[0], gH[1], gH[2], gH[3], gH[4], gH[5], gH[6], gH[7], gH[8] );
   
   //cvWarpAffine(current_Img, WarpImg, &gmxH);
   cvWarpPerspective(current_Img, WarpImg, &gmxH); 
   //cvWarpPerspective(Old_Img, WarpImg, &mxH); 


   //display
   cvNamedWindow("Stabilizing",WINDOW_NORMAL );
   cvShowImage("Stabilizing", WarpImg); 

   cvReleaseImage(&WarpImg);
   //
   //printf("[%d] - Sheta:%lf, Length:%lf\n",i , fVecSetha, fVecLength);   



   //cvWaitKey(0);
   //////////////////////////////////////////////////////////////////////////       

  }   
  cvShowImage( "Origin OpticalFlow", image);   

  //////////////////////////////////////////////////////////////////////////   

  //time delay   
  one_zero++;
  if( (one_zero % DELAY_T ) == 0)   
  {      
   one_zero=0;   
  }   

  //break      
  if( cvWaitKey(10) >= 0 )      
   break;      
 }      

 //release capture point      
 cvReleaseCapture(&capture);   
 //close the windows      
 cvDestroyWindow( "Origin OpticalFlow" );      
 cvDestroyWindow( "Stabilizing" );      

 cvReleaseImage(&Old_Img);    
 //////////////////////////////////////////////////////////////////////////   
 cvReleaseImage(&imgA);   
 cvReleaseImage(&imgB);    
 cvReleaseImage(&eig_image);
 cvReleaseImage(&tmp_image);
 delete [] cornersA;   
 delete [] cornersB;    
 cvReleaseImage(&pyrA);   
 cvReleaseImage(&pyrB);   


 //////////////////////////////////////////////////////////////////////////   
 return 0;
}   

---

17 comments:

  1. Anonymous3/9/14 15:27

    Hi, I have tried your code but after processing a few frames WarpImg becomes gray. What can cause that?

  2. This is what I got after trying your code: http://tinypic.com/player.php?v=264tfnr%3E&s=8#.VAeuQ0s2z8s .... this is the original video http://tinypic.com/player.php?v=23wl0f8%3E&s=8#.VAetsEs2z8s ... do you have an idea of what may be causing this? thanks.

  3. I am sorry, this source is a basic method for studying stabilization.
    In your video, two problems are caused by my source code.
    The first is that the H matrix is multiplied cumulatively, so the error also accumulates.
    The second is that we have to choose the right H matrix.

    I will upload a better example source code.
    Let's study more~ ^^

    Replies
    1. Hello JH Kim, I want some help from you. I'm from Sri Lanka. Please send a reply to my email: shashiwarna92@gmail.com

  4. Anonymous7/11/14 20:07

    Hi, JH Kim!
    I tried your code but I have a problem: I am unable to build it. It reports the following errors:
    error LNK2019: unresolved external symbol _cvDestroyWindow referenced in function _main
    error LNK2019: unresolved external symbol _cvReleaseCapture referenced in function _main
    error LNK2019: unresolved external symbol _cvWaitKey referenced in function _main
    error LNK2019: unresolved external symbol _cvReleaseImage referenced in function _main
    error LNK2019: unresolved external symbol _cvShowImage referenced in function _main
    error LNK2019: unresolved external symbol _cvWarpPerspective referenced in function _main
    error LNK2019: unresolved external symbol _cvGEMM referenced in function _main
    error LNK2019: unresolved external symbol _cvFindHomography referenced in function _main
    error LNK2019: unresolved external symbol _cvLine referenced in function _main
    error LNK2019: unresolved external symbol _cvCalcOpticalFlowPyrLK referenced in function _main

    I tried to find the solution with a Google search but found no suitable one. Could you help me find a solution?
    Thank you very much!

    Replies
    1. Did you set the OpenCV lib and include directory paths, and are the OpenCV DLL files located where your executable can find them?
      Please check again.
      These problems are related to a library link issue.
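
      For reference, a rough sketch of which import libraries those missing symbols live in, assuming OpenCV 2.4.9 on Windows (the "249" version suffix is an assumption; adjust it, or add the same .lib files under Linker > Input instead of using #pragma):

      #pragma comment(lib, "opencv_core249.lib")     // cvReleaseImage, cvGEMM (cvMatMul), ...
      #pragma comment(lib, "opencv_highgui249.lib")  // cvShowImage, cvWaitKey, cvReleaseCapture, cvDestroyWindow, ...
      #pragma comment(lib, "opencv_imgproc249.lib")  // cvWarpPerspective, cvLine, cvGoodFeaturesToTrack, ...
      #pragma comment(lib, "opencv_calib3d249.lib")  // cvFindHomography
      #pragma comment(lib, "opencv_video249.lib")    // cvCalcOpticalFlowPyrLK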
      Thank you

    2. Anonymous8/11/14 18:56

      Dear JH Kim!
      I set the OpenCV lib path in the Library Directories and the OpenCV include path in the Include Directories of Visual Studio 10. In addition, I declared the variables in the Windows environment variables, but it still did not work...

    3. Anonymous8/11/14 21:42

      Dear JH Kim!
      I found the problem. I was building for the Win32 platform while the OS is 64-bit Windows. However, your code runs very slowly and cannot keep up in real time. That looks different from the video you posted on YouTube, doesn't it?

    4. The YouTube video is the result of this source code.
      The processing time can differ according to the image size.
      In my case, the image size was probably 640x480 or 800x600.

      In the source code, the high-cost parts are:
      feature extraction
      matching
      homography
      warping

      Check the processing time of these parts.
      Search for the "getTickCount" keyword in this blog;
      you can find how to check processing time there.
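
      For example, a minimal timing sketch with the OpenCV C API (wrap any of the parts above the same way):

      double t0 = (double)cvGetTickCount();
      cvGoodFeaturesToTrack(imgA, eig_image, tmp_image, cornersA, &corner_count, 0.01, 5.0, 0, 3, 0, 0.04);
      double t1 = (double)cvGetTickCount();
      //cvGetTickFrequency() returns ticks per microsecond
      printf("feature extraction: %.2f ms\n", (t1 - t0) / (cvGetTickFrequency() * 1000.0));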

      thank you

  5. Anonymous22/8/16 18:11

    How can I stabilize video using rotation, translation, and scale only, not a full perspective transform?

  6. This comment has been removed by the author.

  7. Sir, if you can, please explain this code line by line. I am a beginner with OpenCV.

    Replies
    1. 164 : find the homography matrix H
      182 : multiply by H
      188 : warping

      The other code is preparation for these steps.

      Sorry for the lack of a detailed explanation.

  8. Anonymous29/1/17 05:11

    Sir, I want to try this using a sequence of images from a video, and stabilize the current frame relative to the previous frame rather than to the first frame. Can you explain how to do this? Thank you.

    Replies
    1. Sorry, I cannot understand your question exactly..
      You mean.. just getting the variation between the previous and current frame?
      If that is right, the logic is simpler: skip the cumulative multiplication and warp with the per-frame H only.
      Line 164 is the most important.
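
      A minimal sketch against the listing above (use the per-frame homography mxH instead of the accumulated gmxH):

      //skip the cvMatMul accumulation step and warp with the frame-to-frame matrix only
      cvWarpPerspective(current_Img, WarpImg, &mxH);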

      and

      One more tip..
      OpenCV has a built-in stabilization module.
      This code is for studying.

      Thank you.

    2. Anonymous1/2/17 09:35

      Can you explain how to load a frame sequence from a folder instead of loading a video?

    3. I think this code will help:
      http://study.marearts.com/2014/09/opencv-simple-source-code-video-frames.html

      That example goes from video frames to image files, but you can easily convert it to the other direction.
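
      For example, a rough sketch of reading numbered frames with cvLoadImage (the folder and file-name pattern here are just an assumption):

      char path[256];
      for (int i = 0; ; ++i)
      {
          sprintf(path, "frames/frame_%04d.jpg", i);               //hypothetical naming scheme
          IplImage* frame = cvLoadImage(path, CV_LOAD_IMAGE_COLOR);
          if (frame == 0)
              break;                                               //stop at the first missing file
          //...use 'frame' in place of cvRetrieveFrame(capture)...
          cvReleaseImage(&frame);
      }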
