4/28/2014

(OpenCV Study) Example source code for using the FarnebackOpticalFlow function (dense optical flow, GPU function)

This is the GPU version of this post:
http://feelmare.blogspot.kr/2014/04/opencv-study-calcopticalflowfarneback.html

In GPU mode, the function returns the flow as two separate matrices, one for the x direction and one for the y direction.
In GPU mode, gpu::resize and gpu::cvtColor cannot write their output to the same GpuMat that is used as input.
Also, GpuMat pixels cannot be accessed directly with at(y, x); the data must first be downloaded to a Mat.


In my environment, the GPU mode is about 10 times faster than the CPU mode.
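A minimal sketch of the GpuMat rules above (OpenCV 2.4.x gpu module; the file name and sizes are just for illustration):

gpu::GpuMat src_gpu, small_gpu, gray_gpu;
Mat frame = imread("frame.png");
Mat gray;

src_gpu.upload(frame);                            // host -> device
gpu::resize(src_gpu, small_gpu, Size(320, 240));  // dst must be a different GpuMat than src
gpu::cvtColor(small_gpu, gray_gpu, CV_BGR2GRAY);  // same rule: no in-place output
gray_gpu.download(gray);                          // device -> host
unsigned char v = gray.at<unsigned char>(10, 10); // at() works only on the downloaded Mat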

Refer to the example source code and video below.




#include <stdio.h>
#include <iostream>

#include <opencv2/opencv.hpp>
#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/video/background_segm.hpp>
#include <opencv2/gpu/gpu.hpp>

#ifdef _DEBUG        
#pragma comment(lib, "opencv_core247d.lib")
#pragma comment(lib, "opencv_imgproc247d.lib")   //MAT processing
#pragma comment(lib, "opencv_objdetect247d.lib") //HOGDescriptor
#pragma comment(lib, "opencv_gpu247d.lib")
//#pragma comment(lib, "opencv_features2d247d.lib")
#pragma comment(lib, "opencv_highgui247d.lib")
#pragma comment(lib, "opencv_ml247d.lib")
//#pragma comment(lib, "opencv_stitching247d.lib");
//#pragma comment(lib, "opencv_nonfree247d.lib");
#pragma comment(lib, "opencv_video247d.lib")
#else
#pragma comment(lib, "opencv_core247.lib")
#pragma comment(lib, "opencv_imgproc247.lib")
#pragma comment(lib, "opencv_objdetect247.lib")
#pragma comment(lib, "opencv_gpu247.lib")
//#pragma comment(lib, "opencv_features2d247.lib")
#pragma comment(lib, "opencv_highgui247.lib")
#pragma comment(lib, "opencv_ml247.lib")
//#pragma comment(lib, "opencv_stitching247.lib");
//#pragma comment(lib, "opencv_nonfree247.lib");
#pragma comment(lib, "opencv_video247d.lib")
#endif 

using namespace cv;
using namespace std;


void drawOptFlowMap_gpu (const Mat& flow_x, const Mat& flow_y, Mat& cflowmap, int step, const Scalar& color) {

 for(int y = 0; y < cflowmap.rows; y += step)
  for(int x = 0; x < cflowmap.cols; x += step)
  {
   Point2f fxy;
   fxy.x = cvRound( flow_x.at<float>(y, x) + x );
   fxy.y = cvRound( flow_y.at<float>(y, x) + y );

   line(cflowmap, Point(x,y), Point(fxy.x, fxy.y), color);
   circle(cflowmap, Point(fxy.x, fxy.y), 1, color, -1);
  }
}



int main()
{
 //resize scale
 int s=4;

 unsigned long AAtime=0, BBtime=0;

 //variables
 Mat GetImg, flow_x, flow_y, next, prvs;
 
 //gpu variable
 gpu::GpuMat prvs_gpu, next_gpu, flow_x_gpu, flow_y_gpu;
 gpu::GpuMat prvs_gpu_o, next_gpu_o;
 gpu::GpuMat prvs_gpu_c, next_gpu_c;

 //file name
 char fileName[100] = ".\\mm2.avi"; //Gate1_175_p1.avi"; //video\\mm2.avi"; //mm2.avi"; //cctv 2.mov"; //mm2.avi"; //";//_p1.avi";
 //video file open
 VideoCapture stream1(fileName);   //0 is the id of video device.0 if you have only one camera   
 if(!(stream1.read(GetImg))) //get one frame from video
  return 0;



 //////////////////////////////////////////////////////////////////////////////////////////////
 //resize(GetImg, prvs, Size(GetImg.size().width/s, GetImg.size().height/s) );
 //cvtColor(prvs, prvs, CV_BGR2GRAY);
 //prvs_gpu.upload(prvs);
 //////////////////////////////////////////////////////////////////////////////////////////////
 //gpu upload, resize, color convert
 prvs_gpu_o.upload(GetImg);
 gpu::resize(prvs_gpu_o, prvs_gpu_c, Size(GetImg.size().width/s, GetImg.size().height/s) );
 gpu::cvtColor(prvs_gpu_c, prvs_gpu, CV_BGR2GRAY);
 /////////////////////////////////////////////////////////////////////////////////////////////

 //dense optical flow
 gpu::FarnebackOpticalFlow fbOF;

 //unconditional loop   
 while (true) {   
  
  if(!(stream1.read(GetImg))) //get one frame from video   
   break;

  ///////////////////////////////////////////////////////////////////
  //resize(GetImg, next, Size(GetImg.size().width/s, GetImg.size().height/s) );
  //cvtColor(next, next, CV_BGR2GRAY);
  //next_gpu.upload(next);
  ///////////////////////////////////////////////////////////////////
  //gpu upload, resize, color convert
  next_gpu_o.upload(GetImg);
  gpu::resize(next_gpu_o, next_gpu_c, Size(GetImg.size().width/s, GetImg.size().height/s) );
  gpu::cvtColor(next_gpu_c, next_gpu, CV_BGR2GRAY);
  ///////////////////////////////////////////////////////////////////

  AAtime = getTickCount();
  //dense optical flow
  fbOF.operator()(prvs_gpu, next_gpu, flow_x_gpu, flow_y_gpu);
  //fbOF(prvs_gpu, next_gpu, flow_x_gpu, flow_y_gpu);
  BBtime = getTickCount();
  float pt = (BBtime - AAtime)/getTickFrequency();
  float fpt = 1/pt;
  printf("%.2lf / %.2lf \n",  pt, fpt );

  //copy for vector flow drawing
  Mat cflow;
  resize(GetImg, cflow, Size(GetImg.size().width/s, GetImg.size().height/s) );
  flow_x_gpu.download( flow_x );
  flow_y_gpu.download( flow_y );
  drawOptFlowMap_gpu(flow_x, flow_y, cflow, 10 , CV_RGB(0, 255, 0));
  imshow("OpticalFlowFarneback", cflow);

  ///////////////////////////////////////////////////////////////////
  //Display gpumat
  next_gpu.download( next );
  prvs_gpu.download( prvs );
  imshow("next", next );
  imshow("prvs", prvs );

  //prvs mat update
  prvs_gpu = next_gpu.clone();
  
  if (waitKey(5) >= 0)   
   break;
 }
}



4/25/2014

(OpenCV Study) calcOpticalFlowFarneback example source code (dense optical flow)

Refer to this web page:
-> http://docs.opencv.org/trunk/doc/py_tutorials/py_video/py_lucas_kanade/py_lucas_kanade.html


Dense optical flow is a little different from feature-tracking optical flow:
it searches for the flow vector of every pixel.

The output flow Mat stores, at each position, the displacement vector from that position.
ex) If the value of the flow Mat at position (20, 20) is (10, 10), the pixel moved from (20, 20) to (30, 30).
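A minimal sketch of reading one flow vector from the result of calcOpticalFlowFarneback (x and y are a pixel position):

const Point2f& d = flow.at<Point2f>(y, x);           // displacement at (x, y)
Point motionEnd(cvRound(x + d.x), cvRound(y + d.y)); // where the pixel moved to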

The example source code draws the flow direction at sampled pixels.
The drawing function is adapted from this site: http://stackoverflow.com/questions/16672003/how-to-extract-velocity-vectors-of-a-pixels-from-calcopticalflowfarneback






The disadvantage is that the function is very slow, so a GPU version is needed. The GPU-mode result video is here -> http://www.youtube.com/watch?v=tg0oj4ObHlc&feature=youtu.be Example source code is here ->
#include <stdio.h>
#include <iostream>

#include <opencv2/opencv.hpp>
#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/video/background_segm.hpp>


#ifdef _DEBUG        
#pragma comment(lib, "opencv_core247d.lib")
#pragma comment(lib, "opencv_imgproc247d.lib")   //MAT processing
#pragma comment(lib, "opencv_objdetect247d.lib") //HOGDescriptor
//#pragma comment(lib, "opencv_gpu247d.lib")
//#pragma comment(lib, "opencv_features2d247d.lib")
#pragma comment(lib, "opencv_highgui247d.lib")
#pragma comment(lib, "opencv_ml247d.lib")
//#pragma comment(lib, "opencv_stitching247d.lib");
//#pragma comment(lib, "opencv_nonfree247d.lib");
#pragma comment(lib, "opencv_video247d.lib")
#else
#pragma comment(lib, "opencv_core247.lib")
#pragma comment(lib, "opencv_imgproc247.lib")
#pragma comment(lib, "opencv_objdetect247.lib")
//#pragma comment(lib, "opencv_gpu247.lib")
//#pragma comment(lib, "opencv_features2d247.lib")
#pragma comment(lib, "opencv_highgui247.lib")
#pragma comment(lib, "opencv_ml247.lib")
//#pragma comment(lib, "opencv_stitching247.lib");
//#pragma comment(lib, "opencv_nonfree247.lib");
#pragma comment(lib, "opencv_video247d.lib")
#endif 

using namespace cv;
using namespace std;


void drawOptFlowMap (const Mat& flow, Mat& cflowmap, int step, const Scalar& color) {
 for(int y = 0; y < cflowmap.rows; y += step)
  for(int x = 0; x < cflowmap.cols; x += step)
  {
   const Point2f& fxy = flow.at<Point2f>(y, x);
   line(cflowmap, Point(x,y), Point(cvRound(x+fxy.x), cvRound(y+fxy.y)), color);
   circle(cflowmap, Point(cvRound(x+fxy.x), cvRound(y+fxy.y)), 1, color, -1);
  }
}


int main()
{
 int s=5;
 //global variables
 Mat GetImg;
 Mat prvs, next; //current frame
 
 char fileName[100] = "mm2.avi"; //video\\mm2.avi"; //mm2.avi"; //cctv 2.mov"; //mm2.avi"; //";//_p1.avi";
 VideoCapture stream1(fileName);   //0 is the id of video device.0 if you have only one camera   

 if(!(stream1.read(GetImg))) //get one frame from video
  return 0;
 resize(GetImg, prvs, Size(GetImg.size().width/s, GetImg.size().height/s) );
 cvtColor(prvs, prvs, CV_BGR2GRAY);

 //unconditional loop   
 while (true) {   
  
  if(!(stream1.read(GetImg))) //get one frame from video   
   break;
  //Resize
  resize(GetImg, next, Size(GetImg.size().width/s, GetImg.size().height/s) );
  cvtColor(next, next, CV_BGR2GRAY);
  ///////////////////////////////////////////////////////////////////
  Mat flow;
  calcOpticalFlowFarneback(prvs, next, flow, 0.5, 3, 15, 3, 5, 1.2, 0);

  Mat cflow;
  cvtColor(prvs, cflow, CV_GRAY2BGR);
  drawOptFlowMap(flow, cflow, 10, CV_RGB(0, 255, 0));
  imshow("OpticalFlowFarneback", cflow);

  ///////////////////////////////////////////////////////////////////
  //Display
  imshow("prvs", prvs);
  imshow("next", next);

  if (waitKey(5) >= 0)   
   break;

  prvs = next.clone();
 }

}



...

This video is the result of the source code.

(OpenCV Study) Background subtraction and drawing blobs as red rectangles (example source code)

I use the MOG2 algorithm for background subtraction.

The process is:
resize to a smaller size for faster processing
blur to reduce the effect of noise
morphology to make blobs and remove noise
findContours to draw blob rectangles

An example movie of the result is below.
For more detail, refer to the source code.
#include <stdio.h>
#include <iostream>

#include <opencv2/opencv.hpp>
#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/video/background_segm.hpp>


#ifdef _DEBUG        
#pragma comment(lib, "opencv_core247d.lib")
#pragma comment(lib, "opencv_imgproc247d.lib")   //MAT processing
#pragma comment(lib, "opencv_objdetect247d.lib") //HOGDescriptor
//#pragma comment(lib, "opencv_gpu247d.lib")
//#pragma comment(lib, "opencv_features2d247d.lib")
#pragma comment(lib, "opencv_highgui247d.lib")
#pragma comment(lib, "opencv_ml247d.lib")
//#pragma comment(lib, "opencv_stitching247d.lib");
//#pragma comment(lib, "opencv_nonfree247d.lib");
#pragma comment(lib, "opencv_video247d.lib")
#else
#pragma comment(lib, "opencv_core247.lib")
#pragma comment(lib, "opencv_imgproc247.lib")
#pragma comment(lib, "opencv_objdetect247.lib")
//#pragma comment(lib, "opencv_gpu247.lib")
//#pragma comment(lib, "opencv_features2d247.lib")
#pragma comment(lib, "opencv_highgui247.lib")
#pragma comment(lib, "opencv_ml247.lib")
//#pragma comment(lib, "opencv_stitching247.lib");
//#pragma comment(lib, "opencv_nonfree247.lib");
#pragma comment(lib, "opencv_video247d.lib")
#endif 

using namespace cv;
using namespace std;



int main()
{

 //global variables
 Mat frame; //current frame
 Mat resize_blur_Img;
 Mat fgMaskMOG2; //fg mask generated by the MOG2 method
 Mat binaryImg;
 //Mat TestImg;
 Mat ContourImg; //binary image used for contour extraction
 Ptr< BackgroundSubtractor> pMOG2; //MOG2 Background subtractor
 
 pMOG2 = new BackgroundSubtractorMOG2(300,32,true);//300,0.0);
 
 char fileName[100] = "mm2.avi"; //video\\mm2.avi"; //mm2.avi"; //cctv 2.mov"; //mm2.avi"; //";//_p1.avi";
 VideoCapture stream1(fileName);   //0 is the id of video device.0 if you have only one camera   

 //morphology element
 Mat element = getStructuringElement(MORPH_RECT, Size(7, 7), Point(3,3) );   

 //unconditional loop   
 while (true) {   
  Mat cameraFrame;   
  if(!(stream1.read(frame))) //get one frame from video   
   break;
  
  //Resize
  resize(frame, resize_blur_Img, Size(frame.size().width/3, frame.size().height/3) );
  //Blur
  blur(resize_blur_Img, resize_blur_Img, Size(4,4) );
  //Background subtraction
  pMOG2->operator()(resize_blur_Img, fgMaskMOG2, -1);//,-0.5);
  
  ///////////////////////////////////////////////////////////////////
  //pre-processing
  //remove single-pixel noise
  //morphologyEx(fgMaskMOG2, fgMaskMOG2, CV_MOP_ERODE, element);
  morphologyEx(fgMaskMOG2, binaryImg, CV_MOP_CLOSE, element);
  //morphologyEx(fgMaskMOG2, testImg, CV_MOP_OPEN, element);

  //Shadow delete
  //Binary
  threshold(binaryImg, binaryImg, 128, 255, CV_THRESH_BINARY);

  //Find contour
  ContourImg = binaryImg.clone();
  //less blob delete
  vector< vector< Point> > contours;
  findContours(ContourImg,
            contours, // a vector of contours
            CV_RETR_EXTERNAL, // retrieve the external contours
            CV_CHAIN_APPROX_NONE); // all pixels of each contours

  vector< Rect > output;
  vector< vector< Point> >::iterator itc= contours.begin();
  while (itc!=contours.end()) {

   //Create bounding rect of object
   //rect draw on origin image
   Rect mr= boundingRect(Mat(*itc));
   rectangle(resize_blur_Img, mr, CV_RGB(255,0,0));
   ++itc;
  }
  

  ///////////////////////////////////////////////////////////////////

  //Display
  imshow("Shadow_Removed", binaryImg);
  imshow("Blur_Resize", resize_blur_Img);
  imshow("MOG2", fgMaskMOG2);

  if (waitKey(5) >= 0)   
   break;   
 }

}



4/24/2014

(OpenCV Study) Background subtractor MOG, MOG2, GMG example source code (BackgroundSubtractorMOG, BackgroundSubtractorMOG2, BackgroundSubtractorGMG)

Background subtractor example source code.

OpenCV supports three background subtraction algorithms: MOG, MOG2, and GMG.

For a detailed explanation of each algorithm, please refer to the OpenCV documentation.

This post just introduces the source code and the results of background subtraction.

First, see the results of the three algorithms.

Original image

MOG result

MOG2 result

GMG result




Note that in the MOG2 result we can see gray pixels; these indicate shadow.
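A minimal sketch of removing the shadow pixels (MOG2 marks shadows with a gray value, 127 by default, so a simple binary threshold keeps only the definite foreground; fgMaskMOG2 is the mask from the code below):

Mat fgNoShadow;
threshold(fgMaskMOG2, fgNoShadow, 128, 255, CV_THRESH_BINARY); // shadow (127) -> 0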
This is the result video of the source code.
source code is here->  

#include <stdio.h>
#include <iostream>

#include <opencv2/opencv.hpp>
#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/video/background_segm.hpp>


#ifdef _DEBUG        
#pragma comment(lib, "opencv_core247d.lib")
#pragma comment(lib, "opencv_imgproc247d.lib")   //MAT processing
#pragma comment(lib, "opencv_objdetect247d.lib") //HOGDescriptor
//#pragma comment(lib, "opencv_gpu247d.lib")
//#pragma comment(lib, "opencv_features2d247d.lib")
#pragma comment(lib, "opencv_highgui247d.lib")
#pragma comment(lib, "opencv_ml247d.lib")
//#pragma comment(lib, "opencv_stitching247d.lib");
//#pragma comment(lib, "opencv_nonfree247d.lib");
#pragma comment(lib, "opencv_video247d.lib")
#else
#pragma comment(lib, "opencv_core247.lib")
#pragma comment(lib, "opencv_imgproc247.lib")
#pragma comment(lib, "opencv_objdetect247.lib")
//#pragma comment(lib, "opencv_gpu247.lib")
//#pragma comment(lib, "opencv_features2d247.lib")
#pragma comment(lib, "opencv_highgui247.lib")
#pragma comment(lib, "opencv_ml247.lib")
//#pragma comment(lib, "opencv_stitching247.lib");
//#pragma comment(lib, "opencv_nonfree247.lib");
#pragma comment(lib, "opencv_video247d.lib")
#endif 

using namespace cv;
using namespace std;



int main()
{

 //global variables
 Mat frame; //current frame
 Mat resizeF;
 Mat fgMaskMOG; //fg mask generated by MOG method
 Mat fgMaskMOG2; //fg mask generated by the MOG2 method
 Mat fgMaskGMG; //fg mask generated by the GMG method
 

 Ptr< BackgroundSubtractor> pMOG; //MOG Background subtractor
 Ptr< BackgroundSubtractor> pMOG2; //MOG2 Background subtractor
 Ptr< BackgroundSubtractorGMG> pGMG; //GMG Background subtractor
 


 pMOG = new BackgroundSubtractorMOG();
 pMOG2 = new BackgroundSubtractorMOG2();
 pGMG = new BackgroundSubtractorGMG();
 

 char fileName[100] = "C:\\POSCO\\video\\/cctv 2.mov"; //Gate1_175_p1.avi"; //mm2.avi"; //";//_p1.avi";
 VideoCapture stream1(fileName);   //0 is the id of video device.0 if you have only one camera   

 Mat element = getStructuringElement(MORPH_RECT, Size(3, 3), Point(1,1) );   

 //unconditional loop   
 while (true) {   
  Mat cameraFrame;   
  if(!(stream1.read(frame))) //get one frame from video   
   break;
  
  resize(frame, resizeF, Size(frame.size().width/4, frame.size().height/4) );
  pMOG->operator()(resizeF, fgMaskMOG);
  pMOG2->operator()(resizeF, fgMaskMOG2);
  pGMG->operator()(resizeF, fgMaskGMG);
  //morphologyEx(fgMaskGMG, fgMaskGMG, CV_MOP_OPEN, element); 

 


  imshow("Origin", resizeF);
  imshow("MOG", fgMaskMOG);
  imshow("MOG2", fgMaskMOG2);
  imshow("GMG", fgMaskGMG);
  

  if (waitKey(30) >= 0)   
   break;   
 }

}




..


Refer to this URL for an OpenCV 3.2 version of this example:
http://study.marearts.com/2017/04/opencv-background-subtraction-32.html

4/22/2014

(OpenCV Study) Mat pixel access methods (getting a pixel)

1. At approach

Mat image(ROW, COL, CV_TYPE);
image.at<DATA_TYPE>(WANT_ROW, WANT_COL);

- ROW: Row
- COL: Column
- CV_TYPE: data type ( for example : CV_8UC3 = 8 bit 3 channels)
- DATA_TYPE: Mat creation data type ( for example : float, unsigned char)
- WANT_ROW: access to the desired row
- WANT_COL: access to the desired column

[Advantage]: access is validated, so this is the safest and most accurate approach.
[Disadvantage]: the slowest of the three methods.

2. Ptr approach
Mat image(ROW, COL, CV_TYPE);
image.ptr<DATA_TYPE>(WANT_ROW)[WANT_COL]; (ptr returns a pointer to the start of the row)

- ROW: Row
- COL: Column
- CV_TYPE: data type ( for example : CV_8UC3 = 8 bit 3 channels)
- DATA_TYPE: Mat creation data type ( for example : float, unsigned char)
- WANT_ROW: access to the desired row
- WANT_COL: access to the desired column

[Advantage]: faster than the first way.
[Disadvantage]: direct access to the data, but slower than the third way.



3. Data approach
Mat image(ROW, COL, CV_TYPE);
DATA_TYPE* data = (DATA_TYPE*)image.data;
data[WANT_ROW * image.cols + WANT_COL];

- ROW: Row
- COL: Column
- CV_TYPE: data type ( for example : CV_8UC3 = 8 bit 3 channels)
- DATA_TYPE: Mat creation data type ( for example : float, unsigned char)
- WANT_ROW: access to the desired row
- WANT_COL: access to the desired column

[Advantage]: very fast.
[Disadvantage]: no validation checks, so invalid accesses are hard to detect.
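For reference, a minimal sketch of the three approaches on a single-channel float Mat (positions are just for illustration):

Mat image(100, 100, CV_32F);

// 1. at: checked access (safe, slowest)
image.at<float>(10, 20) = 1.0f;

// 2. ptr: pointer to the start of a row (faster)
float* row = image.ptr<float>(10);
row[20] = 2.0f;

// 3. data: raw pointer arithmetic (fastest, no checks)
float* data = (float*)image.data;
data[10 * image.cols + 20] = 3.0f;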


This is the sample code:
http://study.marearts.com/2016/06/opencv-pixel-access-at-ptr-data.html

4/18/2014

Testing whether SVM trained data is reliable or not (example source code)

After training an SVM, we should test whether the trained XML data is reliable.

For the method to extract HOG features, refer to -> http://feelmare.blogspot.kr/2014/04/example-source-code-of-extract-hog.html
For the method to train an SVM on HOG features, refer to -> http://feelmare.blogspot.kr/2014/04/example-source-code-hog-feature-to.html

The method reuses the training data:
extract HOG features from the training images again, and check whether each feature is classified as positive by the trained SVM.

The example source code is as follows.
...
#include <stdio.h>
#include <opencv2/opencv.hpp>
//#include <opencv2/gpu/gpu.hpp>

using namespace cv;
using namespace std;


#ifdef _DEBUG        
#pragma comment(lib, "opencv_core247d.lib")         
#pragma comment(lib, "opencv_imgproc247d.lib")   //MAT processing        
#pragma comment(lib, "opencv_objdetect247d.lib") //HOGDescriptor
//#pragma comment(lib, "opencv_gpu247d.lib")        
//#pragma comment(lib, "opencv_features2d247d.lib")        
#pragma comment(lib, "opencv_highgui247d.lib")        
#pragma comment(lib, "opencv_ml247d.lib")      
//#pragma comment(lib, "opencv_stitching247d.lib");      
//#pragma comment(lib, "opencv_nonfree247d.lib");      
  
#else        
#pragma comment(lib, "opencv_core247.lib")        
#pragma comment(lib, "opencv_imgproc247.lib")        
#pragma comment(lib, "opencv_objdetect247.lib")        
//#pragma comment(lib, "opencv_gpu247.lib")        
//#pragma comment(lib, "opencv_features2d247.lib")        
#pragma comment(lib, "opencv_highgui247.lib")        
#pragma comment(lib, "opencv_ml247.lib")        
//#pragma comment(lib, "opencv_stitching247.lib");      
//#pragma comment(lib, "opencv_nonfree247.lib");      
#endif 


int main()
{

 //variables
 char FullFileName[100];
 char FirstFileName[100]="./images/upperbody"; //"./NegaImages/Negative";      // 
 int FileNum=96; //262;

 //Load trained SVM xml data
 CvSVM svm;
 svm.load("trainedSVM.xml");

 //count variable
 int nnn=0, ppp=0;

 for(int i=0; i< FileNum; ++i)
 {
  sprintf_s(FullFileName, "%s%d.png", FirstFileName, i+1);
  //printf("%s\n", FullFileName);

  //read image file
  Mat img, img_gray;
  img = imread(FullFileName);
  
  //resizing
  //resize(img, img, Size(16,8) ); //Size(64,48) ); //Size(32*2,16*2)); //Size(80,72) ); 
  resize(img, img, Size(64,48) ); //Size(32*2,16*2)); //Size(80,72) ); 
  //gray
  cvtColor(img, img_gray, CV_RGB2GRAY);

  //Extract HogFeature
  HOGDescriptor d( Size(32,16), Size(8,8), Size(4,4), Size(4,4), 9);
  vector< float> descriptorsValues;
  vector< Point> locations;
  d.compute( img_gray, descriptorsValues, Size(0,0), Size(0,0), locations);
  //vector to Mat
  Mat fm = Mat(descriptorsValues);
  
  //Classification whether data is positive or negative
  int result = svm.predict(fm);
  printf("%s - > %d\n", FullFileName, result);

  //Count data
  if(result == 1)
   ppp++;
  else
   nnn++;

  //show image
  imshow("origin", img);

  waitKey(5);
 }

 printf(" positive/negative = (%d/%d) \n", ppp, nnn);

}
---


Example source code for learning HOG features with an SVM (SVM, HOGDescriptor)

After extracting HOG features from images, we have to train a classifier.
This post introduces how to do that with an SVM.

First, you have to prepare positive HOG feature data and negative HOG feature data as XML, TXT, or similar.
For the method to get HOG features, refer to this page -> http://feelmare.blogspot.kr/2014/04/example-source-code-of-extract-hog.html

I already created Positive.xml and Negative.xml from images.
This example source code trains an SVM from Positive.xml and Negative.xml,
and saves the training result to an XML file.

Once we have the trained SVM XML data, we can classify whether input data is positive or not.

This is the SVM training source code.

#include <stdio.h>
#include <opencv2/opencv.hpp>
//#include <opencv2/gpu/gpu.hpp>

using namespace cv;
using namespace std;


#ifdef _DEBUG        
#pragma comment(lib, "opencv_core247d.lib")         
#pragma comment(lib, "opencv_imgproc247d.lib")   //MAT processing        
#pragma comment(lib, "opencv_objdetect247d.lib") //HOGDescriptor
//#pragma comment(lib, "opencv_gpu247d.lib")        
//#pragma comment(lib, "opencv_features2d247d.lib")        
#pragma comment(lib, "opencv_highgui247d.lib")        
#pragma comment(lib, "opencv_ml247d.lib")      
//#pragma comment(lib, "opencv_stitching247d.lib");      
//#pragma comment(lib, "opencv_nonfree247d.lib");      
  
#else        
#pragma comment(lib, "opencv_core247.lib")        
#pragma comment(lib, "opencv_imgproc247.lib")        
#pragma comment(lib, "opencv_objdetect247.lib")        
//#pragma comment(lib, "opencv_gpu247.lib")        
//#pragma comment(lib, "opencv_features2d247.lib")        
#pragma comment(lib, "opencv_highgui247.lib")        
#pragma comment(lib, "opencv_ml247.lib")        
//#pragma comment(lib, "opencv_stitching247.lib");      
//#pragma comment(lib, "opencv_nonfree247.lib");      
#endif 


int main()
{
 
 //Read Hog feature from XML file
 ///////////////////////////////////////////////////////////////////////////
 printf("1. Feature data xml load\n");
 //create xml to read
 FileStorage read_PositiveXml("Positive.xml", FileStorage::READ);
 FileStorage read_NegativeXml("Negative.xml", FileStorage::READ);

 //Positive Mat
 Mat pMat;
 read_PositiveXml["Descriptor_of_images"] >> pMat;
 //Read Row, Cols
 int pRow,pCol;
 pRow = pMat.rows; pCol = pMat.cols;

 //Negative Mat
 Mat nMat;
 read_NegativeXml["Descriptor_of_images"] >> nMat;
 //Read Row, Cols
 int nRow,nCol;
 nRow = nMat.rows; nCol = nMat.cols;

 //Rows, Cols printf
 printf("   pRow=%d pCol=%d, nRow=%d nCol=%d\n", pRow, pCol, nRow, nCol );
 //release
 read_PositiveXml.release();
 //release
 read_NegativeXml.release();
 /////////////////////////////////////////////////////////////////////////////////

 //Make training data for SVM
 /////////////////////////////////////////////////////////////////////////////////
 printf("2. Make training data for SVM\n");
 //descriptor data set
 Mat PN_Descriptor_mtx( pRow + nRow, pCol, CV_32FC1 ); //pCol and nCol are the descriptor length, so the two values must be equal
 memcpy(PN_Descriptor_mtx.data, pMat.data, sizeof(float) * pMat.cols * pMat.rows );
 int startP = sizeof(float) * pMat.cols * pMat.rows;
 memcpy(&(PN_Descriptor_mtx.data[ startP ]), nMat.data, sizeof(float) * nMat.cols * nMat.rows );
 //data labeling
 Mat labels( pRow + nRow, 1, CV_32FC1, Scalar(-1.0) );
    labels.rowRange( 0, pRow ) = Scalar( 1.0 );
 /////////////////////////////////////////////////////////////////////////////////

 //Set svm parameter
 /////////////////////////////////////////////////////////////////////////////////
 printf("4. SVM training\n");
 CvSVM svm;
 CvSVMParams params;
 params.svm_type = CvSVM::C_SVC;
    params.kernel_type = CvSVM::LINEAR;
    params.term_crit = cvTermCriteria( CV_TERMCRIT_ITER, 10000, 1e-6 );
 /////////////////////////////////////////////////////////////////////////////////

 //Training
 /////////////////////////////////////////////////////////////////////////////////
 svm.train(PN_Descriptor_mtx, labels, Mat(), Mat(), params);

 //Trained data save
 /////////////////////////////////////////////////////////////////////////////////
 printf("5. SVM xml save\n");
 svm.save( "trainedSVM.xml" );
 
// FileStorage hogXml("testXML.xml", FileStorage::WRITE); //FileStorage::READ
// write(hogXml, "Data", PN_Descriptor_mtx);
// write(hogXml, "Label", labels);
// hogXml.release();
}
---

After training, for the method to classify, refer to http://feelmare.blogspot.kr/2014/04/to-test-svm-trained-data-is-whether.html

4/17/2014

OpenCV Study, Merging 2 Mats into one extended Mat (example source code)

Example of merging 2 Mats into one extended Mat.

For example,
A=[1 2 3; 4 5 6];
B=[7 8 9; 3 2 1];
C=[A; B];  <- how can we make this merged Mat?

refer to this example source code.

---

 Mat A(3, 10, CV_32F);
 Mat B(4, 10, CV_32F);

 for(int i=0; i< A.rows; ++i)
 {
  for(int j=0; j< A.cols; ++j)
  {
   A.at<float>(i,j) = float(i*j);
  }
 }

 cout << "A" << endl;
 cout << A << endl << endl;
 
 for(int i=0; i< B.rows; ++i)
 {
  for(int j=0; j< B.cols; ++j)
  {
   B.at<float>(i,j) = float(i*j)*10;
  }
 }

 cout << "B" << endl;
 cout << B << endl << endl;
 


 Mat C(A.rows + B.rows, A.cols, CV_32F);
 memcpy(C.data, A.data, sizeof(float) * A.cols * A.rows );
 int startP = sizeof(float) * A.cols * A.rows;
 memcpy(&(C.data[ startP ]), B.data, sizeof(float) * B.cols * B.rows );

 cout << "C = [A; B]" << endl;
 cout << C << endl << endl;


...
The result of the example source code:
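As an aside, OpenCV also provides vconcat (and hconcat), which performs the same merge without manual memcpy; a minimal equivalent sketch:

Mat C2;
vconcat(A, B, C2); // stacks A on top of B, same as C = [A; B]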
 

Example source code to extract HOG features from images and save the descriptor values to an XML file, using OpenCV (HOGDescriptor)

This example source code extracts HOG features from images and saves the descriptors to an XML file.

The source code shows how to use the HOGDescriptor class.



..
#include <stdio.h>
#include <opencv2/opencv.hpp>
//#include <opencv2/gpu/gpu.hpp>

using namespace cv;
using namespace std;


#ifdef _DEBUG        
#pragma comment(lib, "opencv_core247d.lib")         
#pragma comment(lib, "opencv_imgproc247d.lib")   //MAT processing        
#pragma comment(lib, "opencv_objdetect247d.lib") //HOGDescriptor
//#pragma comment(lib, "opencv_gpu247d.lib")        
//#pragma comment(lib, "opencv_features2d247d.lib")        
#pragma comment(lib, "opencv_highgui247d.lib")        
//#pragma comment(lib, "opencv_ml247d.lib")      
//#pragma comment(lib, "opencv_stitching247d.lib");      
//#pragma comment(lib, "opencv_nonfree247d.lib");      
  
#else        
#pragma comment(lib, "opencv_core247.lib")        
#pragma comment(lib, "opencv_imgproc247.lib")        
#pragma comment(lib, "opencv_objdetect247.lib")        
//#pragma comment(lib, "opencv_gpu247.lib")        
//#pragma comment(lib, "opencv_features2d247.lib")        
#pragma comment(lib, "opencv_highgui247.lib")        
//#pragma comment(lib, "opencv_ml247.lib")        
//#pragma comment(lib, "opencv_stitching247.lib");      
//#pragma comment(lib, "opencv_nonfree247.lib");      
#endif 


int main()
{
 //variables
 char FullFileName[100];
 char FirstFileName[100]="./images/upperbody";
 char SaveHogDesFileName[100] = "Positive.xml";
 int FileNum=96;

 vector< vector < float> > v_descriptorsValues;
 vector< vector < Point> > v_locations;


 for(int i=0; i< FileNum; ++i)
 {
  sprintf_s(FullFileName, "%s%d.png", FirstFileName, i+1);
  printf("%s\n", FullFileName);

  //read image file
  Mat img, img_gray;
  img = imread(FullFileName);
  
  //resizing
  resize(img, img, Size(64,48) ); //Size(64,48) ); //Size(32*2,16*2)); //Size(80,72) ); 
  //gray
  cvtColor(img, img_gray, CV_RGB2GRAY);

  //extract feature
  HOGDescriptor d( Size(32,16), Size(8,8), Size(4,4), Size(4,4), 9);
  vector< float> descriptorsValues;
  vector< Point> locations;
  d.compute( img_gray, descriptorsValues, Size(0,0), Size(0,0), locations);

  //printf("descriptor number =%d\n", descriptorsValues.size() );
  v_descriptorsValues.push_back( descriptorsValues );
  v_locations.push_back( locations );
  //show image
  imshow("origin", img);

  waitKey(5);
 }

 //refer to this address -> http://feelmare.blogspot.kr/2014/04/the-example-source-code-of-2d-vector.html
 //save to xml
 FileStorage hogXml(SaveHogDesFileName, FileStorage::WRITE); //FileStorage::READ
 //2d vector to Mat
 int row=v_descriptorsValues.size(), col=v_descriptorsValues[0].size();
 printf("col=%d, row=%d\n", row, col);
 Mat M(row,col,CV_32F);
 //save Mat to XML
 for(int i=0; i< row; ++i)  
  memcpy( &(M.data[col * i * sizeof(float) ]) ,v_descriptorsValues[i].data(),col*sizeof(float));
 //write xml
 write(hogXml, "Descriptor_of_images",  M);

 //write(hogXml, "Descriptor", v_descriptorsValues );
 //write(hogXml, "locations", v_locations );
 hogXml.release();

}



...




Example source code to write and read a 2D vector to/from an XML file using OpenCV (also introduces converting a 2D vector to Mat and a Mat to a 2D vector)

For converting a 1D vector to Mat and a Mat to a 1D vector,
refer to this post: http://feelmare.blogspot.kr/2014/01/opencv-vector-to-mat-mat-to-vector.html


This post is about writing a 2D vector to an XML file,
then reading the XML file and assigning the data back to a 2D vector.

To do this, I convert the 2D vector to a Mat, and the Mat back to a 2D vector.
The conversion methods are as follows.

...
//2D vector to Mat
//create Mat   
Mat M(row,col,CV_32F);
//copy 2d vector to mat  
for(int i=0; i< row; ++i)  
memcpy( &(M.data[col * i * sizeof(float) ]) ,vv_Test[i].data(),col*sizeof(float)); 



//Mat to 2D vector
//copy from Mat to 2d Vector
for(int i=0; i< row; ++i)
{
 vector< float > temp;
 int start=col * i * sizeof(float);
 int end=start + col*sizeof(float); //one past the last float of the row
 temp.assign( (float*)(&(M2.data[start])), (float*)(&(M2.data[end])) );
 vv_Test2.push_back(temp);
}
---
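As a side note, the same conversions can be done without raw memcpy, using Mat row operations (a minimal sketch, assuming CV_32F data as above):

// 2D vector -> Mat, one row at a time
Mat M;
for (size_t i = 0; i < vv_Test.size(); ++i)
{
    Mat row = Mat(vv_Test[i]).t(); // Mat(vector) is Nx1; transpose to a 1xN row
    M.push_back(row);
}

// Mat -> 2D vector, row by row
vector< vector< float > > vv_out;
for (int i = 0; i < M.rows; ++i)
{
    const float* p = M.ptr<float>(i);
    vv_out.push_back(vector< float >(p, p + M.cols));
}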





This example source code saves a 2D vector to XML, then reads the XML back and copies it into a 2D vector variable.
...
#include <stdio.h>
#include <opencv2/opencv.hpp>
//#include <opencv2/gpu/gpu.hpp>

using namespace cv;
using namespace std;


#ifdef _DEBUG        
#pragma comment(lib, "opencv_core247d.lib")         
#pragma comment(lib, "opencv_imgproc247d.lib")   //MAT processing        
#pragma comment(lib, "opencv_objdetect247d.lib") //HOGDescriptor
//#pragma comment(lib, "opencv_gpu247d.lib")        
//#pragma comment(lib, "opencv_features2d247d.lib")        
#pragma comment(lib, "opencv_highgui247d.lib")        
//#pragma comment(lib, "opencv_ml247d.lib")      
//#pragma comment(lib, "opencv_stitching247d.lib");      
//#pragma comment(lib, "opencv_nonfree247d.lib");      
  
#else        
#pragma comment(lib, "opencv_core247.lib")        
#pragma comment(lib, "opencv_imgproc247.lib")        
#pragma comment(lib, "opencv_objdetect247.lib")        
//#pragma comment(lib, "opencv_gpu247.lib")        
//#pragma comment(lib, "opencv_features2d247.lib")        
#pragma comment(lib, "opencv_highgui247.lib")        
//#pragma comment(lib, "opencv_ml247.lib")        
//#pragma comment(lib, "opencv_stitching247.lib");      
//#pragma comment(lib, "opencv_nonfree247.lib");      
#endif 


int main()
{
 /////////////////////////////////////////////////////////////
 ////Write xml example

 //variables
 vector< vector < float > > vv_Test;
 int row = 5, col = 10;
 //make vector values
 for(int i=0; i< row; ++i)
 {
  vector< float > vTest;
  for(int j=0; j< col; ++j)
   vTest.push_back(i*j);

  vv_Test.push_back( vTest );
 }

 //create xml to write
 FileStorage write_hogXml("V_writeTest.xml", FileStorage::WRITE); //FileStorage::READ
 //create Mat   
 Mat M(row,col,CV_32F);
 //copy 2d vector to mat  
 for(int i=0; i< row; ++i)  
  memcpy( &(M.data[col * i * sizeof(float) ]) ,vv_Test[i].data(),col*sizeof(float)); 
 //write xml
 write(write_hogXml, "vectorTest",  M);
 //release
 write_hogXml.release();


 ///////////////////////////////////////////////////////////////////////////
 //read xml example 
 //create xml to read
 FileStorage read_hogXml("V_writeTest.xml", FileStorage::READ); //FileStorage::READ
 //Create Mat
 int row2,col2;
 //create Mat, 2d vector
 Mat M2; 
 vector< vector < float > > vv_Test2;
 //read data into Mat
 read( read_hogXml["vectorTest"], M2);
 row2 = M2.rows;
 col2 = M2.cols;
 printf("%d %d\n", row2, col2);
 //read_hogXml["vectorTest"] >> M2; //same 
 //copy from Mat to 2d Vector
 for(int i=0; i< row2; ++i)
 {
  vector< float > temp;
  int start=col2 * i * sizeof(float);
  int end=start + col2*sizeof(float); //one past the last float of the row
  temp.assign( (float*)(&(M2.data[start])), (float*)(&(M2.data[end])) );
  vv_Test2.push_back(temp);
 }
 //release
 read_hogXml.release();
 

 ///////////////////////////////////////////////////////////////////////////////////
 printf("read data confirm!! \n");
 for(int i=0; i< vv_Test2.size(); ++i)
 {
  vector< float > vTest;
  for(int j=0; j< vv_Test2[i].size(); ++j)
   printf("%.0f ", vv_Test2[i][j] );

  printf("\n");
 }

}

---

Result of saving the XML file:
 
Result of reading the XML and printing the 2D vector:

 

4/11/2014

Video stabilization example source code (using the cvFindHomography and cvWarpPerspective functions in OpenCV)

Video stabilization example source code.

The principle is as follows:



First, obtain 2 adjacent frames.
Extract good features to track.
Match the features between the 2 frames (here, via optical flow).
Compute the homography matrix H.
Warp the current frame toward the old frame using H.
Note that H is accumulated by cumulative multiplication across frames, as sketched below.

The optical flow source code is based on http://feelmare.blogspot.kr/2012/10/optical-flow-sample-source-code-using.html
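A minimal sketch of the cumulative-homography idea using the C++ API (pts_curr and pts_prev are assumed matched point sets from the tracker; this mirrors, not replaces, the C-API code below):

Mat H_cum = Mat::eye(3, 3, CV_64F); // accumulated homography, identity at start
// ...for each new frame:
Mat H = findHomography(pts_curr, pts_prev, CV_RANSAC, 2.0); // frame-to-frame motion
H_cum = H_cum * H;                                          // accumulate across frames
warpPerspective(frame, stabilized, H_cum, frame.size());    // warp toward the first frame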

This is the result of video stabilization.
This is the example source code. ---
#include <opencv2/opencv.hpp> // headers needed for the C API calls and WINDOW_NORMAL used below
using namespace cv;

#define MAX_COUNT 250
#define DELAY_T 3
#define PI 3.1415


int main()
{   

 //////////////////////////////////////////////////////////////////////////   
 //image class         
 IplImage* image = 0;   

 //T, T-1 image   
 IplImage* current_Img = 0;   
 IplImage* Old_Img = 0;   

 //Optical Image   
 IplImage * imgA=0;   
 IplImage * imgB=0;   


 //Video Load   
 CvCapture * capture = cvCreateFileCapture("cam1.wmv"); //cvCaptureFromCAM(0); //cvCreateFileCapture("1.avi");   

 //Window   
 cvNamedWindow( "Origin OpticalFlow" , WINDOW_NORMAL);
 //cvNamedWindow( "RealOrigin" , WINDOW_NORMAL);
 //////////////////////////////////////////////////////////////////////////   


 //////////////////////////////////////////////////////////////////////////    
 //Optical Flow Variables    
 IplImage * eig_image=0;
 IplImage * tmp_image=0;
 int corner_count = MAX_COUNT;   
 CvPoint2D32f* cornersA = new CvPoint2D32f[ MAX_COUNT ];   
 CvPoint2D32f * cornersB = new CvPoint2D32f[ MAX_COUNT ];   

 CvSize img_sz;   
 int win_size=20;   

 IplImage* pyrA=0;   
 IplImage* pyrB=0;   

 char features_found[ MAX_COUNT ];   
 float feature_errors[ MAX_COUNT ];   
 //////////////////////////////////////////////////////////////////////////   


 //////////////////////////////////////////////////////////////////////////   
 //Variables for time different video   
 int one_zero=0;   
 //int t_delay=0;   

 double gH[9]={1,0,0, 0,1,0, 0,0,1};
 CvMat gmxH = cvMat(3, 3, CV_64F, gH);



 //Routine Start   
 while(1) {      


  //capture a frame form cam      
  if( cvGrabFrame( capture ) == 0 )   
   break;   
  //image = cvRetrieveFrame( capture );   
  //cvShowImage("RealOrigin", image );


  //Image Create   
  if(Old_Img == 0)      
  {      
   image = cvRetrieveFrame( capture );   
   current_Img = cvCreateImage(cvSize(image->width, image->height), image->depth, image->nChannels);
   memcpy(current_Img->imageData, image->imageData, sizeof(char)*image->imageSize );
   Old_Img  = cvCreateImage(cvSize(image->width, image->height), image->depth, image->nChannels);
   one_zero=1;
  }   



  if(one_zero == 0 )   
  {   
   if(eig_image == 0)
   {
    eig_image = cvCreateImage(cvSize(image->width, image->height), image->depth, image->nChannels);
    tmp_image = cvCreateImage(cvSize(image->width, image->height), image->depth, image->nChannels);
   }

   //copy to image class   
   memcpy(Old_Img->imageData, current_Img->imageData, sizeof(char)*image->imageSize );   
   image = cvRetrieveFrame( capture );   
   memcpy(current_Img->imageData, image->imageData, sizeof(char)*image->imageSize );   

   //////////////////////////////////////////////////////////////////////////   
   //Create image for Optical flow   
   if(imgA == 0)   
   {   
    imgA = cvCreateImage( cvSize(image->width, image->height), IPL_DEPTH_8U, 1);   
    imgB = cvCreateImage( cvSize(image->width, image->height), IPL_DEPTH_8U, 1);       
   }      

   //RGB to Gray for Optical Flow   
   cvCvtColor(current_Img, imgA, CV_BGR2GRAY);   
   cvCvtColor(Old_Img, imgB, CV_BGR2GRAY);      

   //extract features
   cvGoodFeaturesToTrack(imgA, eig_image, tmp_image, cornersA, &corner_count, 0.01, 5.0, 0, 3, 0, 0.04);   
   cvFindCornerSubPix(imgA, cornersA, corner_count, cvSize(win_size, win_size), cvSize(-1, -1), cvTermCriteria(CV_TERMCRIT_ITER|CV_TERMCRIT_EPS, 20, 0.03));      


   CvSize pyr_sz = cvSize( imgA->width+8, imgB->height/3 );   
   if( pyrA == 0)   
   {    
    pyrA = cvCreateImage( pyr_sz, IPL_DEPTH_32F, 1);   
    pyrB = cvCreateImage( pyr_sz, IPL_DEPTH_32F, 1);   
   }   

   //Optical flow
   cvCalcOpticalFlowPyrLK( imgA, imgB, pyrA, pyrB, cornersA, cornersB, corner_count, cvSize(win_size, win_size), 5, features_found, feature_errors, cvTermCriteria( CV_TERMCRIT_ITER | CV_TERMCRIT_EPS, 20, 0.3), 0);   

   /////////////////////////////////////////////////////////////////////////      
   int fCount=0;
   for(int i=0; i < corner_count; ++i)
   {

    if( features_found[i] == 0 || feature_errors[i] > MAX_COUNT )
     continue;

    fCount++;
    //////////////////////////////////////////////////////////////////////////       
    //Vector Length   
    //float fVecLength = sqrt((float)((cornersA[i].x-cornersB[i].x)*(cornersA[i].x-cornersB[i].x)+(cornersA[i].y-cornersB[i].y)*(cornersA[i].y-cornersB[i].y)));   
    //Vector Angle   
    //float fVecSetha  = fabs( atan2((float)(cornersB[i].y-cornersA[i].y), (float)(cornersB[i].x-cornersA[i].x)) * 180/PI );   
    //cvLine( image, cvPoint(cornersA[i].x, cornersA[i].y), cvPoint(cornersB[i].x, cornersA[i].y), CV_RGB(0, 255, 0), 2);    
   }

   printf("%d \n", fCount);

   int inI=0;
   CvPoint2D32f* pt1 = new CvPoint2D32f[ fCount ];
   CvPoint2D32f * pt2 = new CvPoint2D32f[ fCount ];
   for(int i=0; i < corner_count; ++i)
   {
    if( features_found[i] == 0 || feature_errors[i] > MAX_COUNT )
     continue;
    pt1[inI] = cornersA[i];
    pt2[inI] = cornersB[i];
    
    cvLine( image, cvPoint(pt1[inI].x, pt1[inI].y), cvPoint(pt2[inI].x, pt2[inI].y), CV_RGB(0, 255, 0), 2);    
    inI++;
   }

   //FindHomography
   CvMat M1, M2;
   double H[9];
   CvMat mxH = cvMat(3, 3, CV_64F, H);
   M1 = cvMat(1, fCount, CV_32FC2, pt1);
   M2 = cvMat(1, fCount, CV_32FC2, pt2);

   //M2 = H*M1 , old = H*current
   if( !cvFindHomography(&M1, &M2, &mxH, CV_RANSAC, 2))  //if( !cvFindHomography(&M1, &M2, &mxH, CV_RANSAC, 2))
   {                 
    printf("Find Homography Fail!\n");
    
   }else{
    //printf(" %lf %lf %lf \n %lf %lf %lf \n %lf %lf %lf\n", H[0], H[1], H[2], H[3], H[4], H[5], H[6], H[7], H[8] );
   }

   delete [] pt1;
   delete [] pt2;    

   //warping by H
   //warpAffine(warped_2,warped_3,Transform_avg,Size( reSizeMat.cols, reSizeMat.rows));
   //warpPerspective(cameraFrame2, WarpImg, H, Size(WarpImg.cols, WarpImg.rows));   
   IplImage* WarpImg = cvCreateImage(cvSize(image->width, image->height), image->depth, image->nChannels);
   
   //cvCreateImage(cvSize(T1Img->width*2, T1Img->height*2), T1Img->depth, T1Img->nChannels);

   cvMatMul( &gmxH, &mxH, &gmxH);   // Ma*Mb   -> Mc
   printf(" %lf %lf %lf \n %lf %lf %lf \n %lf %lf %lf\n", H[0], H[1], H[2], H[3], H[4], H[5], H[6], H[7], H[8] );
   printf(" -----\n");
   printf(" %lf %lf %lf \n %lf %lf %lf \n %lf %lf %lf\n\n\n", gH[0], gH[1], gH[2], gH[3], gH[4], gH[5], gH[6], gH[7], gH[8] );
   
   //cvWarpAffine(current_Img, WarpImg, &gmxH);
   cvWarpPerspective(current_Img, WarpImg, &gmxH); 
   //cvWarpPerspective(Old_Img, WarpImg, &mxH); 


   //display
   cvNamedWindow("Stabilizing",WINDOW_NORMAL );
   cvShowImage("Stabilizing", WarpImg); 

   cvReleaseImage(&WarpImg);
   //
   //printf("[%d] - Sheta:%lf, Length:%lf\n",i , fVecSetha, fVecLength);   



   //cvWaitKey(0);
   //////////////////////////////////////////////////////////////////////////       

  }   
  cvShowImage( "Origin OpticalFlow", image);   

  //////////////////////////////////////////////////////////////////////////   

  //time delay   
  one_zero++;
  if( (one_zero % DELAY_T ) == 0)   
  {      
   one_zero=0;   
  }   

  //break      
  if( cvWaitKey(10) >= 0 )      
   break;      
 }      

 //release capture point      
 cvReleaseCapture(&capture);   
 //close the window      
 cvDestroyWindow( "Origin" );      

 cvReleaseImage(&Old_Img);    
 //////////////////////////////////////////////////////////////////////////   
 cvReleaseImage(&imgA);   
 cvReleaseImage(&imgB);    
 cvReleaseImage(&eig_image);
 cvReleaseImage(&tmp_image);
 delete [] cornersA;   
 delete [] cornersB;    
 cvReleaseImage(&pyrA);   
 cvReleaseImage(&pyrB);   


 //////////////////////////////////////////////////////////////////////////   
}   

---