
12/13/2013

SURF_GPU example source code (feature finder using GPU)

This is example source code for SURF_GPU.
Processing takes about 0.99 seconds on a 1787x1510 image.
My environment is
Intel(R) Core(TM) i5-3570 CPU @ 3.40GHz
NVIDIA GeForce GTX 650






Example source code



//////
#include <stdio.h>
#include <opencv2/opencv.hpp>
#include <opencv2/nonfree/gpu.hpp>


#ifdef _DEBUG  
#pragma comment(lib, "opencv_core246d.lib")   
//#pragma comment(lib, "opencv_imgproc246d.lib")   //MAT processing  
//#pragma comment(lib, "opencv_objdetect246d.lib")   
//#pragma comment(lib, "opencv_gpu246d.lib")  
#pragma comment(lib, "opencv_features2d246d.lib")  
#pragma comment(lib, "opencv_highgui246d.lib")  
//#pragma comment(lib, "opencv_ml246d.lib")
//#pragma comment(lib, "opencv_stitching246d.lib");
#pragma comment(lib, "opencv_nonfree246d.lib");

#else  
#pragma comment(lib, "opencv_core246.lib")  
//#pragma comment(lib, "opencv_imgproc246.lib")  
//#pragma comment(lib, "opencv_objdetect246.lib")  
//#pragma comment(lib, "opencv_gpu246.lib")  
#pragma comment(lib, "opencv_features2d246.lib")  
#pragma comment(lib, "opencv_highgui246.lib")  
//#pragma comment(lib, "opencv_ml246.lib")  
//#pragma comment(lib, "opencv_stitching246.lib");
#pragma comment(lib, "opencv_nonfree246.lib");
#endif  

using namespace cv;  
using namespace std;


int main()  
{
 //processing time measurement
 unsigned long AAtime=0, BBtime=0;

 //SURF_GPU example source code
 Mat inImg;
 vector<KeyPoint> src_keypoints;
 vector<float> src_descriptors;

 gpu::GpuMat inImg_g;
 gpu::GpuMat src_keypoints_gpu, src_descriptors_gpu;

 //image load
 inImg = imread("ship.png",0);

 //FeatureFinder 
 gpu::SURF_GPU FeatureFinder_gpu(400);

 //processing time measure
 AAtime = getTickCount();
 inImg_g.upload(inImg);

 //Feature Extraction
 FeatureFinder_gpu(inImg_g, gpu::GpuMat(), src_keypoints_gpu, src_descriptors_gpu, false);

 //Processing time measurement
 BBtime = getTickCount(); 
 
 //download keypoints and descriptors from GPU memory
 FeatureFinder_gpu.downloadKeypoints(src_keypoints_gpu, src_keypoints); 
 FeatureFinder_gpu.downloadDescriptors(src_descriptors_gpu, src_descriptors);
 
 //Draw the detected features
 drawKeypoints(inImg, src_keypoints, inImg, Scalar::all(-1), DrawMatchesFlags::DRAW_RICH_KEYPOINTS );
 
 imshow("Show", inImg); 

 printf("Processing time = %.2lf(sec) \n",  (BBtime - AAtime)/getTickFrequency() );
 printf("Features %d\n", src_keypoints.size() );

 waitKey(0);

 //save to file
 imwrite("output.jpg", inImg);
 return 0;
}

///

10/28/2013

Two camera views on one screen using a stitching algorithm (OpenCV, example source code) (mosaic)

This source code is based on ->
http://feelmare.blogspot.kr/2011/08/two-image-mosaic-paranoma-based-on-sift.html
That page introduces how to make a mosaic image from two adjacent images.
Using that code, I stitched two camera videos into one video.

After running, the program is controlled by three keys:
'q' quits, 'p' runs the processing (stitching), and 'r' resets.








#include <opencv2/opencv.hpp>
#include <opencv2/nonfree/features2d.hpp>

using namespace cv;
using namespace std;

Mat TwoInOneOut(Mat Left, Mat Right);

int main()
{
 VideoCapture stream1(0);   //0 is the id of the first video device
 VideoCapture stream2(1);   //1 is the id of the second video device
 
 if (!stream1.isOpened()) { //check if video device has been initialised
  cout << "cannot open camera 1";
  return -1;
 }

 if (!stream2.isOpened()) { //check if video device has been initialised
  cout << "cannot open camera 2";
  return -1;
 }


// namedWindow("Processing");
// namedWindow("Left");
// namedWindow("Right");

 Mat H;
 int mode=0;
 //unconditional loop
 while (true) {
  Mat cameraFrame1;
  stream1.read(cameraFrame1); //get one frame from video

  Mat cameraFrame2;
  stream2.read(cameraFrame2); //get one frame from video

  if(mode == 0)
  {
   imshow("Left", cameraFrame1);
   imshow("Right", cameraFrame2);
  }

  Mat Left, Right;
  cvtColor(cameraFrame1, Left, CV_BGR2GRAY);   //camera frames are BGR
  cvtColor(cameraFrame2, Right, CV_BGR2GRAY);

  //read the key once per iteration; calling waitKey() several times
  //would swallow key presses and add extra delay
  int key = waitKey(30);

  if (key == 'p')
  {
   printf("Homography Matrix Processing\n");
   H = TwoInOneOut(Left, Right);
   mode=1;
   destroyWindow("Left");
   destroyWindow("Right");
  }

  if (key == 'r')
  {
   printf("normal mode\n");
   destroyWindow("Processing");
   mode=0;
  }
  
  //printf("%d %d\n", H.cols, H.rows);
  if(H.cols == 3 && H.rows == 3)
  {
   Mat WarpImg( Left.rows*2, Left.cols*2, cameraFrame1.type() );
      //printf("%d %d\n", A.depth(), A.channels());
   warpPerspective(cameraFrame2, WarpImg, H, Size(WarpImg.cols, WarpImg.rows));
   Mat tempWarpImg = WarpImg(Rect(0,0,Left.cols,Left.rows));
   cameraFrame1.copyTo(tempWarpImg);

   /*
   Mat WarpImg( Left.rows*2, Left.cols*2, CV_8U);
      //printf("%d %d\n", A.depth(), A.channels());
   warpPerspective(Right, WarpImg, H, Size(WarpImg.cols, WarpImg.rows));
   Mat tempWarpImg = WarpImg(Rect(0,0,Left.cols,Left.rows));
   Left.copyTo(tempWarpImg);
   */
   if(mode ==1)
    imshow("Processing", WarpImg );
  //Mat t = WarpImg( Rect(0,0,B.cols, B.rows));

  }

  //imshow("Processing", t );

  if (key == 'q')
   break;
 }

 destroyAllWindows();

 return 0;
}


Mat TwoInOneOut(Mat Left, Mat Right)
{
 Mat H;
 

 if(Left.channels() != 1 || Right.channels() != 1)
 {
  printf("Channel Error\n");
  return H;
 }

 /////////////////
 //Detect the keypoints using SURF Detector
    int minHessian = 300; //1500; 
    SurfFeatureDetector detector( minHessian );
 SurfDescriptorExtractor extractor;

 /////////////////
 //A
    std::vector< KeyPoint> kp_Left;
    detector.detect( Left, kp_Left );    
 Mat des_Left;
    extractor.compute( Left, kp_Left, des_Left );

 /////////////////
 //B 
 std::vector< KeyPoint> kp_Right;
 detector.detect( Right, kp_Right );
 Mat des_Right;
 extractor.compute( Right, kp_Right, des_Right );

 /////////////////
 //Match
 std::vector< vector< DMatch > > matches;
 FlannBasedMatcher matcher;
 matcher.knnMatch(des_Left, des_Right, matches, 2);
 //matcher.knnMatch(des_Right, des_Left, matches, 2);
 std::vector< DMatch > good_matches;
 good_matches.reserve(matches.size());  

 for (size_t i = 0; i < matches.size(); ++i)
 { 
  if (matches[i].size() < 2)
   continue;

  const DMatch &m1 = matches[i][0];
  const DMatch &m2 = matches[i][1];

  if(m1.distance <= 0.7 * m2.distance)        
   good_matches.push_back(m1);     
 }

 //Draw only "good" matches
 Mat img_matches;
    drawMatches( Left, kp_Left, Right, kp_Right, good_matches, 
  img_matches, Scalar::all(-1), Scalar::all(-1), 
  vector< char>(), DrawMatchesFlags::NOT_DRAW_SINGLE_POINTS );
 imshow("Match", img_matches);

    /////////////////
 //Find H
 if(good_matches.size() > 20 )
 {
  std::vector< Point2f >  LeftMatchPT;
  std::vector< Point2f >  RightMatchPT;
  
  for( unsigned int i = 0; i < good_matches.size(); i++ )
  {
   //-- Get the keypoints from the good matches
   LeftMatchPT.push_back( kp_Left[ good_matches[i].queryIdx ].pt );
   RightMatchPT.push_back( kp_Right[ good_matches[i].trainIdx ].pt );
  }

  H = findHomography( RightMatchPT, LeftMatchPT, CV_RANSAC );
  //H = findHomography( LeftMatchPT,RightMatchPT, CV_RANSAC );
 }


 return H;


}

////
The source code
-> here

10/27/2011

SIFT matching C++ source code / using OpenCV library

Created Date : 2011.10
Language : C/C++
Tool : Microsoft Visual C++ 2008
Library & Utilized : OpenCV 2.3
Reference : SIFT reference
etc. : template Image, WebCam







I made a SIFT matching program using OpenCV 2.3.
I was wondering how to find the object pose.
On the internet there are many sources about SIFT and SURF, but most of the code only covers the descriptor and matching; there is no code for finding the object pose.
So I made this code and decided to share it.


This code uses very useful OpenCV functions:
cvExtractSURF, cvFindHomography...

I wrapped the matching code in a class. The class file names are MareMatchingClass.h/cpp.
You can use my class in your source very easily (a hedged usage sketch follows the steps below).

1. Create the matching class
   CMareMatchingClass MMathing;   

2. Input PatchImg
   MMathing.ExtractPatchSurf(PatchImg);

3. Find PatchImg in the background image
   MMathing.GetObjectRectAndBestH(BackGroundImg, &rect4pt);

4. Draw the rect (rect4pt).
5. Repeat, go to step 3.
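
For reference, below is a minimal usage sketch of steps 1~5 in a webcam loop. It is a sketch under assumptions: CMareMatchingClass, ExtractPatchSurf and GetObjectRectAndBestH are the class members named above, but the image types, the Rect4Pt corner struct, the "patch.png" file name and the drawing code are my own placeholders, so check the actual declarations in MareMatchingClass.h.

//////
#include "MareMatchingClass.h"
#include <opencv2/opencv.hpp>

//placeholder for the four projected corners of the patch;
//the real type is declared in MareMatchingClass.h
struct Rect4Pt { CvPoint2D32f pt[4]; };

int main()
{
 //1. create the matching class
 CMareMatchingClass MMathing;

 //2. input the patch image (assumed to be a grayscale IplImage* here)
 IplImage* PatchImg = cvLoadImage("patch.png", CV_LOAD_IMAGE_GRAYSCALE);
 MMathing.ExtractPatchSurf(PatchImg);

 CvCapture* capture = cvCaptureFromCAM(0);
 while(1)
 {
  IplImage* BackGroundImg = cvQueryFrame(capture); //owned by capture, do not release
  if(!BackGroundImg) break;

  //3. find PatchImg in the background image and get its four corners
  Rect4Pt rect4pt;
  MMathing.GetObjectRectAndBestH(BackGroundImg, &rect4pt);

  //4. draw the rect
  for(int i=0; i<4; i++)
   cvLine(BackGroundImg, cvPointFrom32f(rect4pt.pt[i]),
          cvPointFrom32f(rect4pt.pt[(i+1)%4]), CV_RGB(0,255,0), 2);

  cvShowImage("matching", BackGroundImg);

  //5. repeat from step 3 until a key is pressed
  if(cvWaitKey(10) >= 0) break;
 }

 cvReleaseCapture(&capture);
 cvReleaseImage(&PatchImg);
 return 0;
}
///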

The class consists of the following process:
1. Extract features -> using the cvExtractSURF function
2. Find matching points
3. Randomly select some features among the matched feature points.
4. Calculate the homography matrix. This is the geometric relationship between the patch and the background image.
5. Transform the features of the patch image by the homography matrix.
6. Compare the transformed features to the background features.
7. Evaluate how exact the homography is.
8. Repeat steps 3~7 and select the best H.
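
To make steps 5~7 concrete, here is a minimal, self-contained sketch of the scoring idea using the OpenCV C++ API. The function and variable names are illustrative only; they are not the actual members of CMareMatchingClass.

//////
#include <opencv2/opencv.hpp>
#include <cmath>
#include <vector>

//Count how many matched pairs agree with a candidate homography H within a
//pixel threshold. Over repeated random samples (steps 3~7), the H with the
//highest count is kept as the best H.
static int CountInliers(const cv::Mat& H,
                        const std::vector<cv::Point2f>& patchPt,
                        const std::vector<cv::Point2f>& bgPt,
                        double thresholdPx = 3.0)
{
 std::vector<cv::Point2f> projected;
 cv::perspectiveTransform(patchPt, projected, H);  //step 5: transform patch features by H

 int inliers = 0;
 for(size_t i = 0; i < patchPt.size(); ++i)        //step 6: compare to background features
 {
  double dx = projected[i].x - bgPt[i].x;
  double dy = projected[i].y - bgPt[i].y;
  if(std::sqrt(dx*dx + dy*dy) < thresholdPx)       //step 7: agreement count is the score of H
   ++inliers;
 }
 return inliers;
}
///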


<source code>

I don't think this source code is the best.
It still has some shortcomings and would need further improvement,
so I want to discuss it with you. Please leave your valuable opinion.
Thank you.
Have a nice day~. ^^

Oh~ English is very difficult.....


8/21/2011

Two Image mosaic (panorama) based on SIFT / C++ source (OpenCV) / Making a mosaic (panorama) image from two images using the SIFT feature extractor

Created Date : 2011.2
Language : C/C++
Tool : Microsoft Visual C++ 2010
Library & Utilized : OpenCV 2.2
Reference : Internet reference
etc. : 2 adjacent images


Two adjacent images

Feature extraction by SURF (SIFT)

Feature matching

Mosaic (panorama)

This program proceeds as follows.
First, the program finds feature points in each image using SURF.
->cvExtractSURF
Second, the feature points of each image are matched by similarity.
->FindMatchingPoints
Third, we get the homography matrix.
->cvFindHomography
Last, we warp one image to attach the two into one image.
->cvWarpPerspective
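
The downloadable source uses the old C API (cvExtractSURF, cvFindHomography, cvWarpPerspective). For readers on the newer C++ API, below is a rough, minimal sketch of the same four steps; the file names, the SURF threshold and the canvas size are placeholder values, not the ones used in the linked source.

//////
#include <opencv2/opencv.hpp>
#include <opencv2/nonfree/features2d.hpp>
using namespace cv;

int main()
{
 Mat left  = imread("left.jpg",  0);
 Mat right = imread("right.jpg", 0);

 //1. SURF features in each image
 SurfFeatureDetector detector(400);
 SurfDescriptorExtractor extractor;
 std::vector<KeyPoint> kpL, kpR;
 Mat desL, desR;
 detector.detect(left,  kpL);  extractor.compute(left,  kpL, desL);
 detector.detect(right, kpR);  extractor.compute(right, kpR, desR);

 //2. match descriptors (cross-check to drop weak matches)
 BFMatcher matcher(NORM_L2, true);
 std::vector<DMatch> matches;
 matcher.match(desR, desL, matches);   //right -> left

 std::vector<Point2f> ptR, ptL;
 for(size_t i = 0; i < matches.size(); ++i)
 {
  ptR.push_back(kpR[matches[i].queryIdx].pt);
  ptL.push_back(kpL[matches[i].trainIdx].pt);
 }

 //3. homography mapping the right image into the left image's frame
 Mat H = findHomography(ptR, ptL, CV_RANSAC);

 //4. warp the right image onto a wide canvas and paste the left image over it
 Mat mosaic(left.rows, left.cols*2, left.type());
 warpPerspective(right, mosaic, H, mosaic.size());
 left.copyTo(mosaic(Rect(0, 0, left.cols, left.rows)));

 imwrite("mosaic.jpg", mosaic);
 return 0;
}
///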

You can download the source here.
If you have a good idea or an opinion for improvement, please leave a reply.
Thank you.

-----------------------------------------------------------------------------

μ΄μ›ƒλœ 두 μž₯의 μ˜μƒμ„ μž…λ ₯ λ°›μ•„ ν•˜λ‚˜μ˜ λͺ¨μžμ΄ν¬ μ˜μƒ(νŒŒλΌλ…Έλ§ˆ)으둜 λ§Œλ“ λ‹€.
νŠΉμ§• μΆ”μΆœ 및 비ꡐ 방법 : suft ->cvExtractSURF
νŠΉμ§• λ§€μΉ­ 방법 : FindMatchingPoints
호λͺ¨κ·ΈλΌν”Ό ν–‰λ ¬ κ΅¬ν•˜κΈ° : cvFindHomography
μ˜μƒ λͺ¨μžμ΄ν¬ 방법 : warpping

전체 μ†ŒμŠ€ μ½”λ“œλŠ” μ—¬κΈ°μ„œ 받을 수 μžˆμŠ΅λ‹ˆλ‹€.
https://github.com/MareArts/Two-Image-mosaic-paranoma-based-on-SIFT
κ°œμ„  μ‚¬ν•­μ΄λ‚˜ 쒋은 의견 μžˆμœΌμ‹œλ©΄ λ‹΅λ³€ μ£Όμ„Έμš”.
κ°μ‚¬ν•©λ‹ˆλ‹€.
