1/28/2015

Canny edge detector, example source code in opencv

Canny edge processing example

CPU version result:

GPU version result:
CPU version code:
#include <time.h>
#include <opencv2\opencv.hpp>
#include <opencv2\gpu\gpu.hpp>
#include <string>
#include <stdio.h>


#ifdef _DEBUG          
#pragma comment(lib, "opencv_core249d.lib")  
#pragma comment(lib, "opencv_imgproc249d.lib")   //MAT processing  
//#pragma comment(lib, "opencv_gpu249d.lib")  
#pragma comment(lib, "opencv_highgui249d.lib")  
#else  
#pragma comment(lib, "opencv_core249.lib")  
#pragma comment(lib, "opencv_imgproc249.lib")  
//#pragma comment(lib, "opencv_gpu249.lib")  
#pragma comment(lib, "opencv_highgui249.lib")  
#endif     


#define RWIDTH 800  
#define RHEIGHT 600  

using namespace std;  
using namespace cv;  

void ProccTimePrint(int64 Atime, string msg);

int main()  
{  

 //video input
 VideoCapture cap("C:\\videoSample\\tracking\\rouen_video.avi");

 //variable
 Mat o_frame;  
 Mat showMat_r;  
 Mat showMat_r2;  
 

 //first frame
 cap >> o_frame;  
 if( o_frame.empty() )  
  return 0;   


 int64 AAtime = 0;
 namedWindow("origin",0);
 namedWindow("canny",0);

 while(1)  
 {  
  /////////////////////////////////////////////////////////////////////////  
  AAtime = getTickCount();  

  //frame
  cap >> o_frame;  
  if( o_frame.empty() )  
   return 0;  

  resize(o_frame, showMat_r, Size(RWIDTH, RHEIGHT) );  
  Canny(showMat_r, showMat_r2, 50, 100);

  imshow("origin", showMat_r);  
  imshow("canny", showMat_r2);  

  //processing time
  ProccTimePrint(AAtime , "Total");     

  if( waitKey(5) > 0)  
   break;  

 }

 return 0;
}


void ProccTimePrint(int64 Atime, string msg)
{
 int64 Btime = 0;
 float sec, fps;
 Btime = getTickCount(); //stop tick
 sec = (Btime - Atime) / getTickFrequency();
 fps = 1 / sec;
 printf("%s %.4f(sec) / %.4f(fps) \n", msg.c_str(), sec, fps);
}

///

GPU version code:
#include <time.h>
#include <opencv2\opencv.hpp>
#include <opencv2\gpu\gpu.hpp>
#include <string>
#include <stdio.h>


#ifdef _DEBUG          
#pragma comment(lib, "opencv_core249d.lib")  
#pragma comment(lib, "opencv_imgproc249d.lib")   //MAT processing  
#pragma comment(lib, "opencv_gpu249d.lib")  
#pragma comment(lib, "opencv_highgui249d.lib")  
#else  
#pragma comment(lib, "opencv_core249.lib")  
#pragma comment(lib, "opencv_imgproc249.lib")  
#pragma comment(lib, "opencv_gpu249.lib")  
#pragma comment(lib, "opencv_highgui249.lib")  
#endif     


#define RWIDTH 800  
#define RHEIGHT 600  

using namespace std;  
using namespace cv;  

void ProccTimePrint(int64 Atime, string msg);

int main()  
{  

 //input
 VideoCapture cap("C:\\videoSample\\tracking\\rouen_video.avi");

 //variable
 Mat o_frame;  
 Mat showMat_r;  
 Mat showMat_r2;  
 gpu::GpuMat o_frame_gpu;
 gpu::GpuMat r_frame_gpu;
 gpu::GpuMat rg_frame_gpu;
 gpu::GpuMat r_frame_gpu2;

 //first frame
 cap >> o_frame;  
 if( o_frame.empty() )  
  return 0;   


 int64 AAtime = 0;

 while(1)  
 {  
  /////////////////////////////////////////////////////////////////////////  
  AAtime = getTickCount();  

  //frame
  cap >> o_frame;  
  if( o_frame.empty() )  
   return 0;  

  //upload to gpumat
  o_frame_gpu.upload(o_frame);  
  gpu::resize(o_frame_gpu, r_frame_gpu, Size(RWIDTH, RHEIGHT) );  
  gpu::cvtColor(r_frame_gpu, rg_frame_gpu, CV_BGR2GRAY);
  gpu::Canny(rg_frame_gpu, r_frame_gpu2, 50, 100); //gray only

  //download to mat
  r_frame_gpu.download(showMat_r);  
  r_frame_gpu2.download(showMat_r2);  

  
  //show image
  imshow("origin", showMat_r);  
  imshow("canny", showMat_r2);  

  //processing time
  ProccTimePrint(AAtime , "Total");     

  if( waitKey(10) > 0)  
   break;  

 }

 return 0;
}


void ProccTimePrint(int64 Atime, string msg)
{
 int64 Btime = 0;
 float sec, fps;
 Btime = getTickCount(); //stop tick
 sec = (Btime - Atime) / getTickFrequency();
 fps = 1 / sec;
 printf("%s %.4f(sec) / %.4f(fps) \n", msg.c_str(), sec, fps);
}
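
For reference, in OpenCV 3.x and 4.x the gpu module was renamed to cuda and the free functions became algorithm objects. The following is only a minimal sketch of the same resize/gray/Canny step with the newer API (assuming the cudawarping and cudaimgproc modules are built with CUDA support); it is not part of the 2.4.9 example above.

#include <opencv2/opencv.hpp>
#include <opencv2/cudaimgproc.hpp>
#include <opencv2/cudawarping.hpp>

//resize, convert to gray, and run Canny on the GPU (OpenCV 3.x/4.x cuda API)
void cudaCannySketch(const cv::Mat& frame, cv::Mat& edges)
{
 cv::cuda::GpuMat d_src, d_resized, d_gray, d_edges;
 d_src.upload(frame);

 cv::cuda::resize(d_src, d_resized, cv::Size(800, 600)); //same 800x600 size as above
 cv::cuda::cvtColor(d_resized, d_gray, cv::COLOR_BGR2GRAY);

 //Canny is exposed as a reusable detector object in the cuda API
 cv::Ptr<cv::cuda::CannyEdgeDetector> canny = cv::cuda::createCannyEdgeDetector(50.0, 100.0);
 canny->detect(d_gray, d_edges);

 d_edges.download(edges);
}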



1/26/2015

Real-time yard trailer identification by detection of vehicle ID numbers


Project period : (2013.09~2013.11)


*Introduction
• Y/T (Yard Trailer) number identification solution using image processing.
• A camera-based solution is easier to install and maintain than RFID, and it is less constrained by distance.
• Machine learning methods, SVM (Support Vector Machine) and MLP (Multi-Layer Perceptron), are used to recognize the ID numbers.
• High-speed image processing through GPU parallel programming.


*Real-time pre-processing for feature extraction
• The pre-processing steps for ID number extraction:
-In the first step, we apply different filters, morphological operations, contour algorithms, and validations to retrieve the parts of the image that could contain the target region; a rough sketch of such a pipeline is shown below.
-In particular, we detect the ventilation opening rather than the ID number itself, because its shape varies less than the three characters of the ID number.
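
The exact filters and validation rules are project-specific and not published here, so the following is only a minimal sketch of this kind of candidate-region pipeline. The thresholds, kernel size, and aspect-ratio limits are hypothetical; the API is the OpenCV 2.4 style used in the listings on this page.

#include <opencv2\opencv.hpp>
using namespace cv;
using namespace std;

//candidate-region sketch: blur -> binarize -> morphology -> contours -> geometric validation
vector<Rect> findCandidateRegions(const Mat& bgr)
{
 Mat gray, bw;
 cvtColor(bgr, gray, CV_BGR2GRAY);
 GaussianBlur(gray, gray, Size(5, 5), 0);
 threshold(gray, bw, 0, 255, THRESH_BINARY_INV | THRESH_OTSU);

 //close small gaps so the ventilation opening forms one blob (kernel size is illustrative)
 Mat kernel = getStructuringElement(MORPH_RECT, Size(9, 3));
 morphologyEx(bw, bw, MORPH_CLOSE, kernel);

 vector<vector<Point> > contours;
 findContours(bw, contours, CV_RETR_EXTERNAL, CV_CHAIN_APPROX_SIMPLE);

 vector<Rect> candidates;
 for (size_t i = 0; i < contours.size(); ++i)
 {
  Rect r = boundingRect(contours[i]);
  float aspect = (float)r.width / r.height;
  //keep only wide, reasonably sized blobs (hypothetical validation)
  if (r.area() > 500 && aspect > 2.0f && aspect < 8.0f)
   candidates.push_back(r);
 }
 return candidates;
}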



*Vehicle Identification
• HOG (Histogram of Oriented Gradients) feature extraction and an SVM classifier are used to detect the ventilation opening.


• Features are extracted from each segmented character to train and classify with the MLP.
• The feature vector consists of the horizontal and vertical histogram values of a 5x5 low-resolution image of the character; a small sketch of this feature extraction is shown below.
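
A minimal sketch of this character feature extraction follows. The 5x5 size and the row/column projections come from the description above; the normalization and the rest of the code are illustrative assumptions.

#include <opencv2\opencv.hpp>
#include <vector>
using namespace cv;
using namespace std;

//resize a segmented (binary) character to 5x5 and concatenate the
//horizontal (row-sum) and vertical (column-sum) projection histograms
vector<float> characterFeature(const Mat& binaryChar)
{
 Mat smallImg;
 resize(binaryChar, smallImg, Size(5, 5), 0, 0, INTER_AREA);
 smallImg.convertTo(smallImg, CV_32F, 1.0 / 255.0);

 vector<float> feature;
 for (int y = 0; y < smallImg.rows; ++y) //horizontal histogram
  feature.push_back((float)sum(smallImg.row(y))[0]);
 for (int x = 0; x < smallImg.cols; ++x) //vertical histogram
  feature.push_back((float)sum(smallImg.col(x))[0]);

 return feature; //10 values, used to train/classify with the MLP
}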


*Experiment
• Recognition rate: over 95%
• Detection speed: about 0.05 sec/frame (image size: 1280x720, Intel® Core™ i5-3570 CPU 3.40GHz, NVIDIA GeForce GTX 650)
• Trailer entry speed: about 20~30 km/h


#include <stdio.h>
#include <opencv2\opencv.hpp>

#include "ShinPortOCR.h"

using namespace cv;


int main()
{

 ShinPortOCR cShinPortOCR;

 //printf("How many image files to read in sequence? (ex:10 -> ./data/1.jpg, ./data/2.jpg ... ./data/10.jpg) \n");
 printf("How many images do you want to test? (ex: 100, 500, 1630)\n");
 int num;
 scanf_s("%d", &num);

 int p = 0, n = 0;
 char str[100];
 for (int i = 0; i< num; ++i)
 {
  printf("%d/%d\n", i, num);

  sprintf_s(str, "./data/%d.jpg", i + 1);
  Mat inImg = imread(str, 1);//, CV_LOAD_IMAGE_GRAYSCALE);
  Mat OutImg; 
  if (cShinPortOCR.GoGoXing(inImg, OutImg, 1) == -111) //1 = print debug output, 0 = no debug output
  {
   sprintf_s(str, ".\\Log\\fail\\%d.jpg", i + 1);
   imwrite(str, inImg);
  }
  else{
   sprintf_s(str, ".\\Log\\success\\%d.jpg", i + 1);
   imwrite(str, inImg);
  }


  sprintf_s(str, ".\\Log\\processing\\%d.jpg", i + 1);
  imwrite(str, OutImg);


  imshow("result", OutImg);
  waitKey(10);

 }
 return 0;
}


///

The source code is here:
https://github.com/MareArts/Container-Yard-Trailer-ID-number-recognition

You can download the OpenCV dll/lib/header files here (OpenCV 2.4.9, 64-bit, built with CUDA 6.0):
https://www.amazon.com/clouddrive/share/7bPR5HgbCbNZJHwG0ldq1gwHtydLXRxtQVYc5JYPlSF?ref_=cd_ph_share_link_copy


A method to check whether the camera is moving or stationary using dense optical flow (OpenCV, GPU version)

This post introduces how to check whether the camera (video) is moving or not.


This method can be applied in various fields; for example, it can also serve as a cue for which scenes are important.

To solve this problem, I used dense optical flow.
I introduce dense optical flow on YouTube:
http://www.youtube.com/watch?v=yAz1qrN6T_o
http://www.youtube.com/watch?v=iRMqH6y6JKU

and on my blog
http://feelmare.blogspot.kr/search/label/dense%20optical%20flow

In the example source code, the algorithm checks what percentage of the image area is moving.
For each pixel, the flow displacement is measured; pixels whose displacement is smaller than a threshold are not counted toward the motion percentage.

Note that the example source code is the GPU version.
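
Before the full GPU listing, here is a minimal CPU-side sketch of the motion-rate idea, assuming flow_x and flow_y are the downloaded CV_32F flow fields; the thresholds correspond to GLOBAL_MOTION_TH1/TH2 in the code below.

#include <opencv2\opencv.hpp>
using namespace cv;

//percentage of pixels whose flow magnitude exceeds a per-pixel threshold;
//the caller then compares this rate against a global "moving" threshold
float motionRate(const Mat& flow_x, const Mat& flow_y, float pixelTh)
{
 Mat mag;
 magnitude(flow_x, flow_y, mag); //per-pixel sqrt(fx*fx + fy*fy)
 int moving = countNonZero(mag > pixelTh); //pixels above the threshold
 return 100.0f * moving / (float)mag.total();
}

//usage: if (motionRate(flow_x, flow_y, 1.0f) > 70.0f) -> "Moving", else "Stop"
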
#include <stdio.h>

#include <opencv2\opencv.hpp>
#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2\gpu\gpu.hpp>
#include <opencv2\nonfree\features2d.hpp>



#ifdef _DEBUG        
#pragma comment(lib, "opencv_core249d.lib")
#pragma comment(lib, "opencv_imgproc249d.lib")   //MAT processing
#pragma comment(lib, "opencv_objdetect249d.lib") //HOGDescriptor
#pragma comment(lib, "opencv_gpu249d.lib")
#pragma comment(lib, "opencv_features2d249d.lib")
#pragma comment(lib, "opencv_highgui249d.lib")
#else
#pragma comment(lib, "opencv_core249.lib")
#pragma comment(lib, "opencv_imgproc249.lib")
#pragma comment(lib, "opencv_objdetect249.lib")
#pragma comment(lib, "opencv_gpu249.lib")
#pragma comment(lib, "opencv_features2d249.lib")
#pragma comment(lib, "opencv_highgui249.lib")
#endif 

using namespace std;
using namespace cv;

#define WIDTH_DENSE (80)
#define HEIGHT_DENSE (60)

#define DENSE_DRAW 0 //dense optical flow arrow drawing or not
#define GLOBAL_MOTION_TH1 1
#define GLOBAL_MOTION_TH2 70


float drawOptFlowMap_gpu (const Mat& flow_x, const Mat& flow_y, Mat& cflowmap, int step, float scaleX, float scaleY, int drawOnOff);


int main()
{
 //stream /////////////////////////////////////////////////
 VideoCapture stream1("C:\\videoSample\\medical\\HUV-03-14.wmv"); 

 //variables /////////////////////////////////////////////
 Mat O_Img; //Mat
 gpu::GpuMat O_Img_gpu; //GPU
 gpu::GpuMat R_Img_gpu_dense; //gpu dense resize
 gpu::GpuMat R_Img_gpu_dense_gray_pre; //gpu dense resize gray
 gpu::GpuMat R_Img_gpu_dense_gray; //gpu dense resize gray
 gpu::GpuMat flow_x_gpu, flow_y_gpu;
 Mat flow_x, flow_y;

 //algorithm *************************************
 //dense optical flow
 gpu::FarnebackOpticalFlow fbOF;
 

 //running once //////////////////////////////////////////
 if(!(stream1.read(O_Img))) //get one frame from video
 {
  printf("Open Fail !!\n");
  return 0; 
 }

  //for rate calculation
 float scaleX, scaleY;
 scaleX = (float)O_Img.cols / WIDTH_DENSE;
 scaleY = (float)O_Img.rows / HEIGHT_DENSE;

 O_Img_gpu.upload(O_Img); 
 gpu::resize(O_Img_gpu, R_Img_gpu_dense, Size(WIDTH_DENSE, HEIGHT_DENSE));
 gpu::cvtColor(R_Img_gpu_dense, R_Img_gpu_dense_gray_pre, CV_BGR2GRAY);


 //unconditional loop   ///////////////////////////////////
 while (true) {
  //reading
  if( stream1.read(O_Img) == 0) //get one frame from video
   break;

  // ---------------------------------------------------
  //upload cpu mat to gpu mat
  O_Img_gpu.upload(O_Img); 
  //resize
  gpu::resize(O_Img_gpu, R_Img_gpu_dense, Size(WIDTH_DENSE, HEIGHT_DENSE));
  //color to gray
  gpu::cvtColor(R_Img_gpu_dense, R_Img_gpu_dense_gray, CV_BGR2GRAY);
  
  //calculate dense optical flow using GPU version
  fbOF.operator()(R_Img_gpu_dense_gray_pre, R_Img_gpu_dense_gray, flow_x_gpu, flow_y_gpu);
  flow_x_gpu.download( flow_x );
  flow_y_gpu.download( flow_y );


  //calculate motion rate in whole image
  float motionRate = drawOptFlowMap_gpu(flow_x, flow_y, O_Img, 1, scaleX, scaleY, DENSE_DRAW);
  //update pre image
  R_Img_gpu_dense_gray_pre = R_Img_gpu_dense_gray.clone();



  //display "Moving" or "Stop" message on the image
  if(motionRate > GLOBAL_MOTION_TH2 ) //if more than GLOBAL_MOTION_TH2 percent of the pixels are moving, consider the camera to be moving
  {
   char TestStr[100] = "Moving!!";
   putText(O_Img, TestStr, Point(30,60), FONT_HERSHEY_SIMPLEX, 2, Scalar(0,0,255), 3); //draw on O_Img
  }else{
   char TestStr[100] = "Stop!!";
   putText(O_Img, TestStr, Point(30,60), FONT_HERSHEY_SIMPLEX, 2, Scalar(255,0,0), 3); //draw on O_Img
  }


  // show image ----------------------------------------
  imshow("Origin", O_Img);   

  // wait key
  if( cv::waitKey(100) > 30)
   break;
 }
}



float drawOptFlowMap_gpu (const Mat& flow_x, const Mat& flow_y, Mat& cflowmap, int step, float scaleX, float scaleY, int drawOnOff)
{
 double count=0;

 float countOverTh1 = 0;
 int sx,sy;
 for(int y = 0; y < HEIGHT_DENSE; y += step)
 {
  for(int x = 0; x < WIDTH_DENSE; x += step)
  {
   
   if(drawOnOff)
   {
    Point2f fxy;    
    fxy.x = cvRound( flow_x.at< float >(y, x)*scaleX + x*scaleX );   
    fxy.y = cvRound( flow_y.at< float >(y, x)*scaleY + y*scaleY );   
    line(cflowmap, Point(x*scaleX,y*scaleY), Point(fxy.x, fxy.y), CV_RGB(0, 255, 0));   
    circle(cflowmap, Point(fxy.x, fxy.y), 1, CV_RGB(0, 255, 0), -1);   
   }

   float xx = fabs(flow_x.at< float >(y, x) );
   float yy = fabs(flow_y.at< float >(y, x) );

   float xxyy = sqrt(xx*xx + yy*yy);
   if( xxyy > GLOBAL_MOTION_TH1 )
    countOverTh1 = countOverTh1 +1;
   
   count=count+1;
  }
 }
 return (countOverTh1 / count) * 100;

}


1/25/2015

MIL, Boosting tracker test in opencv 3.0

I have tested the MIL and Boosting tracking algorithms in OpenCV 3.0.

Please refer to the official reference here -> http://docs.opencv.org/trunk/modules/tracking/doc/tracking.html

First, to use the tracker algorithms, you have to set the OPENCV_EXTRA_MODULES_PATH option to the path of the tracking module when you build OpenCV 3.0.



You can download the tracking module from GitHub -> https://github.com/Itseez/opencv_contrib
"opencv_contrib" is developed separately from the main OpenCV distribution.
In the CMake settings, I enabled only the tracking module, because the other modules may still be unstable and I only needed tracking.


The example source code is based on this sample -> https://github.com/lenlen/opencv/blob/tracking_api/samples/cpp/tracker.cpp

In the source code, you can select the MIL or Boosting algorithm in the Tracker::create call.

In my test, MIL is faster than Boosting, but both algorithms are still very slow and their accuracy is not good yet, so they are not ready for industrial applications. A compact sketch of the create/init/update flow is shown below.
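
The following is only a minimal sketch of that flow; the header path and the factory API vary across contrib versions (the full listing below includes opencv2\video\tracker.hpp, and from OpenCV 3.3 the string factory was replaced by per-class functions such as TrackerMIL::create()).

#include <opencv2\opencv.hpp>
#include <opencv2\tracking.hpp> //header name depends on the contrib version
using namespace cv;

//create a tracker by name ("MIL" or "BOOSTING"), initialize it on the first
//frame with a Rect2d region, then call update() once per new frame
bool runTrackerOnce(const Mat& firstFrame, const Mat& nextFrame, Rect2d& box)
{
 Ptr<Tracker> tracker = Tracker::create("MIL"); //or "BOOSTING"
 if (tracker == NULL || !tracker->init(firstFrame, box))
  return false;
 return tracker->update(nextFrame, box); //box is updated in place
}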


MIL result video

Boosting result video


The example source code is here:

#include <stdio.h>
#include <opencv2\video\tracker.hpp>
#include <opencv2\opencv.hpp>


 
#ifdef _DEBUG          
#pragma comment(lib, "opencv_core300d.lib")  
#pragma comment(lib, "opencv_highgui300d.lib")  
#pragma comment(lib, "opencv_imgcodecs300d.lib")  
#pragma comment(lib, "opencv_videoio300d.lib") 
#pragma comment(lib, "opencv_imgproc300d.lib") 
#pragma comment(lib, "opencv_cuda300d.lib")   
#pragma comment(lib, "opencv_cudawarping300d.lib")
#pragma comment(lib, "opencv_tracking300d.lib")
#define _DEBUG_RRRR  
#else  
#pragma comment(lib, "opencv_core300.lib")  
#pragma comment(lib, "opencv_highgui300.lib")  
#pragma comment(lib, "opencv_imgcodecs300.lib")  
#pragma comment(lib, "opencv_videoio300.lib") 
#pragma comment(lib, "opencv_imgproc300.lib") 
#pragma comment(lib, "opencv_cuda300.lib")   
#pragma comment(lib, "opencv_cudawarping300.lib")
#pragma comment(lib, "opencv_tracking300.lib")
#endif  


using namespace cv;
using namespace std;

static Mat image;
static bool selectObject = false;
static bool startSelection = false;
static Rect2d boundingBox;
static bool paused;

static void onMouse( int event, int x, int y, int, void* ) 
{ 
 if( !selectObject ) 
 { 
  switch ( event ) 
  { 
  case EVENT_LBUTTONDOWN: 
   //set origin of the bounding box 
   startSelection = true; 
   boundingBox.x = x; 
   boundingBox.y = y; 
   break; 
  case EVENT_LBUTTONUP: 
   //set width and height of the bounding box 
   boundingBox.width = std::abs( x - boundingBox.x ); 
   boundingBox.height = std::abs( y - boundingBox.y ); 
   paused = false; 
   selectObject = true; 

   printf("Object Rect Size(left, right, width, height) %.1f %.1f %.1f %.1f\n", boundingBox.x, boundingBox.y, boundingBox.width, boundingBox.height);
   break; 
  case EVENT_MOUSEMOVE: 
   if( startSelection && !selectObject ) 
   { 
    //draw the bounding box 
    Mat currentFrame; 
    image.copyTo( currentFrame ); 
    rectangle( currentFrame, Point( boundingBox.x, boundingBox.y ), Point( x, y ), Scalar( 255, 0, 0 ), 2, 1 ); 
     imshow( "Tracking", currentFrame ); //draw the selection in the same "Tracking" window
   } 
   break; 
  } 
 } 
} 




int main()
{
 int64 AAtime = 0, BBtime = 0;

 VideoCapture cap;
 cap.open("C:\\videoSample\\tracking\\sample2.avi");

 if( !cap.isOpened() ) 
 { 
  printf("Video File Open Fail! \n");
  return -1; 
 } 


 Mat frame; 
 paused = false; 
 namedWindow( "Tracking", 0 ); 
 setMouseCallback( "Tracking", onMouse, 0 );


 //instantiates the specific Tracker 
 //MIL : TrackerMIL
 //BOOSTING : TrackerBoosting
 Ptr<Tracker> tracker = Tracker::create("BOOSTING"); //or "MIL"
 if( tracker == NULL ) 
 { 
  printf("Error in the instantiation of the tracker..\n");
  return -1; 
 } 

 //get the first frame 
 cap >> frame; 
 frame.copyTo( image ); 
 imshow( "Tracking", image ); 

 bool initialized = false;
 while(1)
 { 
  if( !paused ) 
  { 
   cap >> frame; 
   frame.copyTo( image ); 


   if( !initialized && selectObject ) 
   { 
    //initializes the tracker 
    AAtime = getTickCount();
    if( !tracker->init( frame, boundingBox ) ) 
    { 
     printf("Could not initialize tracker\n"); 
     return -1; 
    } 
    BBtime = getTickCount();
     double sec = (BBtime - AAtime)/getTickFrequency();
     double fps = 1/sec;
     printf("Tracking initial time = %.2lf sec / %.2lf fps \n", sec, fps);

    initialized = true; 
   } 
   else if( initialized ) 
   { 
    AAtime = getTickCount();
    
    //updates the tracker 
    if( tracker->update( frame, boundingBox ) ) 
    { 
     rectangle( image, boundingBox, Scalar( 255, 0, 0 ), 2, 1 ); 
    } 

    BBtime = getTickCount();
    double sec = (BBtime - AAtime)/getTickFrequency();
    double fps = 1/sec;
    printf("Tracking update time = %.2lf sec / %.2lf fps \n", sec, fps);

   } 
   imshow( "Tracking", image ); 
  } 


  char c = (char) waitKey( 2 ); 
  if( c == 'q' ) 
   break; 
  if( c == 'p' ) 
   paused = !paused; 


 } 
 return 0;
}