6/12/2015

MSER text detection example (opencv 3.0.0)




...
#include <iostream>  
#include <opencv2/opencv.hpp>  
#include <opencv2/highgui.hpp>  
#include <opencv2/imgcodecs.hpp>  
#include <opencv2/core.hpp>
#include <opencv2/imgproc.hpp>
#include <opencv2/text.hpp>
#include <opencv2/features2d.hpp>


#ifdef _DEBUG             
#pragma comment(lib, "opencv_core300d.lib")     
#pragma comment(lib, "opencv_highgui300d.lib")  
#pragma comment(lib, "opencv_imgcodecs300d.lib")
#pragma comment(lib, "opencv_text300d.lib")
#pragma comment(lib, "opencv_features2d300d.lib")
#pragma comment(lib, "opencv_imgproc300d.lib")

#else     
#pragma comment(lib, "opencv_core300.lib")     
#pragma comment(lib, "opencv_highgui300.lib")  
#pragma comment(lib, "opencv_imgcodecs300.lib")  
#pragma comment(lib, "opencv_text300.lib")
#pragma comment(lib, "opencv_features2d300.lib")
#pragma comment(lib, "opencv_imgproc300d.lib")
#endif      

using namespace std;
using namespace cv;

void groups_draw(Mat &src, vector< Rect> &groups);

int main()
{

 Mat inImg = imread(".\\scenetext01.jpg");
 
 vector< Mat> channels;
 text::computeNMChannels(inImg, channels);

 int cn = (int)channels.size();
 // Append negative channels to detect ER- (bright regions over dark background)
 for (int c = 0; c < cn - 1; c++)
  channels.push_back(255 - channels[c]);

 


 Ptr< text::ERFilter> er_filter1 = text::createERFilterNM1(text::loadClassifierNM1(".\\trained_classifierNM1.xml"), 
  16, 0.00015f, 0.13f, 0.2f, true, 0.1f);
 Ptr< text::ERFilter> er_filter2 = text::createERFilterNM2(text::loadClassifierNM2(".\\trained_classifierNM2.xml"), 0.5);

 vector< vector< text::ERStat> > regions(channels.size());

 for (int c = 0; c< (int)channels.size(); c++)
 {
  er_filter1->run(channels[c], regions[c]);
  er_filter2->run(channels[c], regions[c]);
 }


 // Detect character groups
 cout << "Grouping extracted ERs ... ";
 vector< vector< Vec2i> > region_groups;
 vector< Rect> groups_boxes;
 text::erGrouping(inImg, channels, regions, region_groups, groups_boxes, text::ERGROUPING_ORIENTATION_HORIZ);

 groups_draw(inImg, groups_boxes);
 imshow("grouping", inImg);

 waitKey(0);

 return 0;
}


void groups_draw(Mat &src, vector< Rect> &groups)
{
 for (int i = (int)groups.size() - 1; i >= 0; i--)
 {
  if (src.type() == CV_8UC3)
   rectangle(src, groups.at(i).tl(), groups.at(i).br(), Scalar(0, 255, 255), 3, 8);
  else
   rectangle(src, groups.at(i).tl(), groups.at(i).br(), Scalar(255), 3, 8);
 }
}

///

OpenCV MSER example (opencv 3.0.0)





...
#include <iostream>  
#include <opencv2/opencv.hpp>  
#include <opencv2/highgui.hpp>  
#include <opencv2/imgcodecs.hpp>  
#include <opencv2/core.hpp>
#include <opencv2/imgproc.hpp>
#include <opencv2/text.hpp>
#include <opencv2/features2d.hpp>


#ifdef _DEBUG             
#pragma comment(lib, "opencv_core300d.lib")     
#pragma comment(lib, "opencv_highgui300d.lib")  
#pragma comment(lib, "opencv_imgcodecs300d.lib")
#pragma comment(lib, "opencv_text300d.lib")
#pragma comment(lib, "opencv_features2d300d.lib")
#pragma comment(lib, "opencv_imgproc300d.lib")

#else     
#pragma comment(lib, "opencv_core300.lib")     
#pragma comment(lib, "opencv_highgui300.lib")  
#pragma comment(lib, "opencv_imgcodecs300.lib")  
#pragma comment(lib, "opencv_text300.lib")
#pragma comment(lib, "opencv_features2d300.lib")
#pragma comment(lib, "opencv_imgproc300d.lib")
#endif      

using namespace std;
using namespace cv;
int main()
{

 Mat inImg = imread("M:\\____videoSample____\\SceneText\\SceneText01.jpg");

 Mat textImg;
 cvtColor(inImg, textImg, COLOR_BGR2GRAY);
 //Extract MSER
 
 vector< vector< Point> > contours;
 vector< Rect> bboxes;
 Ptr< MSER> mser = MSER::create(21, (int)(0.00002*textImg.cols*textImg.rows), (int)(0.05*textImg.cols*textImg.rows), 1, 0.7); 
 mser->detectRegions(textImg, contours, bboxes); 

 for (size_t i = 0; i < bboxes.size(); i++)
 {
  rectangle(inImg, bboxes[i], CV_RGB(0, 255, 0));
 }


 namedWindow("t");
 imshow("t", inImg);
 waitKey(0);

 return 0;
}


6/10/2015

Line equation study in 3D


Equations of a straight line in 3D space.

Any point on a straight line in three-dimensional space can be represented by a position vector r.


In vector form, such a 3D point is written as follows:
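(The original equation image is not shown here, so as a reconstruction of the standard form:)

$$\mathbf{r} = \mathbf{a} + t\,\mathbf{b}$$

Here $\mathbf{a}$ is the position vector of a known point on the line, $\mathbf{b}$ is the direction vector, and $t$ is a scalar parameter.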




Let's deepen our understanding through some exercises.


ex1) Find the vector equation of a line through (-16, 4, 11) and (8, 0, -5)


How do we get the direction vector b?
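Since the worked figure is not shown here, a quick reconstruction from the two given points:

$$\mathbf{b} = (8, 0, -5) - (-16, 4, 11) = (24, -4, -16)$$

$$\mathbf{r} = (-16, 4, 11) + t\,(24, -4, -16)$$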






Let's see one more example.

ex2) Find the vector equation of a line through (-7, -7, 2) and parallel to the line in ex1)

The direction vector b is the same as in ex1), because the lines are parallel.

And this line passes through (-7, -7, 2), so the equation is as follows:
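Reconstructing it from the given point and the direction vector of ex1):

$$\mathbf{r} = (-7, -7, 2) + t\,(24, -4, -16)$$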





And let's see one more example.
Isn't it interesting?

ex3) Find the equation of the line with direction vector b.





ex4)
First, let's find the vector AX.


Then we need to get the value of t.
How? By the principle of the inner product:
two vectors are perpendicular when their inner product is 0.
So:
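The specific numbers are in the missing figure, but in general form: if the line is $\mathbf{r}(t) = \mathbf{p} + t\,\mathbf{b}$ and $A$ (position vector $\mathbf{a}$) is the given point, then

$$\vec{AX}(t) = \mathbf{p} + t\,\mathbf{b} - \mathbf{a}$$

$$\vec{AX} \cdot \mathbf{b} = 0 \;\Rightarrow\; t = \frac{(\mathbf{a} - \mathbf{p}) \cdot \mathbf{b}}{\mathbf{b} \cdot \mathbf{b}}$$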



Substitute this value of t back into the vector AX. Then:





ex5)

Find the acute angle between line 1 and line 2

where 


->
The direction vectors of L1 and L2 are b1 and b2, respectively.

By the inner product rule,
 

So we use the formula below:
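The actual direction vectors are in the missing figure, but the general formula is:

$$\cos\theta = \frac{|\mathbf{b}_1 \cdot \mathbf{b}_2|}{|\mathbf{b}_1|\,|\mathbf{b}_2|}, \qquad \theta = \arccos\frac{|\mathbf{b}_1 \cdot \mathbf{b}_2|}{|\mathbf{b}_1|\,|\mathbf{b}_2|}$$

Taking the absolute value of the inner product guarantees we get the acute angle.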


->










6/08/2015

Dense optical flow test on two consecutive images (opencv 2.4.9)

Dear Rahma,
I hope this code helps you.
Thank you.




...
#include <stdio.h>    
  
#include <opencv2/opencv.hpp>    
#include <opencv2/core/core.hpp>    
#include <opencv2/highgui/highgui.hpp>    
#include <opencv2/nonfree/features2d.hpp>        
  
  
  
#ifdef _DEBUG            
#pragma comment(lib, "opencv_core249d.lib")    
#pragma comment(lib, "opencv_imgproc249d.lib")   //MAT processing    
#pragma comment(lib, "opencv_objdetect249d.lib") //HOGDescriptor    
#pragma comment(lib, "opencv_features2d249d.lib")    
#pragma comment(lib, "opencv_highgui249d.lib")    
#pragma comment(lib, "opencv_video249d.lib")    
#else    
#pragma comment(lib, "opencv_core249.lib")    
#pragma comment(lib, "opencv_imgproc249.lib")    
#pragma comment(lib, "opencv_objdetect249.lib")    
#pragma comment(lib, "opencv_features2d249.lib")    
#pragma comment(lib, "opencv_highgui249.lib")   
#pragma comment(lib, "opencv_video249.lib")    
#endif     
  
using namespace std;    
using namespace cv; 

void drawOptFlowMap (const Mat& flow, Mat& cflowmap, int step, const Scalar& color);

int main()
{
 Mat prev_im,nex_im,flow;

 prev_im=imread("1.jpg",0);
 imshow("previm",prev_im);


 nex_im=imread("2.jpg",0);
 imshow("nextim",nex_im);


 calcOpticalFlowFarneback(prev_im,nex_im,flow,0.5, 1, 12, 2, 7, 1.5, 0); 

 Mat cflow;
 cvtColor(prev_im, cflow, CV_GRAY2BGR);
 drawOptFlowMap(flow, cflow, 10, CV_RGB(0, 255, 0));  


 imshow("OpticalFlowFarneback", cflow);
 waitKey(0);

 return 0;
}


void drawOptFlowMap (const Mat& flow, Mat& cflowmap, int step, const Scalar& color) {  
 for(int y = 0; y < cflowmap.rows; y += step)  
  for(int x = 0; x < cflowmap.cols; x += step)  
  {  
   const Point2f& fxy = flow.at< Point2f>(y, x);  
   line(cflowmap, Point(x,y), Point(cvRound(x+fxy.x), cvRound(y+fxy.y)),  
    color);  
   circle(cflowmap, Point(cvRound(x+fxy.x), cvRound(y+fxy.y)), 1, color, -1);  
  }  
}  

...

6/02/2015

opencv 3.0 rc1, example source code for surf and matching (gpu version)

This code is a SURF and matching test in opencv 3.0 rc1.

In particular, to use the SURF class we have to add the extra modules when building opencv 3.0 rc1.
The extra modules are published on GitHub as opencv_contrib.

Refer to this page:
http://study.marearts.com/2015/01/mil-boosting-tracker-test-in-opencv-30.html

SURF (GPU version) is declared in xfeatures2d/cuda.hpp,
and the CPU version in xfeatures2d/nonfree.hpp.
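For reference, here is a minimal CPU-side sketch using xfeatures2d/nonfree.hpp (assuming the same opencv_contrib build; the image file name is only a placeholder):

...
#include <iostream>
#include <opencv2/opencv.hpp>
#include <opencv2/xfeatures2d/nonfree.hpp>

int main()
{
 // hypothetical input image, loaded as grayscale
 cv::Mat img = cv::imread("A2.jpg", cv::IMREAD_GRAYSCALE);

 // CPU SURF in opencv 3.0: factory create, then detect + compute in one call
 cv::Ptr<cv::xfeatures2d::SURF> surf = cv::xfeatures2d::SURF::create(400);

 std::vector<cv::KeyPoint> keypoints;
 cv::Mat descriptors;
 surf->detectAndCompute(img, cv::noArray(), keypoints, descriptors);

 std::cout << "FOUND " << keypoints.size() << " keypoints" << std::endl;
 return 0;
}
...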

As we know, SURF and SIFT are still non-free. Not good! -.-

Necessary DLLs:

Result:


Now, refer to this example code.
This code was converted from http://study.marearts.com/2014/07/opencv-study-surf-gpu-and-matching.html.


#include <stdio.h>  
#include <iostream>  


#include <opencv2/opencv.hpp>  
#include <opencv2/core.hpp>
#include <opencv2/highgui.hpp> 
#include <opencv2/cudaarithm.hpp>
#include <opencv2/xfeatures2d/cuda.hpp>
#include <opencv2/cudafeatures2d.hpp>
//#include <opencv2/xfeatures2d/nonfree.hpp>



#ifdef _DEBUG          
#pragma comment(lib, "opencv_core300d.lib")
#pragma comment(lib, "opencv_highgui300d.lib")  
#pragma comment(lib, "opencv_imgcodecs300d.lib")
#pragma comment(lib, "opencv_cudafeatures2d300d.lib")
#pragma comment(lib, "opencv_xfeatures2d300d.lib")
#pragma comment(lib, "opencv_features2d300d.lib")
#else  
#pragma comment(lib, "opencv_core300.lib")
#pragma comment(lib, "opencv_highgui300.lib")
#pragma comment(lib, "opencv_imgcodecs300.lib")
#pragma comment(lib, "opencv_cudafeatures2d300.lib")
#pragma comment(lib, "opencv_xfeatures2d300.lib")
#pragma comment(lib, "opencv_features2d300.lib")
#endif   


using namespace cv;
using namespace std;

int main()
{


 
 cuda::GpuMat img1(imread("M:\\____videoSample____\\Image\\A2.jpg", IMREAD_GRAYSCALE));
 cuda::GpuMat img2(imread("M:\\____videoSample____\\Image\\A3.jpg", IMREAD_GRAYSCALE));

 
 /////////////////////////////////////////////////////////////////////////////////////////  
 int64 t_AAtime = 0, t_BBtime = 0;
 double t_pt;
 double t_fpt;
 t_AAtime = getTickCount();
 /////////////////////////////////////////////////////////////////////////////////////////  

 
 cuda::SURF_CUDA surf(400); 
 // detecting keypoints & computing descriptors   
 cuda::GpuMat keypoints1GPU, keypoints2GPU;
 cuda::GpuMat descriptors1GPU, descriptors2GPU;
 surf(img1, cuda::GpuMat(), keypoints1GPU, descriptors1GPU);
 surf(img2, cuda::GpuMat(), keypoints2GPU, descriptors2GPU);

 cout << "FOUND " << keypoints1GPU.cols << " keypoints on first image" << endl;
 cout << "FOUND " << keypoints2GPU.cols << " keypoints on second image" << endl;

 // matching descriptors   
 vector< vector< DMatch> > matches;
 Ptr< cuda::DescriptorMatcher > matcher = cuda::DescriptorMatcher::createBFMatcher();
 matcher->knnMatch(descriptors1GPU, descriptors2GPU, matches, 2);

 // downloading results  Gpu -> Cpu  
 vector< KeyPoint> keypoints1, keypoints2;
 vector< float> descriptors1, descriptors2;
 surf.downloadKeypoints(keypoints1GPU, keypoints1);
 surf.downloadKeypoints(keypoints2GPU, keypoints2);
 //surf.downloadDescriptors(descriptors1GPU, descriptors1);   
 //surf.downloadDescriptors(descriptors2GPU, descriptors2);  

 vector< KeyPoint> matchingKey1, matchingKey2;
 std::vector< DMatch > good_matches;
 for (int k = 0; k < min(descriptors1GPU.rows - 1, (int)matches.size()); k++)
 {
  if ((matches[k][0].distance < 0.6*(matches[k][1].distance)) && ((int)matches[k].size() <= 2 && (int)matches[k].size()>0))
  {
   good_matches.push_back(matches[k][0]);

  }
 }

 t_BBtime = getTickCount();
 t_pt = (t_BBtime - t_AAtime) / getTickFrequency();
 t_fpt = 1 / t_pt;
 printf("feature extraction = %.4lf / %.4lf \n", t_pt, t_fpt);

 // drawing the results   
 Mat img_matches;
 Mat img11, img22;
 img1.download(img11);
 img2.download(img22);

 //drawMatches(img11, matchingKey1, img22, matchingKey2, good_matches, img_matches);   
 drawMatches(img11, keypoints1, img22, keypoints2, good_matches, img_matches);

 namedWindow("matches", 0);
 imshow("matches", img_matches);
 waitKey(0);
 
 matcher.release();

 return 0;
}


6/01/2015

opencv 3.0 rc1, background subtraction mog2 example code

This is MOG2 example code for the opencv 3.0 rc1 version.

It will help when you convert opencv 2.x code to the 3.x opencv version.
Thank you.

This is the MOG2 example for the 2.4.9 version:
http://study.marearts.com/2015/05/basic-mog2-example-souce-using-opencv.html
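For a quick comparison of the API change, here is a minimal CPU-side sketch (separate from the GPU example below; the dummy frame is only for illustration):

...
#include <opencv2/opencv.hpp>

// opencv 2.4.x style: the subtractor is a concrete object called with operator()
//  cv::BackgroundSubtractorMOG2 mog2(3000, 64);
//  mog2(frame, fgmask, -1);

// opencv 3.0 style: create it through a factory function and call apply()
int main()
{
 cv::Mat frame = cv::Mat::zeros(240, 320, CV_8UC3); // dummy frame for illustration
 cv::Mat fgmask;

 cv::Ptr<cv::BackgroundSubtractorMOG2> mog2 = cv::createBackgroundSubtractorMOG2(3000, 64);
 mog2->apply(frame, fgmask, -1);

 return 0;
}
...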

...
#include <iostream>  
#include <opencv2/opencv.hpp>  
#include <opencv2/videoio.hpp>  
#include <opencv2/highgui.hpp>  
//#include <opencv2/core/cuda.hpp>
#include <opencv2/core.hpp>
//#include <opencv2/bgsegm.hpp>
#include <opencv2/cudabgsegm.hpp>

#ifdef _DEBUG             
#pragma comment(lib, "opencv_core300d.lib")     
#pragma comment(lib, "opencv_highgui300d.lib")  
#pragma comment(lib, "opencv_videoio300d.lib")  
#pragma comment(lib, "opencv_cudabgsegm300d.lib")  

#else     
#pragma comment(lib, "opencv_core300.lib")     
#pragma comment(lib, "opencv_highgui300.lib")  
#pragma comment(lib, "opencv_videoio300.lib")  
#pragma comment(lib, "opencv_cudabgsegm300.lib")  
#endif      

using namespace std;
using namespace cv;
int main()
{

 //
 
 VideoCapture cap("M:\\____videoSample____\\blog\\video20.wmv");
 //VideoCapture cap("M:\\____videoSample____\\posco\\cartype1.avi");
 
 //
 Ptr< cuda::BackgroundSubtractorMOG2 > MOG2_g = cuda::createBackgroundSubtractorMOG2(3000, 64); 
 Mat Mog_Mask;
 cuda::GpuMat Mog_Mask_g;

 //
 Mat o_frame;
 cuda::GpuMat o_frame_gpu;
 

 cap >> o_frame;
 if (o_frame.empty())
  return 0;

 /////////////////////////////////////////////////////////////////////////    


 int64 AAtime = 0, BBtime = 0;

 //Mat rFrame;    
 Mat showMat_r_blur;
 Mat showMat_r;

 namedWindow("origin");
 namedWindow("mog_mask");

 while (1)
 {
  /////////////////////////////////////////////////////////////////////////    
  cap >> o_frame;
  if (o_frame.empty())
   return 0;


  o_frame_gpu.upload(o_frame);
  AAtime = getTickCount();

  //
  MOG2_g->apply(o_frame_gpu, Mog_Mask_g, -1);
  //pMOG2_g.operator()(o_frame_gpu, Mog_Mask_g, -1);
  //  
  Mog_Mask_g.download(Mog_Mask);

  BBtime = getTickCount();
  float pt = (BBtime - AAtime) / getTickFrequency();
  float fpt = 1 / pt;
  printf("gpu %.4lf / %.4lf \n", pt, fpt);



  o_frame_gpu.download(showMat_r);
  imshow("origin", showMat_r);
  imshow("mog_mask", Mog_Mask);


  /////////////////////////////////////////////////////////////////////////    

  if (waitKey(10) > 0)
   break;
 }

 MOG2_g.release();

 return 0;
}



...