Just change the file mode and run it!
The './' prefix simply tells the shell to execute the file from the current directory.
# chmod a+x install_test.run
# ./install_test.run
Linux is not easy for me.....
#include <iostream>
#include <vector>
#include <stdio.h>
#include <opencv2\opencv.hpp>
#include <opencv2\legacy\legacy.hpp>

#ifdef _DEBUG
#pragma comment(lib, "opencv_core249d.lib")
#pragma comment(lib, "opencv_imgproc249d.lib")   //MAT processing
#pragma comment(lib, "opencv_objdetect249d.lib") //HOGDescriptor
//#pragma comment(lib, "opencv_gpu249d.lib")
//#pragma comment(lib, "opencv_features2d249d.lib")
#pragma comment(lib, "opencv_highgui249d.lib")
#pragma comment(lib, "opencv_ml249d.lib")
//#pragma comment(lib, "opencv_stitching249d.lib")
//#pragma comment(lib, "opencv_nonfree249d.lib")
#pragma comment(lib, "opencv_video249d.lib")
#pragma comment(lib, "opencv_legacy249d.lib")
#else
#pragma comment(lib, "opencv_core249.lib")
#pragma comment(lib, "opencv_imgproc249.lib")
#pragma comment(lib, "opencv_objdetect249.lib")
//#pragma comment(lib, "opencv_gpu249.lib")
//#pragma comment(lib, "opencv_features2d249.lib")
#pragma comment(lib, "opencv_highgui249.lib")
#pragma comment(lib, "opencv_ml249.lib")
//#pragma comment(lib, "opencv_stitching249.lib")
//#pragma comment(lib, "opencv_nonfree249.lib")
#pragma comment(lib, "opencv_video249.lib")   // release lib (the "d" suffix was a typo)
#pragma comment(lib, "opencv_legacy249.lib")  // legacy lib is also needed in release builds (CvConDensation)
#endif

using namespace cv;
using namespace std;

// (1) likelihood function: how closely does the pixel at (x, y) match pure red?
float calc_likelihood(IplImage *img, int x, int y)
{
    float b, g, r;
    float dist = 0.0, sigma = 50.0;

    // imageData is signed char, so cast to uchar to get 0..255 values
    b = (uchar)img->imageData[img->widthStep * y + x * 3];     // B
    g = (uchar)img->imageData[img->widthStep * y + x * 3 + 1]; // G
    r = (uchar)img->imageData[img->widthStep * y + x * 3 + 2]; // R
    dist = sqrt(b * b + g * g + (255.0 - r) * (255.0 - r));

    return 1.0 / (sqrt(2.0 * CV_PI) * sigma) * expf(-dist * dist / (2.0 * sigma * sigma));
}

// print the elapsed time since Atime, in seconds and fps
void ProccTimePrint(int64 Atime, string msg)
{
    int64 Btime = 0;
    float sec, fps;
    Btime = getTickCount();
    sec = (Btime - Atime) / getTickFrequency();
    fps = 1 / sec;
    printf("%s %.4lf(sec) / %.4lf(fps) \n", msg.c_str(), sec, fps);
}

int main()
{
    int64 Atime;
    int i;
    double w = 0.0, h = 0.0;
    CvCapture *capture = 0;
    IplImage *frame = 0;

    int n_stat = 4;        // state vector: [x, y, vx, vy]
    int n_particle = 1000;
    vector<float> vx(n_particle);
    vector<float> vy(n_particle);
    CvConDensation *cond = 0;
    CvMat *lowerBound = 0;
    CvMat *upperBound = 0;
    int xx, yy;

    // (2) create a capture structure for the camera (device 0)
    capture = cvCreateCameraCapture(0);

    // (3) capture one frame and get the capture size
    frame = cvQueryFrame(capture);
    w = frame->width;
    h = frame->height;
    cvNamedWindow("Condensation", CV_WINDOW_AUTOSIZE);

    // (4) create the Condensation structure
    cond = cvCreateConDensation(n_stat, 0, n_particle);

    // (5) specify the minimum and maximum values each dimension of the state vector can take
    lowerBound = cvCreateMat(4, 1, CV_32FC1);
    upperBound = cvCreateMat(4, 1, CV_32FC1);
    cvmSet(lowerBound, 0, 0, 0.0);
    cvmSet(lowerBound, 1, 0, 0.0);
    cvmSet(lowerBound, 2, 0, -10.0);
    cvmSet(lowerBound, 3, 0, -10.0);
    cvmSet(upperBound, 0, 0, w);
    cvmSet(upperBound, 1, 0, h);
    cvmSet(upperBound, 2, 0, 10.0);
    cvmSet(upperBound, 3, 0, 10.0);

    // (6) initialize the Condensation structure (spread the initial sample set)
    cvConDensInitSampleSet(cond, lowerBound, upperBound);

    // (7) specify the dynamics of the state vector used by the ConDensation algorithm
    cond->DynamMatr[0]  = 1.0; cond->DynamMatr[1]  = 0.0; cond->DynamMatr[2]  = 1.0; cond->DynamMatr[3]  = 0.0;
    cond->DynamMatr[4]  = 0.0; cond->DynamMatr[5]  = 1.0; cond->DynamMatr[6]  = 0.0; cond->DynamMatr[7]  = 1.0;
    cond->DynamMatr[8]  = 0.0; cond->DynamMatr[9]  = 0.0; cond->DynamMatr[10] = 1.0; cond->DynamMatr[11] = 0.0;
    cond->DynamMatr[12] = 0.0; cond->DynamMatr[13] = 0.0; cond->DynamMatr[14] = 0.0; cond->DynamMatr[15] = 1.0;

    // (8) (the noise parameters could be re-set here)

    while (1)
    {
        frame = cvQueryFrame(capture);
        Atime = getTickCount();   // start time

        // (9) calculate the likelihood for each particle
        for (i = 0; i < n_particle; i++)
        {
            xx = (int)(cond->flSamples[i][0]);
            yy = (int)(cond->flSamples[i][1]);
            vx[i] = cond->flSamples[i][0];
            vy[i] = cond->flSamples[i][1];

            if (xx < 0 || xx >= w || yy < 0 || yy >= h)
            {
                cond->flConfidence[i] = 0.0;
            }
            else
            {
                cond->flConfidence[i] = calc_likelihood(frame, xx, yy);
                cvCircle(frame, cvPoint(xx, yy), 1, CV_RGB(0, 0, 255), -1);
            }
        }

        // (10) estimate the next state of the model
        cvConDensUpdateByTime(cond);
        printf("correction \n");
        ProccTimePrint(Atime, "time :");   // print processing time

        // draw the estimated state
        cv::Point statePt((int)cond->State[0], (int)cond->State[1]);
        cvCircle(frame, statePt, 5, CV_RGB(255, 255, 255), 5);
        printf("-----------\n");

        cvShowImage("Condensation", frame);
        if (cvWaitKey(10) > 10)
            break;
    }

    cvDestroyWindow("Condensation");
    cvReleaseCapture(&capture);
    cvReleaseConDensation(&cond);
    cvReleaseMat(&lowerBound);
    cvReleaseMat(&upperBound);

    return 0;
}
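A side note of mine (not part of the original OpenCV sample): the sixteen DynamMatr values in step (7) are just a 4x4 constant-velocity transition matrix for the state [x, y, vx, vy]. A minimal sketch of a more readable way to fill it, assuming the cond pointer from the listing above and an extra #include <cstring> for memcpy:

// constant-velocity model:
//   x'  = x  + vx,   y'  = y  + vy,   vx' = vx,   vy' = vy
const float A[4][4] = {
    { 1, 0, 1, 0 },
    { 0, 1, 0, 1 },
    { 0, 0, 1, 0 },
    { 0, 0, 0, 1 }
};
memcpy(cond->DynamMatr, A, sizeof(A));   // DynamMatr is a row-major 4x4 float array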
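Another note of mine: calc_likelihood() weights each particle with a Gaussian of the RGB distance from pure red (255, 0, 0), so the tracker follows red objects. If you want to follow a different colour, the same idea generalizes; the helper below is a hypothetical variant of mine (it drops into the listing above and reuses its includes), not part of the original sample.

// Hypothetical variant of calc_likelihood(): same Gaussian weighting,
// but measured against an arbitrary reference colour (refR, refG, refB).
// calc_likelihood() above is the special case refR=255, refG=0, refB=0.
float calc_likelihood_color(IplImage *img, int x, int y,
                            float refR, float refG, float refB)
{
    float sigma = 50.0f;
    uchar *p = (uchar *)img->imageData + img->widthStep * y + x * 3;   // BGR pixel
    float db = p[0] - refB, dg = p[1] - refG, dr = p[2] - refR;
    float dist2 = db * db + dg * dg + dr * dr;
    return 1.0f / (sqrtf(2.0f * (float)CV_PI) * sigma)
           * expf(-dist2 / (2.0f * sigma * sigma));
}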
#include < iostream> #include "opencv2\objdetect\objdetect.hpp" #include "opencv2\highgui\highgui.hpp" #include "opencv2\imgproc\imgproc.hpp" #include "opencv2\cudaobjdetect.hpp" #include "opencv2\cudaimgproc.hpp" #include "opencv2\cudawarping.hpp" #ifdef _DEBUG #pragma comment(lib, "opencv_core300d.lib") #pragma comment(lib, "opencv_highgui300d.lib") #pragma comment(lib, "opencv_imgcodecs300d.lib") #pragma comment(lib, "opencv_objdetect300d.lib") #pragma comment(lib, "opencv_imgproc300d.lib") #pragma comment(lib, "opencv_videoio300d.lib") #pragma comment(lib, "opencv_cudaobjdetect300d.lib") #pragma comment(lib, "opencv_cudawarping300d.lib") #pragma comment(lib, "opencv_cudaimgproc300d.lib") #else #pragma comment(lib, "opencv_core300.lib") #pragma comment(lib, "opencv_highgui300.lib") #pragma comment(lib, "opencv_imgcodecs300.lib") #pragma comment(lib, "opencv_objdetect300.lib") #pragma comment(lib, "opencv_imgproc300.lib") #pragma comment(lib, "opencv_videoio300.lib") #pragma comment(lib, "opencv_cudaobjdetect300.lib") #pragma comment(lib, "opencv_cudawarping300.lib") #pragma comment(lib, "opencv_cudaimgproc300.lib") #endif using namespace std; using namespace cv; void main() { cv::Ptr< cv::cuda::HOG> d_hog = cv::cuda::HOG::create(Size(48, 96)); //Size(64, 128));// d_hog->setSVMDetector(d_hog->getDefaultPeopleDetector()); //video loading Mat img; VideoCapture cap("M:\\____videoSample____\\tracking\\TownCentreXVID.avi"); //loading test cap >> img; if (img.empty()) return; //window namedWindow("pedestrian", 0); //processing time check unsigned long AAtime = 0, BBtime = 0; //resize double scale = float(800) / img.cols; cuda::GpuMat GpuImg, rGpuImg; GpuImg.upload(img); cuda::resize(GpuImg, rGpuImg, Size(GpuImg.cols * scale, GpuImg.rows * scale)); Mat rInimg; rGpuImg.download(rInimg); while (1) { //time check AAtime = getTickCount(); //loading cap >> img; if (img.empty()) break; //resize GpuImg.upload(img); cuda::resize(GpuImg, rGpuImg, Size(GpuImg.cols * scale, GpuImg.rows * scale)); rGpuImg.download(rInimg); cuda::cvtColor(rGpuImg, rGpuImg, CV_BGR2GRAY); vector< Point> found_locations; d_hog->detect(rGpuImg, found_locations); std::vector< cv::Rect> found_locations_rect; d_hog->detectMultiScale(rGpuImg, found_locations_rect); for (int i = 0; i < found_locations_rect.size(); ++i) { cv::rectangle(rInimg, found_locations_rect[i], CvScalar(0, 0, 255), 1); } imshow("pedestrian", rInimg); waitKey(10); //time check BBtime = getTickCount(); double s_time = (BBtime - AAtime) / getTickFrequency(); double fps_time = 1/s_time; printf("%.2lf sec / %.2lf fps \n", s_time, fps_time); } }