diff --git a/core/sample-vpColor.cpp b/core/sample-vpColor.cpp index c46c637..a17c615 100644 --- a/core/sample-vpColor.cpp +++ b/core/sample-vpColor.cpp @@ -20,7 +20,7 @@ int main() d = new vpDisplayGDI; #elif defined(VISP_HAVE_D3D9) d = new vpDisplayD3D; -#elif defined(VISP_HAVE_OPENCV) +#elif defined(HAVE_OPENCV_HIGHGUI) d = new vpDisplayOpenCV; #endif diff --git a/core/sample-vpImageConvert-3.cpp b/core/sample-vpImageConvert-3.cpp deleted file mode 100644 index 73549f3..0000000 --- a/core/sample-vpImageConvert-3.cpp +++ /dev/null @@ -1,25 +0,0 @@ -#include -#include -#include - -int main() -{ -#if defined(VISP_HAVE_OPENCV) && (VISP_HAVE_OPENCV_VERSION < 0x020408) - vpImage Ic; // A color image - IplImage* Ip = NULL; - - // Read an image on a disk - vpImageIo::readPPM(Ic, "image.ppm"); - // Convert the vpImage in to color IplImage - vpImageConvert::convert(Ic, Ip); - // Treatments on IplImage - //... - // Save the IplImage on the disk - cvSaveImage("Ipl.ppm", Ip); - - //Release Ip header and data - cvReleaseImage(&Ip); -#endif -} - - diff --git a/core/sample-vpImageConvert-4.cpp b/core/sample-vpImageConvert-4.cpp deleted file mode 100644 index f16d5d7..0000000 --- a/core/sample-vpImageConvert-4.cpp +++ /dev/null @@ -1,24 +0,0 @@ -#include -#include -#include - -int main() -{ -#if defined(VISP_HAVE_OPENCV) && (VISP_HAVE_OPENCV_VERSION < 0x020408) - vpImage Ig; // A greyscale image - IplImage* Ip = NULL; - - // Read an image on a disk - vpImageIo::readPGM(Ig, "image.pgm"); - // Convert the vpImage in to greyscale IplImage - vpImageConvert::convert(Ig, Ip); - // Treatments on IplImage Ip - //... 
- // Save the IplImage on the disk - cvSaveImage("Ipl.pgm", Ip); - - //Release Ip header and data - cvReleaseImage(&Ip); -#endif -} - diff --git a/core/sample-vpImageConvert-6.cpp b/core/sample-vpImageConvert-6.cpp index ae1de1e..9212180 100644 --- a/core/sample-vpImageConvert-6.cpp +++ b/core/sample-vpImageConvert-6.cpp @@ -8,13 +8,13 @@ int main() { -#if defined(VISP_HAVE_OPENCV) && (VISP_HAVE_OPENCV_VERSION >= 0x020100) && defined(HAVE_OPENCV_IMGCODECS) - vpImage Ig; // A greyscale image +#if defined(VISP_HAVE_OPENCV) && defined(HAVE_OPENCV_IMGPROC) && defined(HAVE_OPENCV_IMGCODECS) + vpImage Ig; // A grayscale image cv::Mat Ip; // Read an image on a disk vpImageIo::readPGM(Ig, "image.pgm"); - // Convert the vpImage in to greyscale cv::Mat + // Convert the vpImage into grayscale cv::Mat vpImageConvert::convert(Ig, Ip); // Treatments on cv::Mat Ip //... diff --git a/core/sample-vpImageConvert-7.cpp b/core/sample-vpImageConvert-7.cpp deleted file mode 100644 index cf045ff..0000000 --- a/core/sample-vpImageConvert-7.cpp +++ /dev/null @@ -1,26 +0,0 @@ -#include -#include -#include - -#if defined(HAVE_OPENCV_IMGCODECS) -#include -#endif - -int main() -{ -#if defined(VISP_HAVE_OPENCV) && (VISP_HAVE_OPENCV_VERSION >= 0x020100) && defined(HAVE_OPENCV_IMGCODECS) - vpImage Ig; // A greyscale image - cv::Mat Ip; - - // Read an image on a disk - vpImageIo::readPGM(Ig, "image.pgm"); - // Convert the vpImage in to color cv::Mat. - vpImageConvert::convert(Ig, Ip); - // Treatments on cv::Mat Ip - //... 
- // Save the cv::Mat on the disk - cv::imwrite("image.pgm", Ip); -#endif -} - - diff --git a/core/sample-vpImageConvert-8.cpp b/core/sample-vpImageConvert-8.cpp index c277c18..9159fd1 100644 --- a/core/sample-vpImageConvert-8.cpp +++ b/core/sample-vpImageConvert-8.cpp @@ -8,7 +8,7 @@ int main() { -#if defined(VISP_HAVE_OPENCV) && (VISP_HAVE_OPENCV_VERSION >= 0x020100) && defined(HAVE_OPENCV_IMGCODECS) +#if defined(VISP_HAVE_OPENCV) && defined(HAVE_OPENCV_IMGPROC) && defined(HAVE_OPENCV_IMGCODECS) vpImage Ig; // A grayscale image cv::Mat Ip; diff --git a/core/sample-vpImageConvert-9.cpp b/core/sample-vpImageConvert-9.cpp index 6765473..195e584 100644 --- a/core/sample-vpImageConvert-9.cpp +++ b/core/sample-vpImageConvert-9.cpp @@ -9,7 +9,7 @@ int main() { -#if defined(VISP_HAVE_OPENCV) && (VISP_HAVE_OPENCV_VERSION >= 0x020100) && defined(HAVE_OPENCV_IMGCODECS) +#if defined(VISP_HAVE_OPENCV) && defined(HAVE_OPENCV_IMGPROC) && defined(HAVE_OPENCV_IMGCODECS) vpImage Ic; // A color image cv::Mat Ip; diff --git a/core/sample-vpImageConvert.cpp b/core/sample-vpImageConvert.cpp deleted file mode 100644 index 6f6649e..0000000 --- a/core/sample-vpImageConvert.cpp +++ /dev/null @@ -1,21 +0,0 @@ -#include -#include -#include - -int main() -{ -#if defined(VISP_HAVE_OPENCV) && (VISP_HAVE_OPENCV_VERSION < 0x020408) - vpImage Ig; // A grayscale image - IplImage* Ip; - - // Read an image on a disk with openCV library - Ip = cvLoadImage("image.pgm", CV_LOAD_IMAGE_GRAYSCALE); - // Convert the grayscale IplImage into vpImage - vpImageConvert::convert(Ip, Ig); - - // ... 
- - // Release Ip header and data - cvReleaseImage(&Ip); -#endif -} diff --git a/core/sample-vpImageFilter.cpp b/core/sample-vpImageFilter.cpp index cbb8cb4..80128cd 100644 --- a/core/sample-vpImageFilter.cpp +++ b/core/sample-vpImageFilter.cpp @@ -3,7 +3,7 @@ int main() { -#if (VISP_HAVE_OPENCV_VERSION >= 0x020100) && (VISP_HAVE_OPENCV_VERSION < 0x030000) // Cany uses OpenCV >=2.1.0 and < 3.0.0 +#if defined(VISP_HAVE_OPENCV) && defined(HAVE_OPENCV_IMGPROC) // Constants for the Canny operator. const unsigned int gaussianFilterSize = 5; const double thresholdCanny = 15; @@ -13,9 +13,9 @@ int main() vpImage Isrc; vpImage Icanny; - //First grab the source image Isrc. + // First grab the source image Isrc. - //Apply the Canny edge operator and set the Icanny image. + // Apply the Canny edge operator and set the Icanny image. vpImageFilter::canny(Isrc, Icanny, gaussianFilterSize, thresholdCanny, apertureSobel); #endif return (0); diff --git a/detection/sample-vpDetectorFace.cpp b/detection/sample-vpDetectorFace.cpp index 1288f0b..c1d0b50 100644 --- a/detection/sample-vpDetectorFace.cpp +++ b/detection/sample-vpDetectorFace.cpp @@ -2,13 +2,13 @@ int main() { -#if (VISP_HAVE_OPENCV_VERSION >= 0x020200) +#if defined(VISP_HAVE_OPENCV) && defined(HAVE_OPENCV_OBJDETECT) vpImage I; vpDetectorFace face_detector; face_detector.setCascadeClassifierFile("haarcascade_frontalface_alt.xml"); - while(1) { - // acquire a new image in I + while (1) { + // Acquire a new image in I bool face_found = face_detector.detect(I); if (face_found) { vpRect face_bbox = face_detector.getBBox(0); // largest face has index 0 diff --git a/gui/sample-vpDisplay-2.cpp b/gui/sample-vpDisplay-2.cpp index 83d4b2f..b9fad35 100644 --- a/gui/sample-vpDisplay-2.cpp +++ b/gui/sample-vpDisplay-2.cpp @@ -23,7 +23,7 @@ int main() d = new vpDisplayGDI; #elif defined(VISP_HAVE_D3D9) d = new vpDisplayD3D; -#elif defined(VISP_HAVE_OPENCV) +#elif defined(HAVE_OPENCV_HIGHGUI) d = new vpDisplayOpenCV; #endif @@ -43,11 
+43,11 @@ int main() vpDisplay::flush(I); // Updates the color image with the original loaded image and the overlay - vpDisplay::getImage(I, Ioverlay) ; + vpDisplay::getImage(I, Ioverlay); // Write the color image on the disk std::string ofilename("overlay.ppm"); - vpImageIo::writePPM(Ioverlay, ofilename) ; + vpImageIo::writePPM(Ioverlay, ofilename); // Wait for a click in the display window vpDisplay::getClick(I); diff --git a/gui/sample-vpDisplay-3.cpp b/gui/sample-vpDisplay-3.cpp index 26e4d21..bc037f7 100644 --- a/gui/sample-vpDisplay-3.cpp +++ b/gui/sample-vpDisplay-3.cpp @@ -23,7 +23,7 @@ int main() d = new vpDisplayGDI; #elif defined(VISP_HAVE_D3D9) d = new vpDisplayD3D; -#elif defined(VISP_HAVE_OPENCV) +#elif defined(HAVE_OPENCV_HIGHGUI) d = new vpDisplayOpenCV; #endif @@ -42,11 +42,11 @@ int main() vpDisplay::flush(I); // Updates the color image with the original loaded image and the overlay - vpDisplay::getImage(I, Ioverlay) ; + vpDisplay::getImage(I, Ioverlay); // Write the color image on the disk std::string ofilename("overlay.ppm"); - vpImageIo::writePPM(Ioverlay, ofilename) ; + vpImageIo::writePPM(Ioverlay, ofilename); // Wait for a click in the display window vpDisplay::getClick(I); diff --git a/gui/sample-vpDisplay-4.cpp b/gui/sample-vpDisplay-4.cpp index 1e53c9b..3589f5b 100644 --- a/gui/sample-vpDisplay-4.cpp +++ b/gui/sample-vpDisplay-4.cpp @@ -20,7 +20,7 @@ int main() d = new vpDisplayGDI; #elif defined(VISP_HAVE_D3D9) d = new vpDisplayD3D; -#elif defined(VISP_HAVE_OPENCV) +#elif defined(HAVE_OPENCV_HIGHGUI) d = new vpDisplayOpenCV; #else std::cout << "Sorry, no video device is available" << std::endl; @@ -52,11 +52,11 @@ int main() event = vpDisplay::getKeyboardEvent(I, &key[0], false); if (event) { std::cout << "Key detected: " << key << std::endl; - cpt_event ++; + cpt_event++; } vpTime::wait(5); // wait 5 ms - } while(cpt_event < 5); + } while (cpt_event < 5); #ifdef VISP_HAVE_DISPLAY delete d; diff --git a/gui/sample-vpDisplay-5.cpp 
b/gui/sample-vpDisplay-5.cpp index 3bda7e4..40ee794 100644 --- a/gui/sample-vpDisplay-5.cpp +++ b/gui/sample-vpDisplay-5.cpp @@ -18,7 +18,7 @@ int main() d = new vpDisplayGDI; #elif defined(VISP_HAVE_D3D9) d = new vpDisplayD3D; -#elif defined(VISP_HAVE_OPENCV) +#elif defined(HAVE_OPENCV_HIGHGUI) d = new vpDisplayOpenCV; #else std::cout << "Sorry, no video device is available" << std::endl; @@ -50,11 +50,11 @@ int main() event = vpDisplay::getKeyboardEvent(I, &key[0], false); if (event) { std::cout << "Key detected: " << key << std::endl; - cpt_event ++; + cpt_event++; } vpTime::wait(5); // wait 5 ms - } while(cpt_event < 5); + } while (cpt_event < 5); #ifdef VISP_HAVE_DISPLAY delete d; diff --git a/gui/sample-vpDisplay-6.cpp b/gui/sample-vpDisplay-6.cpp index 3bda7e4..40ee794 100644 --- a/gui/sample-vpDisplay-6.cpp +++ b/gui/sample-vpDisplay-6.cpp @@ -18,7 +18,7 @@ int main() d = new vpDisplayGDI; #elif defined(VISP_HAVE_D3D9) d = new vpDisplayD3D; -#elif defined(VISP_HAVE_OPENCV) +#elif defined(HAVE_OPENCV_HIGHGUI) d = new vpDisplayOpenCV; #else std::cout << "Sorry, no video device is available" << std::endl; @@ -50,11 +50,11 @@ int main() event = vpDisplay::getKeyboardEvent(I, &key[0], false); if (event) { std::cout << "Key detected: " << key << std::endl; - cpt_event ++; + cpt_event++; } vpTime::wait(5); // wait 5 ms - } while(cpt_event < 5); + } while (cpt_event < 5); #ifdef VISP_HAVE_DISPLAY delete d; diff --git a/gui/sample-vpDisplay-7.cpp b/gui/sample-vpDisplay-7.cpp index caef39f..b71cfdc 100644 --- a/gui/sample-vpDisplay-7.cpp +++ b/gui/sample-vpDisplay-7.cpp @@ -21,7 +21,7 @@ int main() d = new vpDisplayGDI; #elif defined(VISP_HAVE_D3D9) d = new vpDisplayD3D; -#elif defined(VISP_HAVE_OPENCV) +#elif defined(HAVE_OPENCV_HIGHGUI) d = new vpDisplayOpenCV; #else std::cout << "Sorry, no video device is available" << std::endl; diff --git a/gui/sample-vpDisplay.cpp b/gui/sample-vpDisplay.cpp index 83c9859..6077921 100644 --- a/gui/sample-vpDisplay.cpp +++ 
b/gui/sample-vpDisplay.cpp @@ -7,88 +7,88 @@ int main() { - try { - vpImage I; // Grey level image + try { + vpImage I; // Grey level image - // Read an image in PGM P5 format + // Read an image in PGM P5 format #ifdef _WIN32 - std::string filename("C:/temp/ViSP-images/Klimt/Klimt.ppm"); + std::string filename("C:/temp/ViSP-images/Klimt/Klimt.ppm"); #else - std::string filename("/local/soft/ViSP/ViSP-images/Klimt/Klimt.ppm"); + std::string filename("/local/soft/ViSP/ViSP-images/Klimt/Klimt.ppm"); #endif - vpDisplay *d; + vpDisplay *d; - // Depending on the detected third party libraries, we instantiate here the - // first video device which is available + // Depending on the detected third party libraries, we instantiate here the + // first video device which is available #if defined(VISP_HAVE_X11) - d = new vpDisplayX; + d = new vpDisplayX; #elif defined(VISP_HAVE_GTK) - d = new vpDisplayGTK; + d = new vpDisplayGTK; #elif defined(VISP_HAVE_GDI) - d = new vpDisplayGDI; + d = new vpDisplayGDI; #elif defined(VISP_HAVE_D3D9) - d = new vpDisplayD3D; -#elif defined(VISP_HAVE_OPENCV) - d = new vpDisplayOpenCV; + d = new vpDisplayD3D; +#elif defined(HAVE_OPENCV_HIGHGUI) + d = new vpDisplayOpenCV; #endif - // Initialize the display with the image I. Display and image are - // now link together. + // Initialize the display with the image I. Display and image are + // now link together. 
#ifdef VISP_HAVE_DISPLAY - d->init(I); + d->init(I); #endif - // Specify the window location - vpDisplay::setWindowPosition(I, 400, 100); - - // Set the display window title - vpDisplay::setTitle(I, "My image"); - - // To initialize the video device, it is also possible to replace - // the 3 previous lines by: - // d->init(I, 400, 100, "My image"); - - // Set the display background with image I content - vpDisplay::display(I); - - // Draw a red rectangle in the display overlay (foreground) - vpDisplay::displayRectangle(I, 10, 10, 100, 20, vpColor::red, true); - - // Draw a red rectangle in the display overlay (foreground) - vpImagePoint topLeftCorner; - topLeftCorner.set_i(50); - topLeftCorner.set_j(10); - vpDisplay::displayRectangle(I, topLeftCorner, 100, 20, vpColor::green, true); - - // Flush the foreground and background display - vpDisplay::flush(I); - - // Get non blocking keyboard events - std::cout << "Check keyboard events..." << std::endl; - char key[10]; - bool ret; - for (int i=0; i< 200; i++) { - bool ret = vpDisplay::getKeyboardEvent(I, key, false); - if (ret) - std::cout << "keyboard event: key: " << "\"" << key << "\"" << std::endl; - vpTime::wait(40); - } - - // Get a blocking keyboard event - std::cout << "Wait for a keyboard event..." << std::endl; - ret = vpDisplay::getKeyboardEvent(I, key, true); - std::cout << "keyboard event: " << ret << std::endl; - if (ret) - std::cout << "key: " << "\"" << key << "\"" << std::endl; - - // Wait for a click in the display window - std::cout << "Wait for a button click..." 
<< std::endl; - vpDisplay::getClick(I); + // Specify the window location + vpDisplay::setWindowPosition(I, 400, 100); + + // Set the display window title + vpDisplay::setTitle(I, "My image"); + + // To initialize the video device, it is also possible to replace + // the 3 previous lines by: + // d->init(I, 400, 100, "My image"); + + // Set the display background with image I content + vpDisplay::display(I); + + // Draw a red rectangle in the display overlay (foreground) + vpDisplay::displayRectangle(I, 10, 10, 100, 20, vpColor::red, true); + + // Draw a red rectangle in the display overlay (foreground) + vpImagePoint topLeftCorner; + topLeftCorner.set_i(50); + topLeftCorner.set_j(10); + vpDisplay::displayRectangle(I, topLeftCorner, 100, 20, vpColor::green, true); + + // Flush the foreground and background display + vpDisplay::flush(I); + + // Get non blocking keyboard events + std::cout << "Check keyboard events..." << std::endl; + char key[10]; + bool ret; + for (int i = 0; i < 200; i++) { + bool ret = vpDisplay::getKeyboardEvent(I, key, false); + if (ret) + std::cout << "keyboard event: key: " << "\"" << key << "\"" << std::endl; + vpTime::wait(40); + } + + // Get a blocking keyboard event + std::cout << "Wait for a keyboard event..." << std::endl; + ret = vpDisplay::getKeyboardEvent(I, key, true); + std::cout << "keyboard event: " << ret << std::endl; + if (ret) + std::cout << "key: " << "\"" << key << "\"" << std::endl; + + // Wait for a click in the display window + std::cout << "Wait for a button click..." << std::endl; + vpDisplay::getClick(I); #ifdef VISP_HAVE_DISPLAY - delete d; + delete d; #endif - } - catch(...) {} + } + catch (...) 
{ } } diff --git a/gui/sample-vpDisplayOpenCV.cpp b/gui/sample-vpDisplayOpenCV.cpp index fb4afb7..45bb94b 100644 --- a/gui/sample-vpDisplayOpenCV.cpp +++ b/gui/sample-vpDisplayOpenCV.cpp @@ -3,11 +3,10 @@ int main() { -#if defined(VISP_HAVE_OPENCV) && (VISP_HAVE_OPENCV_VERSION < 0x030000) +#if defined(VISP_HAVE_OPENCV) && defined(HAVE_OPENCV_HIGHGUI) vpImage I; // Grey level image // Read an image in PGM P5 format - //vpImageIo::readPGM(I, "/local/soft/ViSP/ViSP-images/Klimt/Klimt.pgm"); vpImageIo::readPGM(I, "/tmp/Klimt.pgm"); vpDisplayOpenCV d; @@ -41,7 +40,7 @@ int main() std::cout << "Check keyboard events..." << std::endl; char key[10]; bool ret; - for (int i=0; i< 200; i++) { + for (int i = 0; i < 200; i++) { bool ret = vpDisplay::getKeyboardEvent(I, key, false); if (ret) std::cout << "keyboard event: key: " << "\"" << key << "\"" << std::endl; diff --git a/io/sample-vpVideoWriter.cpp b/io/sample-vpVideoWriter.cpp index f1e477e..213a0dd 100644 --- a/io/sample-vpVideoWriter.cpp +++ b/io/sample-vpVideoWriter.cpp @@ -2,6 +2,7 @@ int main() { +#if defined(VISP_HAVE_OPENCV) && defined(HAVE_OPENCV_VIDEOIO) && defined(HAVE_OPENCV_HIGHGUI) vpImage I; vpVideoWriter writer; @@ -10,17 +11,16 @@ int main() writer.setFramerate(30); #if VISP_HAVE_OPENCV_VERSION >= 0x030000 - writer.setCodec( cv::VideoWriter::fourcc('P','I','M','1') ); -#elif VISP_HAVE_OPENCV_VERSION >= 0x020100 - writer.setCodec( CV_FOURCC('P','I','M','1') ); + writer.setCodec(cv::VideoWriter::fourcc('P', 'I', 'M', '1')); +#else + writer.setCodec(CV_FOURCC('P', 'I', 'M', '1')); #endif writer.setFileName("./test.mpeg"); writer.open(I); - for ( ; ; ) - { + for (; ; ) { // Here the code to capture or create an image and store it in I. 
// Save the image @@ -28,6 +28,6 @@ int main() } writer.close(); - +#endif return 0; } diff --git a/json/sample-json-vpMe.cpp b/json/sample-json-vpMe.cpp index 66d1764..3a50ea6 100644 --- a/json/sample-json-vpMe.cpp +++ b/json/sample-json-vpMe.cpp @@ -6,7 +6,8 @@ int main() std::string filename = "me.json"; { vpMe me; - me.setThreshold(10000); + me.setLikelihoodThresholdType(vpMe::NORMALIZED_THRESHOLD); + me.setThreshold(20); me.setMaskNumber(180); me.setMaskSign(0); me.setMu1(0.5); diff --git a/klt/sample-vpKltOpencv.cpp b/klt/sample-vpKltOpencv.cpp index 186d911..9952bfd 100644 --- a/klt/sample-vpKltOpencv.cpp +++ b/klt/sample-vpKltOpencv.cpp @@ -5,21 +5,20 @@ int main() { -#if (VISP_HAVE_OPENCV_VERSION >= 0x010100) && (VISP_HAVE_OPENCV_VERSION < 0x020408) +#if defined(VISP_HAVE_OPENCV) && defined(HAVE_OPENCV_HIGHGUI) && defined(HAVE_OPENCV_IMGPROC) && defined(HAVE_OPENCV_VIDEO) vpImage I; - IplImage* Icv = NULL; + cv::Mat Icv; vpKltOpencv klt; - //First grab the initial image I + // First grab the initial image I - //Convert the image I to the IplImage format. + // Convert the image I to the cv::Mat format. vpImageConvert::convert(I, Icv); - //Initialise the tracking on the whole image. - klt.initTracking(Icv, NULL); + // Initialise the tracking on the whole image. + klt.initTracking(Icv); - while(true) - { + while (true) { // Grab a new image and convert it to the OpenCV format. vpImageConvert::convert(I, Icv); @@ -29,8 +28,6 @@ int main() // Display the features tracked at the current iteration. klt.display(I); } - - cvReleaseImage(&Icv); #else std::cout << "vpKltOpencv requires ViSP with OpenCV." 
<< std::endl; #endif diff --git a/mbt/sample-vpMbEdgeKltTracker-1.cpp b/mbt/sample-vpMbEdgeKltTracker-1.cpp index e3032c3..ad1f75b 100644 --- a/mbt/sample-vpMbEdgeKltTracker-1.cpp +++ b/mbt/sample-vpMbEdgeKltTracker-1.cpp @@ -7,7 +7,7 @@ int main() { -#if defined VISP_HAVE_OPENCV +#if defined(VISP_HAVE_OPENCV) && defined(HAVE_OPENCV_IMGPROC) && defined(HAVE_OPENCV_VIDEO) vpMbEdgeKltTracker tracker; // Create an hybrid model based tracker. vpImage I; vpHomogeneousMatrix cMo; // Pose used to display the model. @@ -18,7 +18,7 @@ int main() #if defined VISP_HAVE_X11 vpDisplayX display; - display.init(I,100,100,"Mb Hybrid Tracker"); + display.init(I, 100, 100, "Mb Hybrid Tracker"); #endif tracker.loadConfigFile("cube.xml"); // Load the configuration of the tracker @@ -26,14 +26,13 @@ int main() // load the 3d model, to read .wrl model coin is required, if coin is not installed .cao file can be used. tracker.loadModel("cube.cao"); - while(true){ + while (true) { // acquire a new image // Get the pose using any method vpDisplay::display(I); tracker.display(I, cMo, cam, vpColor::darkRed, 1, true); // Display the model at the computed pose. vpDisplay::flush(I); } - #endif return 0; diff --git a/mbt/sample-vpMbEdgeKltTracker-2.cpp b/mbt/sample-vpMbEdgeKltTracker-2.cpp index 72afdac..6d51981 100644 --- a/mbt/sample-vpMbEdgeKltTracker-2.cpp +++ b/mbt/sample-vpMbEdgeKltTracker-2.cpp @@ -7,7 +7,7 @@ int main() { -#if defined(VISP_HAVE_OPENCV) && (VISP_HAVE_OPENCV_VERSION < 0x030000) +#if defined(VISP_HAVE_OPENCV) && defined(HAVE_OPENCV_IMGPROC) && defined(HAVE_OPENCV_VIDEO) vpMbEdgeKltTracker tracker; // Create an hybrid model based tracker. vpImage I; vpHomogeneousMatrix cMo; // Pose used to display the model. 
@@ -18,14 +18,14 @@ int main() #if defined VISP_HAVE_X11 vpDisplayX display; - display.init(I,100,100,"Mb Hybrid Tracker"); + display.init(I, 100, 100, "Mb Hybrid Tracker"); #endif tracker.loadConfigFile("cube.xml"); // Load the configuration of the tracker tracker.getCameraParameters(cam); // Get the camera parameters used by the tracker (from the configuration file). tracker.loadModel("cube.cao"); // load the 3d model, to read .wrl model coi is required, if coin is not installed .cao file can be used. - while(true){ + while (true) { // acquire a new image // Get the pose using any method vpDisplay::display(I); diff --git a/mbt/sample-vpMbEdgeKltTracker.cpp b/mbt/sample-vpMbEdgeKltTracker.cpp index b3d7622..f9009d6 100644 --- a/mbt/sample-vpMbEdgeKltTracker.cpp +++ b/mbt/sample-vpMbEdgeKltTracker.cpp @@ -8,7 +8,7 @@ int main() { -#if defined(VISP_HAVE_MODULE_KLT) && (defined(VISP_HAVE_OPENCV) && (VISP_HAVE_OPENCV_VERSION >= 0x020100)) +#if defined(VISP_HAVE_OPENCV) && defined(HAVE_OPENCV_IMGPROC) && defined(HAVE_OPENCV_VIDEO) vpMbEdgeKltTracker tracker; // Create an hybrid model based tracker. vpImage I; vpHomogeneousMatrix cMo; // Pose computed using the tracker. @@ -19,15 +19,15 @@ int main() #if defined VISP_HAVE_X11 vpDisplayX display; - display.init(I,100,100,"Mb Hybrid Tracker"); + display.init(I, 100, 100, "Mb Hybrid Tracker"); #endif tracker.loadConfigFile("cube.xml"); // Load the configuration of the tracker tracker.getCameraParameters(cam); // Get the camera parameters used by the tracker (from the configuration file). tracker.loadModel("cube.cao"); // Load the 3d model in cao format. No 3rd party library is required - tracker.initClick(I, "cube.init"); // Initialise manually the pose by clicking on the image points associated to the 3d points containned in the cube.init file. + tracker.initClick(I, "cube.init"); // Initialise manually the pose by clicking on the image points associated to the 3d points contained in the cube.init file. 
- while(true){ + while (true) { // Acquire a new image vpDisplay::display(I); tracker.track(I); // Track the object on this image diff --git a/mbt/sample-vpMbKltTracker-1.cpp b/mbt/sample-vpMbKltTracker-1.cpp index 954ba0d..1c9abbd 100644 --- a/mbt/sample-vpMbKltTracker-1.cpp +++ b/mbt/sample-vpMbKltTracker-1.cpp @@ -6,7 +6,7 @@ int main() { -#if defined(VISP_HAVE_MODULE_KLT) && (defined(VISP_HAVE_OPENCV) && (VISP_HAVE_OPENCV_VERSION >= 0x020100)) +#if defined(VISP_HAVE_OPENCV) && defined(HAVE_OPENCV_IMGPROC) && defined(HAVE_OPENCV_VIDEO) vpMbKltTracker tracker; // Create a model based tracker via Klt Points. vpImage I; vpHomogeneousMatrix cMo; // Pose used in entry (has to be defined), then computed using the tracker. @@ -18,7 +18,7 @@ int main() tracker.loadModel("cube.cao"); // load the 3d model, to read .wrl model coi is required, if coin is not installed .cao file can be used. tracker.initFromPose(I, cMo); // initialise the tracker with the given pose. - while(true){ + while (true) { // acquire a new image tracker.track(I); // track the object on this image tracker.getPose(cMo); // get the pose diff --git a/mbt/sample-vpMbKltTracker-2.cpp b/mbt/sample-vpMbKltTracker-2.cpp index 207d304..95a3549 100644 --- a/mbt/sample-vpMbKltTracker-2.cpp +++ b/mbt/sample-vpMbKltTracker-2.cpp @@ -7,7 +7,7 @@ int main() { -#if defined(VISP_HAVE_MODULE_KLT) && (defined(VISP_HAVE_OPENCV) && (VISP_HAVE_OPENCV_VERSION >= 0x020100)) +#if defined(VISP_HAVE_OPENCV) && defined(HAVE_OPENCV_IMGPROC) && defined(HAVE_OPENCV_VIDEO) vpMbKltTracker tracker; // Create a model based tracker via Klt Points. vpImage I; vpHomogeneousMatrix cMo; // Pose used to display the model. 
@@ -18,14 +18,14 @@ int main() #if defined VISP_HAVE_X11 vpDisplayX display; - display.init(I,100,100,"Mb Klt Tracker"); + display.init(I, 100, 100, "Mb Klt Tracker"); #endif tracker.loadConfigFile("cube.xml"); // Load the configuration of the tracker tracker.getCameraParameters(cam); // Get the camera parameters used by the tracker (from the configuration file). tracker.loadModel("cube.cao"); // load the 3d model, to read .wrl model coi is required, if coin is not installed .cao file can be used. - while(true){ + while (true) { // acquire a new image // Get the pose using any method vpDisplay::display(I); diff --git a/mbt/sample-vpMbKltTracker.cpp b/mbt/sample-vpMbKltTracker.cpp index 20305df..6c5da03 100644 --- a/mbt/sample-vpMbKltTracker.cpp +++ b/mbt/sample-vpMbKltTracker.cpp @@ -8,7 +8,7 @@ int main() { -#if defined(VISP_HAVE_MODULE_KLT) && (defined(VISP_HAVE_OPENCV) && (VISP_HAVE_OPENCV_VERSION >= 0x020100)) +#if defined(VISP_HAVE_OPENCV) && defined(HAVE_OPENCV_IMGPROC) && defined(HAVE_OPENCV_VIDEO) vpMbKltTracker tracker; // Create a model based tracker via KLT points. vpImage I; vpHomogeneousMatrix cMo; // Pose computed using the tracker. @@ -19,15 +19,15 @@ int main() #if defined VISP_HAVE_X11 vpDisplayX display; - display.init(I,100,100,"Mb Klt Tracker"); + display.init(I, 100, 100, "Mb Klt Tracker"); #endif tracker.loadConfigFile("cube.xml"); // Load the configuration of the tracker tracker.getCameraParameters(cam); // Get the camera parameters used by the tracker (from the configuration file). tracker.loadModel("cube.cao"); // Load the 3d model in cao format. No 3rd party library is required - tracker.initClick(I, "cube.init"); // Initialise manually the pose by clicking on the image points associated to the 3d points containned in the cube.init file. + tracker.initClick(I, "cube.init"); // Initialise manually the pose by clicking on the image points associated to the 3d points contained in the cube.init file. 
- while(true){ + while (true) { // Acquire a new image vpDisplay::display(I); tracker.track(I); // Track the object on this image diff --git a/vision/sample-vpKeyPoint-2.cpp b/vision/sample-vpKeyPoint-2.cpp index a386155..3fed7d3 100644 --- a/vision/sample-vpKeyPoint-2.cpp +++ b/vision/sample-vpKeyPoint-2.cpp @@ -5,8 +5,9 @@ #include #include -#if (VISP_HAVE_OPENCV_VERSION >= 0x020101) -int main() { +#if defined(VISP_HAVE_OPENCV) && defined(HAVE_OPENCV_IMGPROC) && defined(HAVE_OPENCV_FEATURES2D) +int main() +{ vpImage IRef, I, IMatching; vpImageIo::read(IRef, "box.png"); vpImageIo::read(I, "box_in_scene.png"); @@ -16,7 +17,7 @@ int main() { //Hamming distance must be used with ORB const std::string matcherName = "BruteForce-Hamming"; vpKeyPoint::vpFilterMatchingType filterType = - vpKeyPoint::ratioDistanceThreshold; + vpKeyPoint::ratioDistanceThreshold; vpKeyPoint keypoint(detectorName, extractorName, matcherName, filterType); keypoint.setMatchingRatioThreshold(0.8); @@ -43,7 +44,7 @@ int main() { vpDisplay::display(IMatching); //Compute a Ransac homography to find the box - vpCameraParameters cam(600, 600, I.getWidth()/2, I.getHeight()/2); + vpCameraParameters cam(600, 600, I.getWidth() / 2, I.getHeight() / 2); vpHomography H; //List of keypoints coordinates in normalized camera frame in reference image @@ -64,12 +65,12 @@ int main() { double residual; //At least 50% of the matched points must have a positive vote to consider //the solution picked valid - unsigned int nb_inliers_consensus = (unsigned int) mPref_x.size() / 2; + unsigned int nb_inliers_consensus = (unsigned int)mPref_x.size() / 2; //Maximum error (in meter) allowed to consider a point as an inlier double ransac_threshold = 2.0 / cam.get_px(); vpHomography::ransac(mPref_x, mPref_y, mPcur_x, mPcur_y, H, inliers, - residual, nb_inliers_consensus, ransac_threshold, - true); + residual, nb_inliers_consensus, ransac_threshold, + true); //Defines the 4 corners of the box in the reference and the current 
images @@ -101,7 +102,8 @@ int main() { return 0; } #else -int main() { +int main() +{ return 0; } #endif diff --git a/vision/sample-vpKeyPoint-3.cpp b/vision/sample-vpKeyPoint-3.cpp index 7c2e9a0..96e4aab 100644 --- a/vision/sample-vpKeyPoint-3.cpp +++ b/vision/sample-vpKeyPoint-3.cpp @@ -3,7 +3,7 @@ int main() { -#if (VISP_HAVE_OPENCV_VERSION >= 0x020300) +#if defined(VISP_HAVE_OPENCV) && defined(HAVE_OPENCV_IMGPROC) && defined(HAVE_OPENCV_FEATURES2D) vpImage Irefrence; vpImage Icurrent; diff --git a/vision/sample-vpKeyPoint-4.cpp b/vision/sample-vpKeyPoint-4.cpp index 9196d7b..b9c0e94 100644 --- a/vision/sample-vpKeyPoint-4.cpp +++ b/vision/sample-vpKeyPoint-4.cpp @@ -4,7 +4,7 @@ int main() { -#if (VISP_HAVE_OPENCV_VERSION >= 0x020300) +#if defined(VISP_HAVE_OPENCV) && defined(HAVE_OPENCV_IMGPROC) && defined(HAVE_OPENCV_FEATURES2D) vpImage Ireference; vpImage Icurrent; @@ -16,8 +16,7 @@ int main() //Select a part of the image by clincking on two points which define a rectangle vpImagePoint corners[2]; - for (int i=0 ; i < 2 ; i++) - { + for (int i = 0; i < 2; i++) { vpDisplay::getClick(Ireference, corners[i]); } @@ -31,8 +30,7 @@ int main() //Then grab another image which represents the current image Icurrent //Select a part of the image by clincking on two points which define a rectangle - for (int i=0 ; i < 2 ; i++) - { + for (int i = 0; i < 2; i++) { vpDisplay::getClick(Icurrent, corners[i]); } diff --git a/vision/sample-vpKeyPoint.cpp b/vision/sample-vpKeyPoint.cpp index d882eb8..aa52ab6 100644 --- a/vision/sample-vpKeyPoint.cpp +++ b/vision/sample-vpKeyPoint.cpp @@ -3,8 +3,9 @@ #include #include -#if (VISP_HAVE_OPENCV_VERSION >= 0x020101) -int main() { +#if defined(VISP_HAVE_OPENCV) && defined(HAVE_OPENCV_IMGPROC) && defined(HAVE_OPENCV_FEATURES2D) +int main() +{ //Test Matching side by side vpImage IRef, I, IMatching; vpImageIo::read(IRef, "box.png"); @@ -15,7 +16,7 @@ int main() { //Hamming distance must be used with ORB const std::string matcherName = 
"BruteForce-Hamming"; vpKeyPoint::vpFilterMatchingType filterType = - vpKeyPoint::ratioDistanceThreshold; + vpKeyPoint::ratioDistanceThreshold; vpKeyPoint keypoint(detectorName, extractorName, matcherName, filterType); @@ -46,7 +47,8 @@ int main() { return 0; } #else -int main() { +int main() +{ return 0; } #endif