ViSP  3.0.0
testKeyPoint-4.cpp
/****************************************************************************
 *
 * This file is part of the ViSP software.
 * Copyright (C) 2005 - 2015 by Inria. All rights reserved.
 *
 * This software is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * ("GPL") version 2 as published by the Free Software Foundation.
 * See the file LICENSE.txt at the root directory of this source
 * distribution for additional information about the GNU GPL.
 *
 * For using ViSP with software that can not be combined with the GNU
 * GPL, please contact Inria about acquiring a ViSP Professional
 * Edition License.
 *
 * See http://visp.inria.fr for more information.
 *
 * This software was developed at:
 * Inria Rennes - Bretagne Atlantique
 * Campus Universitaire de Beaulieu
 * 35042 Rennes Cedex
 * France
 *
 * If you have questions regarding the use of this file, please contact
 * Inria at visp@inria.fr
 *
 * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
 * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
 *
 * Description:
 * Test keypoint matching and pose estimation with mostly OpenCV function calls
 * to detect potential memory leaks in testKeyPoint-2.cpp.
 *
 * Authors:
 * Souriya Trinh
 *
 *****************************************************************************/

#include <iostream>

#include <visp3/core/vpConfig.h>

#if defined(VISP_HAVE_OPENCV) && (VISP_HAVE_OPENCV_VERSION >= 0x020301)

#include <opencv2/core/core.hpp>
#include <opencv2/features2d/features2d.hpp>
#include <visp3/core/vpImage.h>
#include <visp3/io/vpImageIo.h>
#include <visp3/gui/vpDisplayX.h>
#include <visp3/gui/vpDisplayGTK.h>
#include <visp3/gui/vpDisplayGDI.h>
#include <visp3/gui/vpDisplayOpenCV.h>
#include <visp3/io/vpVideoReader.h>
#include <visp3/core/vpIoTools.h>
#include <visp3/io/vpParseArgv.h>
#include <visp3/mbt/vpMbEdgeTracker.h>
#include <visp3/core/vpHomogeneousMatrix.h>
#include <visp3/vision/vpKeyPoint.h>

// List of allowed command line options
#define GETOPTARGS "cdh"

void usage(const char *name, const char *badparam);
bool getOptions(int argc, const char **argv, bool &click_allowed, bool &display);

void usage(const char *name, const char *badparam)
{
  fprintf(stdout, "\n\
Test keypoints matching.\n\
\n\
SYNOPSIS\n\
  %s [-c] [-d] [-h]\n", name);

  fprintf(stdout, "\n\
OPTIONS: \n\
\n\
  -c\n\
     Disable the mouse click. Useful to automate the \n\
     execution of this program without human intervention.\n\
\n\
  -d \n\
     Turn off the display.\n\
\n\
  -h\n\
     Print the help.\n");

  if (badparam)
    fprintf(stdout, "\nERROR: Bad parameter [%s]\n", badparam);
}

bool getOptions(int argc, const char **argv, bool &click_allowed, bool &display)
{
  const char *optarg_;
  int c;
  while ((c = vpParseArgv::parse(argc, argv, GETOPTARGS, &optarg_)) > 1) {

    switch (c) {
    case 'c': click_allowed = false; break;
    case 'd': display = false; break;
    case 'h': usage(argv[0], NULL); return false; break;

    default:
      usage(argv[0], optarg_);
      return false; break;
    }
  }

  if ((c == 1) || (c == -1)) {
    // standalone param or error
    usage(argv[0], NULL);
    std::cerr << "ERROR: " << std::endl;
    std::cerr << "  Bad argument " << optarg_ << std::endl << std::endl;
    return false;
  }

  return true;
}

int main(int argc, const char ** argv) {
  try {
    std::string env_ipath;
    bool opt_click_allowed = true;
    bool opt_display = true;

    // Read the command line options
    if (getOptions(argc, argv, opt_click_allowed, opt_display) == false) {
      exit(-1);
    }

    //Get the visp-images-data package path or VISP_INPUT_IMAGE_PATH environment variable value
    env_ipath = vpIoTools::getViSPImagesDataPath();

    if(env_ipath.empty()) {
      std::cerr << "Please set the VISP_INPUT_IMAGE_PATH environment variable value." << std::endl;
      return -1;
    }

    vpImage<unsigned char> I, Imatch, Iref;

    //Set the path location of the image sequence
    std::string dirname = vpIoTools::createFilePath(env_ipath, "ViSP-images/mbt/cube");

    //Build the name of the image files
    std::string filenameRef = vpIoTools::createFilePath(dirname, "image0000.pgm");
    vpImageIo::read(I, filenameRef);
    Iref = I;
    std::string filenameCur = vpIoTools::createFilePath(dirname, "image%04d.pgm");

#if defined VISP_HAVE_X11
    vpDisplayX display, display2;
#elif defined VISP_HAVE_GTK
    vpDisplayGTK display, display2;
#elif defined VISP_HAVE_GDI
    vpDisplayGDI display, display2;
#else
    vpDisplayOpenCV display, display2;
#endif

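    // Two display windows are used: one for the current image I and one for
    // Imatch, which is twice as wide so that the reference image (left) and
    // the current image (right) can be shown side by side with the matches
    // drawn between them.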
    if (opt_display) {
      display.init(I, 0, 0, "ORB keypoints matching");
      Imatch.resize(I.getHeight(), 2*I.getWidth());
      Imatch.insert(I, vpImagePoint(0, 0));
      display2.init(Imatch, 0, (int)I.getHeight() + 70, "ORB keypoints matching");
    }

    vpCameraParameters cam;
    vpMbEdgeTracker tracker;
    //Load config for tracker
    std::string tracker_config_file = vpIoTools::createFilePath(env_ipath, "ViSP-images/mbt/cube.xml");

    bool usexml = false;
#ifdef VISP_HAVE_XML2
    tracker.loadConfigFile(tracker_config_file);
    tracker.getCameraParameters(cam);

    usexml = true;
#endif
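    // When libxml2 is not available, the moving-edge and camera parameters
    // cannot be read from cube.xml, so they are set manually below.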
    if (! usexml) {
      vpMe me;
      me.setMaskSize(5);
      me.setMaskNumber(180);
      me.setRange(8);
      me.setThreshold(10000);
      me.setMu1(0.5);
      me.setMu2(0.5);
      me.setSampleStep(4);
      me.setNbTotalSample(250);
      tracker.setMovingEdge(me);
      cam.initPersProjWithoutDistortion(547.7367575, 542.0744058, 338.7036994, 234.5083345);
      tracker.setCameraParameters(cam);
      tracker.setNearClippingDistance(0.01);
      tracker.setFarClippingDistance(100.0);
      tracker.setClipping(tracker.getClipping() | vpMbtPolygon::FOV_CLIPPING);
    }

    tracker.setAngleAppear(vpMath::rad(89));
    tracker.setAngleDisappear(vpMath::rad(89));

    //Load CAO model
    std::string cao_model_file = vpIoTools::createFilePath(env_ipath, "ViSP-images/mbt/cube.cao");
    tracker.loadModel(cao_model_file);

    //Initialize the pose
    std::string init_file = vpIoTools::createFilePath(env_ipath, "ViSP-images/mbt/cube.init");
    if (opt_display && opt_click_allowed) {
      tracker.initClick(I, init_file);
    }
    else
    {
      vpHomogeneousMatrix cMoi(0.02044769891, 0.1101505452, 0.5078963719, 2.063603907, 1.110231561, -0.4392789872);
      tracker.initFromPose(I, cMoi);
    }

    //Get the init pose
    vpHomogeneousMatrix cMo;
    tracker.getPose(cMo);

    //Init keypoints
    cv::Ptr<cv::FeatureDetector> detector;
    cv::Ptr<cv::DescriptorExtractor> extractor;
    cv::Ptr<cv::DescriptorMatcher> matcher;

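    // The ORB detector/extractor is created differently depending on the
    // OpenCV version: with OpenCV >= 3.0, cv::ORB::create(500, 1.2f, 1)
    // requests 500 features, a 1.2 scale factor and a single pyramid level;
    // with OpenCV 2.x the single-level setting is applied afterwards through
    // detector->set("nLevels", 1).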
#if (VISP_HAVE_OPENCV_VERSION >= 0x030000)
    detector = cv::ORB::create(500, 1.2f, 1);
    extractor = cv::ORB::create(500, 1.2f, 1);
#elif (VISP_HAVE_OPENCV_VERSION >= 0x020301)
    detector = cv::FeatureDetector::create("ORB");
    extractor = cv::DescriptorExtractor::create("ORB");
#endif
    matcher = cv::DescriptorMatcher::create("BruteForce-Hamming");

#if (VISP_HAVE_OPENCV_VERSION >= 0x020400 && VISP_HAVE_OPENCV_VERSION < 0x030000)
    detector->set("nLevels", 1);
#endif

    //Detect keypoints on the current image
    std::vector<cv::KeyPoint> trainKeyPoints;
    cv::Mat matImg;
    vpImageConvert::convert(I, matImg);
    detector->detect(matImg, trainKeyPoints);

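    // Training stage: keep only the keypoints that lie on a visible face of
    // the cube CAD model, and recover their 3D coordinates in the object
    // frame by back-projecting the 2D detections using the initial pose cMo
    // and the camera parameters.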
    //Keep only keypoints on the cube
    std::vector<vpPolygon> polygons;
    std::vector<std::vector<vpPoint> > roisPt;
    std::pair<std::vector<vpPolygon>, std::vector<std::vector<vpPoint> > > pair = tracker.getPolygonFaces(false);
    polygons = pair.first;
    roisPt = pair.second;

    //Compute the 3D coordinates
    std::vector<cv::Point3f> points3f;
    vpKeyPoint::compute3DForPointsInPolygons(cMo, cam, trainKeyPoints, polygons, roisPt, points3f);

    //Extract descriptors
    cv::Mat trainDescriptors;
    extractor->compute(matImg, trainKeyPoints, trainDescriptors);

    if(trainKeyPoints.size() != (size_t) trainDescriptors.rows || trainKeyPoints.size() != points3f.size()) {
      std::cerr << "Problem with training data size !" << std::endl;
      return -1;
    }

    //Init reader for getting the input image sequence
    vpVideoReader g;
    g.setFileName(filenameCur);
    g.open(I);
    g.acquire(I);

    bool opt_click = false;
    vpMouseButton::vpMouseButtonType button;
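    // Process the whole sequence when the display is enabled; in automated
    // mode (display turned off) only the first 30 frames are processed.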
    while((opt_display && !g.end()) || (!opt_display && g.getFrameIndex() < 30)) {
      g.acquire(I);

      vpImageConvert::convert(I, matImg);
      std::vector<cv::KeyPoint> queryKeyPoints;
      detector->detect(matImg, queryKeyPoints);

      cv::Mat queryDescriptors;
      extractor->compute(matImg, queryKeyPoints, queryDescriptors);

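      // Match the query descriptors against the training descriptors with a
      // 2-nearest-neighbour search, then apply Lowe's ratio test: a match is
      // kept only if the best distance is less than 0.85 times the distance
      // to the second-best candidate.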
      std::vector<std::vector<cv::DMatch> > knn_matches;
      std::vector<cv::DMatch> matches;
      matcher->knnMatch(queryDescriptors, trainDescriptors, knn_matches, 2);
      for(std::vector<std::vector<cv::DMatch> >::const_iterator it = knn_matches.begin(); it != knn_matches.end(); ++it) {
        if(it->size() > 1) {
          double ratio = (*it)[0].distance / (*it)[1].distance;
          if(ratio < 0.85) {
            matches.push_back((*it)[0]);
          }
        }
      }

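      // Build the 2D/3D correspondences used for pose estimation: the 3D
      // coordinates come from the training points (object frame), and the
      // matched 2D keypoints are converted from pixel coordinates to
      // normalized coordinates in meter with vpPixelMeterConversion.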
      vpPose estimated_pose;
      for(std::vector<cv::DMatch>::const_iterator it = matches.begin(); it != matches.end(); ++it) {
        vpPoint pt(points3f[(size_t)(it->trainIdx)].x,
                   points3f[(size_t)(it->trainIdx)].y,
                   points3f[(size_t)(it->trainIdx)].z);

        double x = 0.0, y = 0.0;
        vpPixelMeterConversion::convertPoint(cam, queryKeyPoints[(size_t)(it->queryIdx)].pt.x, queryKeyPoints[(size_t)(it->queryIdx)].pt.y, x, y);
        pt.set_x(x);
        pt.set_y(y);

        estimated_pose.addPoint(pt);
      }
      bool is_pose_estimated = false;
      if(estimated_pose.npt >= 4) {
        try {
          unsigned int nb_inliers = (unsigned int) (0.6 * estimated_pose.npt);
          estimated_pose.setRansacNbInliersToReachConsensus(nb_inliers);
          estimated_pose.setRansacThreshold(0.01);
          estimated_pose.setRansacMaxTrials(500);
          estimated_pose.computePose(vpPose::RANSAC, cMo);
          is_pose_estimated = true;
        } catch(...) {
          is_pose_estimated = false;
        }
      }

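      // Visualization: matches are drawn as green lines between the reference
      // image (left half of Imatch) and the current image (right half). When
      // a pose has been estimated, the model edges and the object frame are
      // overlaid on the current image.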
      if(opt_display) {
        vpDisplay::display(I);

        Imatch.insert(I, vpImagePoint(0, Iref.getWidth()));
        vpDisplay::display(Imatch);
        for(std::vector<cv::DMatch>::const_iterator it = matches.begin(); it != matches.end(); ++it) {
          vpImagePoint leftPt(trainKeyPoints[(size_t) it->trainIdx].pt.y, trainKeyPoints[(size_t) it->trainIdx].pt.x);
          vpImagePoint rightPt(queryKeyPoints[(size_t) it->queryIdx].pt.y, queryKeyPoints[(size_t) it->queryIdx].pt.x
                               + Iref.getWidth());
          vpDisplay::displayLine(Imatch, leftPt, rightPt, vpColor::green);
        }

        if(is_pose_estimated) {
          tracker.setPose(I, cMo);
          tracker.display(I, cMo, cam, vpColor::red);
          vpDisplay::displayFrame(I, cMo, cam, 0.05, vpColor::none);
        }

        vpDisplay::flush(Imatch);
        vpDisplay::flush(I);
      }

      //Click requested to process next image
      if (opt_click_allowed && opt_display) {
        if(opt_click) {
          vpDisplay::getClick(I, button, true);
          if(button == vpMouseButton::button3) {
            opt_click = false;
          }
        } else {
          //Use right click to enable/disable step by step tracking
          if(vpDisplay::getClick(I, button, false)) {
            if (button == vpMouseButton::button3) {
              opt_click = true;
            }
            else if(button == vpMouseButton::button1) {
              break;
            }
          }
        }
      }
    }

  } catch(vpException &e) {
    std::cerr << e.what() << std::endl;
    return -1;
  }

  std::cout << "testKeyPoint-4 is ok !" << std::endl;
  return 0;
}
#else
int main() {
  std::cerr << "You need the OpenCV library (version >= 2.3.1) to run this example." << std::endl;

  return 0;
}

#endif