-
Notifications
You must be signed in to change notification settings - Fork 1
/
Copy pathpnp_pose_estimator.cpp
70 lines (60 loc) · 2.17 KB
/
pnp_pose_estimator.cpp
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
#include "pnp_pose_estimator.h"
#include "opencv2/calib3d.hpp"
#include "opencv2/core/eigen.hpp"
/// Construct a PnP pose estimator.
///
/// @param K                 Camera calibration (intrinsic) matrix.
/// @param do_iterative_also If true, refine the RANSAC pose estimate with
///                          the iterative PnP solver on the inlier set.
PnPPoseEstimator::PnPPoseEstimator(const Eigen::Matrix3d& K, bool do_iterative_also)
    : do_iterative_also_{do_iterative_also}
{
  // Store the calibration matrix in OpenCV form,
  // since the solvePnP family of functions expects a cv::Mat.
  cv::eigen2cv(K, K_);
}
/// Estimate the camera pose from 2D-3D point correspondences.
///
/// Stage 1: AP3P inside RANSAC finds an inlier set and an initial pose.
/// Stage 2 (optional): the iterative solver refines the pose on all inliers,
/// seeded with the RANSAC result.
///
/// @param image_points 2D pixel observations.
/// @param world_points Corresponding 3D points in the world frame.
/// @return The camera pose in the world (pose_W_C) together with the inlier
///         correspondences, or an empty PoseEstimate when there are too few
///         points or too few inliers.
PoseEstimate PnPPoseEstimator::estimate(const std::vector<cv::Point2f>& image_points,
                                        const std::vector<cv::Point3f>& world_points)
{
  // Minimum required number of points:
  // 3 times the theoretic minimum of 3, so the estimate is well constrained.
  constexpr size_t min_number_points = 9;

  // RANSAC parameters.
  constexpr int max_iterations = 10000;
  constexpr double max_reprojection_error = 2.0;  // In pixels.
  constexpr double confidence = 0.99;

  // Check that we have enough points.
  if (image_points.size() < min_number_points)
  {
    return {};
  }

  // Find inliers and compute an initial pose with RANSAC.
  // r_vec (axis-angle rotation) and t_vec (translation) parameterize
  // pose_C_W: the transformation taking world points into the camera frame.
  cv::Vec3d r_vec;
  cv::Vec3d t_vec;
  std::vector<int> inliers;
  cv::solvePnPRansac(world_points, image_points,
                     K_, {}, r_vec, t_vec, false,
                     max_iterations, max_reprojection_error, confidence,
                     inliers, cv::SOLVEPNP_AP3P);

  // Check that we have enough inliers.
  if (inliers.size() < min_number_points)
  {
    return {};
  }

  // Extract the inlier correspondences.
  std::vector<cv::Point2f> inlier_image_points;
  std::vector<cv::Point3f> inlier_world_points;
  inlier_image_points.reserve(inliers.size());
  inlier_world_points.reserve(inliers.size());
  for (int inlier : inliers)
  {
    inlier_image_points.push_back(image_points[inlier]);
    inlier_world_points.push_back(world_points[inlier]);
  }

  // Optionally refine the pose with the iterative solver on the entire
  // inlier set, using the current r_vec and t_vec as initial values
  // (useExtrinsicGuess = true).
  if (do_iterative_also_)
  {
    cv::solvePnP(inlier_world_points, inlier_image_points, K_, {},
                 r_vec, t_vec, true, cv::SOLVEPNP_ITERATIVE);
  }

  // Convert the axis-angle rotation to a rotation matrix.
  cv::Mat R_cv;
  cv::Rodrigues(r_vec, R_cv);

  // Convert to Eigen.
  Eigen::Matrix3d R;
  cv::cv2eigen(R_cv, R);
  Eigen::Vector3d t;
  cv::cv2eigen(t_vec, t);

  // pose_C_W maps world points into the camera frame;
  // invert it to return the camera pose in the world.
  Sophus::SE3d pose_C_W(R, t);
  return {pose_C_W.inverse(), inlier_image_points, inlier_world_points};
}