35#include <visp3/core/vpConfig.h>
37#ifdef VISP_HAVE_APRILTAG
41#include <apriltag_pose.h>
42#include <common/homography.h>
48#include <tagCircle21h7.h>
49#include <tagStandard41h12.h>
50#include <visp3/detection/vpDetectorAprilTag.h>
51#if defined(VISP_HAVE_APRILTAG_BIG_FAMILY)
52#include <tagCircle49h12.h>
53#include <tagCustom48h12.h>
54#include <tagStandard41h12.h>
55#include <tagStandard52h13.h>
58#include <visp3/core/vpDisplay.h>
59#include <visp3/core/vpPixelMeterConversion.h>
60#include <visp3/core/vpPoint.h>
61#include <visp3/vision/vpPose.h>
63#ifndef DOXYGEN_SHOULD_SKIP_THIS
64class vpDetectorAprilTag::Impl
67 Impl(
const vpAprilTagFamily &tagFamily,
const vpPoseEstimationMethod &method)
68 : m_poseEstimationMethod(method), m_tagsId(), m_tagFamily(tagFamily), m_td(NULL), m_tf(NULL), m_detections(NULL),
69 m_zAlignedWithCameraFrame(false)
71 switch (m_tagFamily) {
73 m_tf = tag36h11_create();
77 m_tf = tag36h10_create();
84 m_tf = tag25h9_create();
88 m_tf = tag25h7_create();
92 m_tf = tag16h5_create();
96 m_tf = tagCircle21h7_create();
100#if defined(VISP_HAVE_APRILTAG_BIG_FAMILY)
101 m_tf = tagCircle49h12_create();
105 case TAG_CUSTOM48h12:
106#if defined(VISP_HAVE_APRILTAG_BIG_FAMILY)
107 m_tf = tagCustom48h12_create();
111 case TAG_STANDARD52h13:
112#if defined(VISP_HAVE_APRILTAG_BIG_FAMILY)
113 m_tf = tagStandard52h13_create();
117 case TAG_STANDARD41h12:
118#if defined(VISP_HAVE_APRILTAG_BIG_FAMILY)
119 m_tf = tagStandard41h12_create();
127 if (m_tagFamily != TAG_36ARTOOLKIT && m_tf) {
128 m_td = apriltag_detector_create();
129 apriltag_detector_add_family(m_td, m_tf);
137 : m_poseEstimationMethod(o.m_poseEstimationMethod), m_tagsId(o.m_tagsId), m_tagFamily(o.m_tagFamily), m_td(NULL),
138 m_tf(NULL), m_detections(NULL), m_zAlignedWithCameraFrame(o.m_zAlignedWithCameraFrame)
140 switch (m_tagFamily) {
142 m_tf = tag36h11_create();
146 m_tf = tag36h10_create();
149 case TAG_36ARTOOLKIT:
153 m_tf = tag25h9_create();
157 m_tf = tag25h7_create();
161 m_tf = tag16h5_create();
165 m_tf = tagCircle21h7_create();
168 case TAG_CIRCLE49h12:
169#if defined(VISP_HAVE_APRILTAG_BIG_FAMILY)
170 m_tf = tagCircle49h12_create();
174 case TAG_CUSTOM48h12:
175#if defined(VISP_HAVE_APRILTAG_BIG_FAMILY)
176 m_tf = tagCustom48h12_create();
180 case TAG_STANDARD52h13:
181#if defined(VISP_HAVE_APRILTAG_BIG_FAMILY)
182 m_tf = tagStandard52h13_create();
186 case TAG_STANDARD41h12:
187#if defined(VISP_HAVE_APRILTAG_BIG_FAMILY)
188 m_tf = tagStandard41h12_create();
196 if (m_tagFamily != TAG_36ARTOOLKIT && m_tf) {
197 m_td = apriltag_detector_create();
198 apriltag_detector_add_family(m_td, m_tf);
204 if (o.m_detections != NULL) {
205 m_detections = apriltag_detections_copy(o.m_detections);
212 apriltag_detector_destroy(m_td);
216 switch (m_tagFamily) {
218 tag36h11_destroy(m_tf);
222 tag36h10_destroy(m_tf);
225 case TAG_36ARTOOLKIT:
229 tag25h9_destroy(m_tf);
233 tag25h7_destroy(m_tf);
237 tag16h5_destroy(m_tf);
241 tagCircle21h7_destroy(m_tf);
244 case TAG_CIRCLE49h12:
245#if defined(VISP_HAVE_APRILTAG_BIG_FAMILY)
    // Fix copy-paste bug: this case previously called tagCustom48h12_destroy()
    // (same call as the TAG_CUSTOM48h12 case below), destroying a family with
    // the wrong deallocator. Each case must destroy the family its constructor
    // created, so TAG_CIRCLE49h12 must use tagCircle49h12_destroy().
246 tagCircle49h12_destroy(m_tf);
250 case TAG_CUSTOM48h12:
251#if defined(VISP_HAVE_APRILTAG_BIG_FAMILY)
252 tagCustom48h12_destroy(m_tf);
256 case TAG_STANDARD52h13:
257#if defined(VISP_HAVE_APRILTAG_BIG_FAMILY)
258 tagStandard52h13_destroy(m_tf);
262 case TAG_STANDARD41h12:
263#if defined(VISP_HAVE_APRILTAG_BIG_FAMILY)
264 tagStandard41h12_destroy(m_tf);
274 apriltag_detections_destroy(m_detections);
281 for (
unsigned int i = 0; i < 3; i++) {
282 for (
unsigned int j = 0; j < 3; j++) {
283 cMo[i][j] = MATD_EL(pose.R, i, j);
285 cMo[i][3] = MATD_EL(pose.t, i, 0);
290 std::vector<std::vector<vpImagePoint> > &polygons, std::vector<std::string> &messages,
bool displayTag,
291 const vpColor color,
unsigned int thickness, std::vector<vpHomogeneousMatrix> *cMo_vec,
292 std::vector<vpHomogeneousMatrix> *cMo_vec2, std::vector<double> *projErrors,
293 std::vector<double> *projErrors2)
295 if (m_tagFamily == TAG_36ARTOOLKIT) {
297 std::cerr <<
"TAG_36ARTOOLKIT detector is not available anymore." << std::endl;
300#if !defined(VISP_HAVE_APRILTAG_BIG_FAMILY)
301 if (m_tagFamily == TAG_CIRCLE49h12 || m_tagFamily == TAG_CUSTOM48h12 || m_tagFamily == TAG_STANDARD41h12 ||
302 m_tagFamily == TAG_STANDARD52h13) {
303 std::cerr <<
"TAG_CIRCLE49h12, TAG_CUSTOM48h12, TAG_STANDARD41h12 and TAG_STANDARD52h13 are disabled."
309 const bool computePose = (cMo_vec != NULL);
311 image_u8_t im = {(int32_t)I.
getWidth(),
317 apriltag_detections_destroy(m_detections);
321 m_detections = apriltag_detector_detect(m_td, &im);
322 int nb_detections = zarray_size(m_detections);
323 bool detected = nb_detections > 0;
325 polygons.resize(
static_cast<size_t>(nb_detections));
326 messages.resize(
static_cast<size_t>(nb_detections));
327 m_tagsId.resize(
static_cast<size_t>(nb_detections));
329 for (
int i = 0; i < zarray_size(m_detections); i++) {
330 apriltag_detection_t *det;
331 zarray_get(m_detections, i, &det);
333 std::vector<vpImagePoint> polygon;
334 for (
int j = 0; j < 4; j++) {
335 polygon.push_back(
vpImagePoint(det->p[j][1], det->p[j][0]));
337 polygons[
static_cast<size_t>(i)] = polygon;
338 std::stringstream ss;
339 ss << m_tagFamily <<
" id: " << det->id;
340 messages[
static_cast<size_t>(i)] = ss.str();
341 m_tagsId[
static_cast<size_t>(i)] = det->id;
353 vpDisplay::displayLine(I, (
int)det->p[1][1], (
int)det->p[1][0], (
int)det->p[2][1], (
int)det->p[2][0], Ox2,
355 vpDisplay::displayLine(I, (
int)det->p[2][1], (
int)det->p[2][0], (
int)det->p[3][1], (
int)det->p[3][0], Oy2,
362 if (getPose(
static_cast<size_t>(i), tagSize, cam, cMo, cMo_vec2 ? &cMo2 : NULL, projErrors ? &err1 : NULL,
363 projErrors2 ? &err2 : NULL)) {
364 cMo_vec->push_back(cMo);
366 cMo_vec2->push_back(cMo2);
369 projErrors->push_back(err1);
372 projErrors2->push_back(err2);
385 for (
size_t i = 0; i < cMo_vec.size(); i++) {
391 void displayFrames(
const vpImage<vpRGBa> &I,
const std::vector<vpHomogeneousMatrix> &cMo_vec,
394 for (
size_t i = 0; i < cMo_vec.size(); i++) {
400 void displayTags(
const vpImage<unsigned char> &I,
const std::vector<std::vector<vpImagePoint> > &tagsCorners,
401 const vpColor &color,
unsigned int thickness)
const
403 for (
size_t i = 0; i < tagsCorners.size(); i++) {
409 const std::vector<vpImagePoint> &corners = tagsCorners[i];
410 assert(corners.size() == 4);
412 vpDisplay::displayLine(I, (
int)corners[0].get_i(), (
int)corners[0].get_j(), (
int)corners[1].get_i(), (
int)corners[1].get_j(),
414 vpDisplay::displayLine(I, (
int)corners[0].get_i(), (
int)corners[0].get_j(), (
int)corners[3].get_i(), (
int)corners[3].get_j(),
416 vpDisplay::displayLine(I, (
int)corners[1].get_i(), (
int)corners[1].get_j(), (
int)corners[2].get_i(), (
int)corners[2].get_j(),
418 vpDisplay::displayLine(I, (
int)corners[2].get_i(), (
int)corners[2].get_j(), (
int)corners[3].get_i(), (
int)corners[3].get_j(),
423 void displayTags(
const vpImage<vpRGBa> &I,
const std::vector<std::vector<vpImagePoint> > &tagsCorners,
424 const vpColor &color,
unsigned int thickness)
const
426 for (
size_t i = 0; i < tagsCorners.size(); i++) {
432 const std::vector<vpImagePoint> &corners = tagsCorners[i];
433 assert(corners.size() == 4);
435 vpDisplay::displayLine(I, (
int)corners[0].get_i(), (
int)corners[0].get_j(), (
int)corners[1].get_i(), (
int)corners[1].get_j(),
437 vpDisplay::displayLine(I, (
int)corners[0].get_i(), (
int)corners[0].get_j(), (
int)corners[3].get_i(), (
int)corners[3].get_j(),
439 vpDisplay::displayLine(I, (
int)corners[1].get_i(), (
int)corners[1].get_j(), (
int)corners[2].get_i(), (
int)corners[2].get_j(),
441 vpDisplay::displayLine(I, (
int)corners[2].get_i(), (
int)corners[2].get_j(), (
int)corners[3].get_i(), (
int)corners[3].get_j(),
449 if (m_detections == NULL) {
452 if (m_tagFamily == TAG_36ARTOOLKIT) {
454 std::cerr <<
"TAG_36ARTOOLKIT detector is not available anymore." << std::endl;
457#if !defined(VISP_HAVE_APRILTAG_BIG_FAMILY)
458 if (m_tagFamily == TAG_CIRCLE49h12 || m_tagFamily == TAG_CUSTOM48h12 || m_tagFamily == TAG_STANDARD41h12 ||
459 m_tagFamily == TAG_STANDARD52h13) {
460 std::cerr <<
"TAG_CIRCLE49h12, TAG_CUSTOM48h12, TAG_STANDARD41h12 and TAG_STANDARD52h13 are disabled."
466 apriltag_detection_t *det;
467 zarray_get(m_detections,
static_cast<int>(tagIndex), &det);
469 int nb_detections = zarray_size(m_detections);
470 if (tagIndex >= (
size_t)nb_detections) {
481 if (m_poseEstimationMethod == HOMOGRAPHY_ORTHOGONAL_ITERATION ||
482 m_poseEstimationMethod == BEST_RESIDUAL_VIRTUAL_VS) {
486 apriltag_detection_info_t info;
488 info.tagsize = tagSize;
495 getPoseWithOrthogonalMethod(info, cMo, cMo2, projErrors, projErrors2);
496 cMo_homography_ortho_iter = cMo;
500 if (m_poseEstimationMethod == HOMOGRAPHY || m_poseEstimationMethod == HOMOGRAPHY_VIRTUAL_VS ||
501 m_poseEstimationMethod == BEST_RESIDUAL_VIRTUAL_VS) {
505 apriltag_detection_info_t info;
507 info.tagsize = tagSize;
513 apriltag_pose_t pose;
514 estimate_pose_for_tag_homography(&info, &pose);
515 convertHomogeneousMatrix(pose, cMo);
517 matd_destroy(pose.R);
518 matd_destroy(pose.t);
520 cMo_homography = cMo;
528 double x = 0.0, y = 0.0;
529 std::vector<vpPoint> pts(4);
531 imPt.
set_uv(det->p[0][0], det->p[0][1]);
538 imPt.
set_uv(det->p[1][0], det->p[1][1]);
545 imPt.
set_uv(det->p[2][0], det->p[2][1]);
552 imPt.
set_uv(det->p[3][0], det->p[3][1]);
560 if (m_poseEstimationMethod != HOMOGRAPHY && m_poseEstimationMethod != HOMOGRAPHY_VIRTUAL_VS &&
561 m_poseEstimationMethod != HOMOGRAPHY_ORTHOGONAL_ITERATION) {
562 if (m_poseEstimationMethod == BEST_RESIDUAL_VIRTUAL_VS) {
565 double residual_dementhon = std::numeric_limits<double>::max(),
566 residual_lagrange = std::numeric_limits<double>::max();
568 double residual_homography_ortho_iter = pose.
computeResidual(cMo_homography_ortho_iter);
578 std::vector<double> residuals;
579 residuals.push_back(residual_dementhon);
580 residuals.push_back(residual_lagrange);
581 residuals.push_back(residual_homography);
582 residuals.push_back(residual_homography_ortho_iter);
583 std::vector<vpHomogeneousMatrix> poses;
584 poses.push_back(cMo_dementhon);
585 poses.push_back(cMo_lagrange);
586 poses.push_back(cMo_homography);
587 poses.push_back(cMo_homography_ortho_iter);
589 std::ptrdiff_t minIndex = std::min_element(residuals.begin(), residuals.end()) - residuals.begin();
590 cMo = *(poses.begin() + minIndex);
592 pose.
computePose(m_mapOfCorrespondingPoseMethods[m_poseEstimationMethod], cMo);
596 if (m_poseEstimationMethod != HOMOGRAPHY && m_poseEstimationMethod != HOMOGRAPHY_ORTHOGONAL_ITERATION) {
602 if (m_poseEstimationMethod != HOMOGRAPHY_ORTHOGONAL_ITERATION) {
604 double scale = tagSize / 2.0;
605 double data_p0[] = {-scale, scale, 0};
606 double data_p1[] = {scale, scale, 0};
607 double data_p2[] = {scale, -scale, 0};
608 double data_p3[] = {-scale, -scale, 0};
609 matd_t *p[4] = {matd_create_data(3, 1, data_p0), matd_create_data(3, 1, data_p1),
610 matd_create_data(3, 1, data_p2), matd_create_data(3, 1, data_p3)};
612 for (
int i = 0; i < 4; i++) {
615 v[i] = matd_create_data(3, 1, data_v);
618 apriltag_pose_t solution1, solution2;
619 const int nIters = 50;
624 get_second_solution(v, p, &solution1, &solution2, nIters, &err2);
626 for (
int i = 0; i < 4; i++) {
632 convertHomogeneousMatrix(solution2, *cMo2);
634 matd_destroy(solution2.R);
635 matd_destroy(solution2.t);
638 matd_destroy(solution1.R);
639 matd_destroy(solution1.t);
647 if (projErrors2 && cMo2) {
651 if (!m_zAlignedWithCameraFrame) {
672 void getPoseWithOrthogonalMethod(apriltag_detection_info_t &info,
vpHomogeneousMatrix &cMo1,
675 apriltag_pose_t pose1, pose2;
677 estimate_tag_pose_orthogonal_iteration(&info, &err_1, &pose1, &err_2, &pose2, 50);
678 if (err_1 <= err_2) {
679 convertHomogeneousMatrix(pose1, cMo1);
682 convertHomogeneousMatrix(pose2, *cMo2);
688 convertHomogeneousMatrix(pose2, cMo1);
690 convertHomogeneousMatrix(pose1, *cMo2);
694 matd_destroy(pose1.R);
695 matd_destroy(pose1.t);
697 matd_destroy(pose2.t);
699 matd_destroy(pose2.R);
707 bool getZAlignedWithCameraAxis() {
return m_zAlignedWithCameraFrame; }
709 bool getAprilTagDecodeSharpening(
double &decodeSharpening)
const
712 decodeSharpening = m_td->decode_sharpening;
718 bool getNbThreads(
int &nThreads)
const
721 nThreads = m_td->nthreads;
727 bool getQuadDecimate(
float &quadDecimate)
const
730 quadDecimate = m_td->quad_decimate;
736 bool getQuadSigma(
float &quadSigma)
const
739 quadSigma = m_td->quad_sigma;
745 bool getRefineEdges(
bool &refineEdges)
const
748 refineEdges = (m_td->refine_edges ? true :
false);
754 bool getZAlignedWithCameraAxis()
const {
return m_zAlignedWithCameraFrame; }
756 std::vector<int> getTagsId()
const {
return m_tagsId; }
758 void setAprilTagDecodeSharpening(
double decodeSharpening)
761 m_td->decode_sharpening = decodeSharpening;
765 void setNbThreads(
int nThreads)
768 m_td->nthreads = nThreads;
772 void setQuadDecimate(
float quadDecimate)
775 m_td->quad_decimate = quadDecimate;
779 void setQuadSigma(
float quadSigma)
782 m_td->quad_sigma = quadSigma;
// Deliberate no-op: the body is empty, so the flag is ignored. Presumably the
// refine-decode detector option no longer exists in the bundled AprilTag
// version and the setter is kept so existing callers still compile — TODO confirm.
786 void setRefineDecode(
bool) {}
788 void setRefineEdges(
bool refineEdges)
791 m_td->refine_edges = refineEdges ? 1 : 0;
// Deliberate no-op: the body is empty, so the flag is ignored. Presumably kept
// only for source compatibility with callers of the old refine-pose option —
// TODO confirm against the deprecated public API.
795 void setRefinePose(
bool) {}
797 void setPoseEstimationMethod(
const vpPoseEstimationMethod &method) { m_poseEstimationMethod = method; }
799 void setZAlignedWithCameraAxis(
bool zAlignedWithCameraFrame) { m_zAlignedWithCameraFrame = zAlignedWithCameraFrame; }
802 std::map<vpPoseEstimationMethod, vpPose::vpPoseMethodType> m_mapOfCorrespondingPoseMethods;
803 vpPoseEstimationMethod m_poseEstimationMethod;
804 std::vector<int> m_tagsId;
805 vpAprilTagFamily m_tagFamily;
806 apriltag_detector_t *m_td;
807 apriltag_family_t *m_tf;
808 zarray_t *m_detections;
809 bool m_zAlignedWithCameraFrame;
815 : m_displayTag(false), m_displayTagColor(
vpColor::none), m_displayTagThickness(2),
816 m_poseEstimationMethod(poseEstimationMethod), m_tagFamily(tagFamily), m_defaultCam(),
817 m_impl(new Impl(tagFamily, poseEstimationMethod))
822 :
vpDetectorBase(o), m_displayTag(false), m_displayTagColor(
vpColor::none), m_displayTagThickness(2),
823 m_poseEstimationMethod(o.m_poseEstimationMethod), m_tagFamily(o.m_tagFamily), m_defaultCam(),
824 m_impl(new Impl(*o.m_impl))
849 std::vector<vpHomogeneousMatrix> cMo_vec;
850 const double tagSize = 1.0;
876 std::vector<vpHomogeneousMatrix> &cMo_vec, std::vector<vpHomogeneousMatrix> *cMo_vec2,
877 std::vector<double> *projErrors, std::vector<double> *projErrors2)
907 m_impl->displayFrames(I, cMo_vec, cam, size, color, thickness);
923 m_impl->displayFrames(I, cMo_vec, cam, size, color, thickness);
935 const vpColor &color,
unsigned int thickness)
const
937 m_impl->displayTags(I, tagsCorners, color, thickness);
949 const vpColor &color,
unsigned int thickness)
const
951 m_impl->displayTags(I, tagsCorners, color, thickness);
989 return m_impl->getPose(tagIndex, tagSize, cam, cMo, cMo2, projError, projError2);
1010 const std::map<int, double> &tagsSize)
const
1012 std::vector<std::vector<vpPoint> > tagsPoints3D;
1014 double default_size = -1;
1016 std::map<int, double>::const_iterator it = tagsSize.find(-1);
1017 if (it != tagsSize.end()) {
1018 default_size = it->second;
1021 for (
size_t i = 0; i < tagsId.size(); i++) {
1022 std::map<int, double>::const_iterator it = tagsSize.find(tagsId[i]);
1023 double tagSize = default_size;
1024 if (it == tagsSize.end()) {
1025 if (default_size < 0) {
1027 "Tag with id %d has no 3D size or there is no default 3D size defined", tagsId[i]));
1030 tagSize = it->second;
1032 std::vector<vpPoint> points3D(4);
1033 if (m_impl->getZAlignedWithCameraAxis()) {
1034 points3D[0] =
vpPoint(-tagSize / 2, tagSize / 2, 0);
1035 points3D[1] =
vpPoint(tagSize / 2, tagSize / 2, 0);
1036 points3D[2] =
vpPoint(tagSize / 2, -tagSize / 2, 0);
1037 points3D[3] =
vpPoint(-tagSize / 2, -tagSize / 2, 0);
1039 points3D[0] =
vpPoint(-tagSize / 2, -tagSize / 2, 0);
1040 points3D[1] =
vpPoint(tagSize / 2, -tagSize / 2, 0);
1041 points3D[2] =
vpPoint(tagSize / 2, tagSize / 2, 0);
1042 points3D[3] =
vpPoint(-tagSize / 2, tagSize / 2, 0);
1044 tagsPoints3D.push_back(points3D);
1047 return tagsPoints3D;
1066 return m_impl->setAprilTagDecodeSharpening(decodeSharpening);
1072 double decodeSharpening = 0.25;
1073 m_impl->getAprilTagDecodeSharpening(decodeSharpening);
1075 m_impl->getNbThreads(nThreads);
1076 float quadDecimate = 1;
1077 m_impl->getQuadDecimate(quadDecimate);
1078 float quadSigma = 0;
1079 m_impl->getQuadSigma(quadSigma);
1080 bool refineEdges =
true;
1081 m_impl->getRefineEdges(refineEdges);
1082 bool zAxis = m_impl->getZAlignedWithCameraAxis();
1086 m_impl->setAprilTagDecodeSharpening(decodeSharpening);
1087 m_impl->setNbThreads(nThreads);
1088 m_impl->setQuadDecimate(quadDecimate);
1089 m_impl->setQuadSigma(quadSigma);
1090 m_impl->setRefineEdges(refineEdges);
1091 m_impl->setZAlignedWithCameraAxis(zAxis);
1102 m_impl->setNbThreads(nThreads);
1114 m_impl->setPoseEstimationMethod(poseEstimationMethod);
1145#if defined(VISP_BUILD_DEPRECATED_FUNCTIONS)
1151 m_impl->setRefineDecode(refineDecode);
1171#if defined(VISP_BUILD_DEPRECATED_FUNCTIONS)
1182 swap(o1.m_impl, o2.m_impl);
1192 m_impl->setZAlignedWithCameraAxis(zAlignedWithCameraFrame);
1195#elif !defined(VISP_BUILD_SHARED_LIBS)
1198void dummy_vpDetectorAprilTag() {}
Type * data
Address of the first element of the data array.
Generic class defining intrinsic camera parameters.
Class to define RGB colors available for display functionalities.
static const vpColor none
static const vpColor blue
static const vpColor yellow
static const vpColor green
void setZAlignedWithCameraAxis(bool zAlignedWithCameraFrame)
std::vector< std::vector< vpImagePoint > > getTagsCorners() const
void setAprilTagQuadDecimate(float quadDecimate)
friend void swap(vpDetectorAprilTag &o1, vpDetectorAprilTag &o2)
virtual ~vpDetectorAprilTag()
void displayFrames(const vpImage< unsigned char > &I, const std::vector< vpHomogeneousMatrix > &cMo_vec, const vpCameraParameters &cam, double size, const vpColor &color, unsigned int thickness=1) const
vpDetectorAprilTag & operator=(vpDetectorAprilTag o)
std::vector< std::vector< vpPoint > > getTagsPoints3D(const std::vector< int > &tagsId, const std::map< int, double > &tagsSize) const
bool getPose(size_t tagIndex, double tagSize, const vpCameraParameters &cam, vpHomogeneousMatrix &cMo, vpHomogeneousMatrix *cMo2=NULL, double *projError=NULL, double *projError2=NULL)
unsigned int m_displayTagThickness
void setAprilTagRefineDecode(bool refineDecode)
void setAprilTagRefineEdges(bool refineEdges)
vpPoseEstimationMethod m_poseEstimationMethod
void setAprilTagQuadSigma(float quadSigma)
void setAprilTagNbThreads(int nThreads)
bool detect(const vpImage< unsigned char > &I)
vpDetectorAprilTag(const vpAprilTagFamily &tagFamily=TAG_36h11, const vpPoseEstimationMethod &poseEstimationMethod=HOMOGRAPHY_VIRTUAL_VS)
void setAprilTagPoseEstimationMethod(const vpPoseEstimationMethod &poseEstimationMethod)
std::vector< int > getTagsId() const
void displayTags(const vpImage< unsigned char > &I, const std::vector< std::vector< vpImagePoint > > &tagsCorners, const vpColor &color=vpColor::none, unsigned int thickness=1) const
void setAprilTagFamily(const vpAprilTagFamily &tagFamily)
void setAprilTagRefinePose(bool refinePose)
vpColor m_displayTagColor
void setAprilTagDecodeSharpening(double decodeSharpening)
std::vector< std::string > m_message
Message attached to each object.
std::vector< std::vector< vpImagePoint > > m_polygon
For each object, defines the polygon that contains the object.
size_t m_nb_objects
Number of detected objects.
static void displayLine(const vpImage< unsigned char > &I, const vpImagePoint &ip1, const vpImagePoint &ip2, const vpColor &color, unsigned int thickness=1, bool segment=true)
static void displayFrame(const vpImage< unsigned char > &I, const vpHomogeneousMatrix &cMo, const vpCameraParameters &cam, double size, const vpColor &color=vpColor::none, unsigned int thickness=1, const vpImagePoint &offset=vpImagePoint(0, 0), const std::string &frameName="", const vpColor &textColor=vpColor::black, const vpImagePoint &textOffset=vpImagePoint(15, 15))
Error that can be emitted by ViSP classes.
Implementation of an homogeneous matrix and operations on such kind of matrices.
vpRotationMatrix getRotationMatrix() const
vpTranslationVector getTranslationVector() const
Class that defines a 2D point in an image. This class is useful for image processing and stores only ...
void set_uv(double u, double v)
Definition of the vpImage class member functions.
unsigned int getWidth() const
Type * bitmap
points toward the bitmap
unsigned int getHeight() const
static void convertPoint(const vpCameraParameters &cam, const double &u, const double &v, double &x, double &y)
Class that defines a 3D point in the object frame and allows forward projection of a 3D point in the ...
void set_x(double x)
Set the point x coordinate in the image plane.
void setWorldCoordinates(double oX, double oY, double oZ)
void set_y(double y)
Set the point y coordinate in the image plane.
Class used for pose computation from N points (pose from point only). Some of the algorithms implemen...
void addPoints(const std::vector< vpPoint > &lP)
double computeResidual(const vpHomogeneousMatrix &cMo) const
Compute and return the sum of squared residuals expressed in meter^2 for the pose matrix cMo.
bool computePose(vpPoseMethodType method, vpHomogeneousMatrix &cMo, bool(*func)(const vpHomogeneousMatrix &)=NULL)