From fb432c28602202463f8958b189591d036551404e Mon Sep 17 00:00:00 2001 From: kevineck Date: Mon, 3 Jun 2019 14:56:50 -0400 Subject: [PATCH 1/6] Started adding the types --- ov_core/CMakeLists.txt | 2 +- ov_core/src/track/TrackBase.cpp | 1 + ov_core/src/types/IMU.h | 144 ++++++++++++++++++++++++++++++++ ov_core/src/types/JPLPose.h | 106 +++++++++++++++++++++++ ov_core/src/types/JPLQuat.h | 80 ++++++++++++++++++ ov_core/src/types/Type.h | 78 +++++++++++++++++ ov_core/src/types/Vec.h | 34 ++++++++ 7 files changed, 444 insertions(+), 1 deletion(-) create mode 100644 ov_core/src/types/IMU.h create mode 100644 ov_core/src/types/JPLPose.h create mode 100644 ov_core/src/types/JPLQuat.h create mode 100644 ov_core/src/types/Type.h create mode 100644 ov_core/src/types/Vec.h diff --git a/ov_core/CMakeLists.txt b/ov_core/CMakeLists.txt index 95be44640..2c6c64059 100644 --- a/ov_core/CMakeLists.txt +++ b/ov_core/CMakeLists.txt @@ -67,7 +67,7 @@ add_library(ov_core src/track/TrackAruco.cpp src/track/TrackDescriptor.cpp src/track/TrackKLT.cpp -) + src/types/Type.h src/types/Vec.h src/types/JPLQuat.h src/types/JPLPose.h src/types/IMU.h) target_link_libraries(ov_core ${thirdparty_libraries}) diff --git a/ov_core/src/track/TrackBase.cpp b/ov_core/src/track/TrackBase.cpp index 5f05df54e..d4c7b794f 100644 --- a/ov_core/src/track/TrackBase.cpp +++ b/ov_core/src/track/TrackBase.cpp @@ -1,4 +1,5 @@ #include "TrackBase.h" +#include "types/IMU.h" void TrackBase::display_active(cv::Mat &img_out, int r1, int g1, int b1, int r2, int g2, int b2) { diff --git a/ov_core/src/types/IMU.h b/ov_core/src/types/IMU.h new file mode 100644 index 000000000..4579f14c2 --- /dev/null +++ b/ov_core/src/types/IMU.h @@ -0,0 +1,144 @@ +// +// Created by keck on 5/23/19. 
+// + +#ifndef PROJECT_JPLQUAT_H +#define PROJECT_JPLQUAT_H + +#endif //PROJECT_JPLQUAT_H + +#include "utils/quat_ops.h" +#include "JPLPose.h" + +class IMU : public Type{ + +public: + + IMU() : Type(15){ + pose = new JPLPose(); + v = new Vec(3); + bg = new Vec(3); + ba = new Vec(3); + } + + ~IMU() { } + + void update(const Eigen::VectorXd dx){ + + assert(dx.rows()== _size); + + Eigen::Matrix newX = _value; + + Eigen::Matrix dq; + dq << .5*dx.block(0,0,3,1) , 1.0; + dq= dq/dq.norm(); + + newX.block(0,0,4,1) = quat_multiply(_value, dq); + newX.block(4,0,3,1) += dx.block(3,0,3,1); + + newX.block(7,0,3,1) += dx.block(6,0,3,1); + newX.block(10,0,3,1) += dx.block(9,0,3,1); + newX.block(13,0,3,1) += dx.block(12,0,3,1); + + set_value(newX); + + } + + //Sets the value of the estimate + void set_value(const Eigen::VectorXd new_value){ + + pose->set_value(new_value.block(0,0,7,1)); + v->set_value(new_value.block(7,0,3,1)); + bg->set_value(new_value.block(10,0,3,1)); + ba->set_value(new_value.block(13,0,3,1)); + + _value = new_value; + } + + //Sets the value of the estimate + void set_fej(const Eigen::VectorXd new_value){ + + pose->set_fej_value(new_value.block(0,0,7,1)); + v->set_fej(new_value.block(7,0,3,1)); + bg->set_fej(new_value.block(10,0,3,1)); + ba->set_fej(new_value.block(13,0,3,1)); + + _fej = new_value; + } + + Type* clone(){ + Type* Clone= new IMU(); + Clone->set_value(value()); + Clone->set_fej(fej()); + return Clone; + } + + //Rotation access + Eigen::Matrix Rot() const{ + return pose->Rot(); + } + + //FEJ Rotation access + Eigen::Matrix Rot_fej() const{ + return pose->Rot_fej(); + } + + //Rotation access + Eigen::Matrix quat() const{ + return pose->quat(); + } + + //FEJ Rotation access + Eigen::Matrix quat_fej() const{ + return pose->quat_fej(); + } + + + //position access + Eigen::Matrix pos() const{ + return pose->pos(); + } + + //FEJ position access + Eigen::Matrix pos_fej() const{ + return pose->pos_fej(); + } + + //Velocity access + Eigen::Matrix vel() 
const{ + return v->value(); + } + + //FEJ position access + Eigen::Matrix vel_fej() const{ + return v->fej(); + } + + //gyro bias access + Eigen::Matrix bias_g() const{ + return bg->value(); + } + + //FEJ gyro bias access + Eigen::Matrix bias_g_fej() const{ + return bg->fej(); + } + + //accel bias access + Eigen::Matrix bias_a() const{ + return ba->value(); + } + + //FEJ accel bias access + Eigen::Matrix bias_a_fej() const{ + return ba->fej(); + } + +protected: + + JPLPose *pose; + Vec *v; + Vec *bg; + Vec *ba; + +}; \ No newline at end of file diff --git a/ov_core/src/types/JPLPose.h b/ov_core/src/types/JPLPose.h new file mode 100644 index 000000000..9408e9293 --- /dev/null +++ b/ov_core/src/types/JPLPose.h @@ -0,0 +1,106 @@ +// +// Created by keck on 5/23/19. +// + +#ifndef PROJECT_JPLPOSE_H +#define PROJECT_JPLPOSE_H + +#endif //PROJECT_JPLPOSE_H + +#include "utils/quat_ops.h" +#include "JPLQuat.h" +#include "Vec.h" + +class JPLPose : public Type{ + +public: + + JPLPose() : Type(6){ + q = new JPLQuat(); + p = new Vec(3); + } + + ~JPLPose() { } + + void update(const Eigen::VectorXd dx){ + + assert(dx.rows()== _size); + + Eigen::Matrix newX = _value; + + Eigen::Matrix dq; + dq << .5*dx.block(0,0,3,1) , 1.0; + dq= dq/dq.norm(); + + newX.block(0,0,4,1) = quat_multiply(dq, quat()); + newX.block(4,0,3,1) += dx.block(3,0,3,1); + + set_value(newX); + + } + + //Sets the value of the estimate + void set_value(const Eigen::VectorXd new_value){ + + assert(new_value.rows() == 7); + + q->set_value(new_value.block(0,0,4,1)); + p->set_value(new_value.block(4,0,3,1)); + + _value = new_value; + } + + //Sets the value of the estimate + void set_fej_value(const Eigen::VectorXd new_value){ + + assert(new_value.rows() == 7); + q->set_fej(new_value.block(0,0,4,1)); + p->set_fej(new_value.block(4,0,3,1)); + + _fej = new_value; + } + + Type* clone(){ + Type* Clone= new JPLPose(); + Clone->set_value(value()); + Clone->set_fej(fej()); + return Clone; + } + + //Rotation access + 
Eigen::Matrix Rot() const{ + return q->Rot(); + } + + //FEJ Rotation access + Eigen::Matrix Rot_fej() const{ + return q->Rot_fej();; + } + + //Rotation access + Eigen::Matrix quat() const{ + return q->value(); + } + + //FEJ Rotation access + Eigen::Matrix quat_fej() const{ + return q->fej(); + } + + + //position access + Eigen::Matrix pos() const{ + return p->value(); + } + + //FEJ position access + Eigen::Matrix pos_fej() const{ + return p->fej(); + } + +protected: + + JPLQuat *q; + Vec *p; + +}; \ No newline at end of file diff --git a/ov_core/src/types/JPLQuat.h b/ov_core/src/types/JPLQuat.h new file mode 100644 index 000000000..fe4599102 --- /dev/null +++ b/ov_core/src/types/JPLQuat.h @@ -0,0 +1,80 @@ +// +// Created by keck on 5/23/19. +// + +#ifndef PROJECT_JPLQUAT_H +#define PROJECT_JPLQUAT_H + +#endif //PROJECT_JPLQUAT_H + +#include "utils/quat_ops.h" +#include "Type.h" + +class JPLQuat : public Type{ + +public: + + JPLQuat() : Type(3){} + + ~JPLQuat() { } + + void update(const Eigen::VectorXd dx){ + + assert(dx.rows()== _size); + + Eigen::Matrix dq; + dq << .5*dx , 1.0; + dq= dq/dq.norm(); + + set_value(quat_multiply(dq, _value)); + + } + + //Sets the value of the estimate + void set_value(const Eigen::VectorXd new_value){ + + assert(new_value.rows() == 7); + + _value = new_value; + + //compute associated rotation + _R = quat_2_Rot(new_value); + } + + //Sets the value of the estimate + void set_fej_value(const Eigen::VectorXd new_value){ + + assert(new_value.rows() == 7); + + _fej = new_value; + + //compute associated rotation + _Rfej = quat_2_Rot(new_value); + } + + Type* clone(){ + Type* Clone= new JPLQuat(); + Clone->set_value(value()); + return Clone; + } + + //Rotation access + Eigen::Matrix Rot() const{ + return _R; + } + + //FEJ Rotation access + Eigen::Matrix Rot_fej() const{ + return _Rfej; + } + +protected: + + + //Stores the rotation + Eigen::Matrix _R; + + //Stores the first-estimate rotation + Eigen::Matrix _Rfej; + +}; \ No newline at end of 
file diff --git a/ov_core/src/types/Type.h b/ov_core/src/types/Type.h new file mode 100644 index 000000000..6eafe8495 --- /dev/null +++ b/ov_core/src/types/Type.h @@ -0,0 +1,78 @@ +// +// Created by keck on 5/23/19. +// + +#ifndef PROJECT_TYPE_H +#define PROJECT_TYPE_H + +#include + +class Type { + +public: + + Type(int size_){_size= size_;} + + virtual ~Type() { }; + + //Set id of variable in covariance + virtual void set_local_id(int new_id){ + _id= new_id; + } + + int id(){ + return _id; + } + + int size(){ + return _size; + } + + //Update estimate using local perturbation 0 + virtual void update(const Eigen::VectorXd dx) = 0; + + //Returns the estimate + virtual Eigen::VectorXd value() const{ + return _value; + } + + //Returns the first estimate for fej + virtual Eigen::VectorXd fej() const{ + return _fej; + } + + //Sets the value of the estimate + virtual void set_value(const Eigen::VectorXd new_value){ + _value = new_value; + } + + //Sets the value of the estimate + virtual void set_fej(const Eigen::VectorXd new_value){ + _fej = new_value; + } + + virtual Type* clone()=0; + + //Used to find if variable is this one. Returns nullptr if not. + // Overloaded to find subvariables as well + virtual Type* check_if_same_variable(const Type* check){ + if (check == this){ + return this; + } + else{ + return nullptr; + } + } + +protected: + + Eigen::VectorXd _fej; + Eigen::VectorXd _value; + + int _id = -1; + int _size = -1; + + +}; + +#endif //PROJECT_TYPE_H \ No newline at end of file diff --git a/ov_core/src/types/Vec.h b/ov_core/src/types/Vec.h new file mode 100644 index 000000000..eb90c9017 --- /dev/null +++ b/ov_core/src/types/Vec.h @@ -0,0 +1,34 @@ +// +// Created by keck on 5/23/19. 
+// + +#ifndef PROJECT_VEC_H +#define PROJECT_VEC_H + +#endif //PROJECT_VEC_H + +#include "Type.h" + +class Vec: public Type { + +public: + + Vec(int dim) : Type(dim){} + + ~Vec() { } + + void update(const Eigen::VectorXd dx){ + + assert(dx.rows() == _size); + set_value(_value + dx); + } + + Type* clone(){ + Type* Clone= new Vec(_size); + Clone->set_value(value()); + Clone->set_fej(fej()); + return Clone; + } + + +}; \ No newline at end of file From 63e96de1d44f672546ef90efbec9f67f14f75f1c Mon Sep 17 00:00:00 2001 From: kevineck Date: Tue, 4 Jun 2019 07:39:22 -0400 Subject: [PATCH 2/6] started adding state --- ov_core/CMakeLists.txt | 2 +- ov_core/src/state/State.h | 58 ++++++++++++++++++++++ ov_core/src/track/TrackBase.cpp | 2 +- ov_core/src/types/IMU.h | 8 +-- ov_core/src/types/{JPLPose.h => PoseJPL.h} | 14 +++--- ov_core/src/types/Vec.h | 6 +-- 6 files changed, 74 insertions(+), 16 deletions(-) create mode 100644 ov_core/src/state/State.h rename ov_core/src/types/{JPLPose.h => PoseJPL.h} (93%) diff --git a/ov_core/CMakeLists.txt b/ov_core/CMakeLists.txt index 2c6c64059..d3e965de8 100644 --- a/ov_core/CMakeLists.txt +++ b/ov_core/CMakeLists.txt @@ -67,7 +67,7 @@ add_library(ov_core src/track/TrackAruco.cpp src/track/TrackDescriptor.cpp src/track/TrackKLT.cpp - src/types/Type.h src/types/Vec.h src/types/JPLQuat.h src/types/JPLPose.h src/types/IMU.h) + src/types/Type.h src/types/Vec.h src/types/JPLQuat.h src/types/PoseJPL.h src/types/IMU.h src/state/State.h) target_link_libraries(ov_core ${thirdparty_libraries}) diff --git a/ov_core/src/state/State.h b/ov_core/src/state/State.h new file mode 100644 index 000000000..21611a3e0 --- /dev/null +++ b/ov_core/src/state/State.h @@ -0,0 +1,58 @@ +// +// Created by keck on 6/3/19. 
+// + +#ifndef PROJECT_STATE_H +#define PROJECT_STATE_H + +#include "types/IMU.h" +#include "types/Vec.h" +#include "types/PoseJPL.h" +#include + +class Landmark; + + +class State{ + + + //Return pointer to IMU + IMU *imu(){ + return _imu; + } + + + //Return reference to covariance + Eigen::MatrixXd& Cov(){ + return _Cov; + } + +protected: + + //Covariance of the state + Eigen::MatrixXd _Cov; + + //Pointer to the "active" IMU state + + IMU *_imu; + + + // Calibration poses for each camera (R_ItoC, p_IinC) + std::map calib_IMUtoCAM; + + + // Our current set of SLAM features (3d positions) + std::map features_SLAM; + + //Time offset base IMU to camera + Vec* calib_dt_CAMtoIMU; + + //Camera intrinsics + std::map cam_intrinsics; + + +}; + + + +#endif //PROJECT_STATE_H diff --git a/ov_core/src/track/TrackBase.cpp b/ov_core/src/track/TrackBase.cpp index d4c7b794f..82cad91ac 100644 --- a/ov_core/src/track/TrackBase.cpp +++ b/ov_core/src/track/TrackBase.cpp @@ -1,5 +1,5 @@ #include "TrackBase.h" -#include "types/IMU.h" +#include "state/State.h" void TrackBase::display_active(cv::Mat &img_out, int r1, int g1, int b1, int r2, int g2, int b2) { diff --git a/ov_core/src/types/IMU.h b/ov_core/src/types/IMU.h index 4579f14c2..7c58a3a64 100644 --- a/ov_core/src/types/IMU.h +++ b/ov_core/src/types/IMU.h @@ -8,14 +8,14 @@ #endif //PROJECT_JPLQUAT_H #include "utils/quat_ops.h" -#include "JPLPose.h" +#include "PoseJPL.h" class IMU : public Type{ public: IMU() : Type(15){ - pose = new JPLPose(); + pose = new PoseJPL(); v = new Vec(3); bg = new Vec(3); ba = new Vec(3); @@ -33,7 +33,7 @@ class IMU : public Type{ dq << .5*dx.block(0,0,3,1) , 1.0; dq= dq/dq.norm(); - newX.block(0,0,4,1) = quat_multiply(_value, dq); + newX.block(0,0,4,1) = quat_multiply(dq, _value); newX.block(4,0,3,1) += dx.block(3,0,3,1); newX.block(7,0,3,1) += dx.block(6,0,3,1); @@ -136,7 +136,7 @@ class IMU : public Type{ protected: - JPLPose *pose; + PoseJPL *pose; Vec *v; Vec *bg; Vec *ba; diff --git 
a/ov_core/src/types/JPLPose.h b/ov_core/src/types/PoseJPL.h similarity index 93% rename from ov_core/src/types/JPLPose.h rename to ov_core/src/types/PoseJPL.h index 9408e9293..8337b3fd4 100644 --- a/ov_core/src/types/JPLPose.h +++ b/ov_core/src/types/PoseJPL.h @@ -5,22 +5,20 @@ #ifndef PROJECT_JPLPOSE_H #define PROJECT_JPLPOSE_H -#endif //PROJECT_JPLPOSE_H - #include "utils/quat_ops.h" #include "JPLQuat.h" #include "Vec.h" -class JPLPose : public Type{ +class PoseJPL : public Type{ public: - JPLPose() : Type(6){ + PoseJPL() : Type(6){ q = new JPLQuat(); p = new Vec(3); } - ~JPLPose() { } + ~PoseJPL() { } void update(const Eigen::VectorXd dx){ @@ -61,7 +59,7 @@ class JPLPose : public Type{ } Type* clone(){ - Type* Clone= new JPLPose(); + Type* Clone= new PoseJPL(); Clone->set_value(value()); Clone->set_fej(fej()); return Clone; @@ -103,4 +101,6 @@ class JPLPose : public Type{ JPLQuat *q; Vec *p; -}; \ No newline at end of file +}; + +#endif //PROJECT_JPLPOSE_H \ No newline at end of file diff --git a/ov_core/src/types/Vec.h b/ov_core/src/types/Vec.h index eb90c9017..37732fbb4 100644 --- a/ov_core/src/types/Vec.h +++ b/ov_core/src/types/Vec.h @@ -5,8 +5,6 @@ #ifndef PROJECT_VEC_H #define PROJECT_VEC_H -#endif //PROJECT_VEC_H - #include "Type.h" class Vec: public Type { @@ -31,4 +29,6 @@ class Vec: public Type { } -}; \ No newline at end of file +}; + +#endif //PROJECT_VEC_H \ No newline at end of file From 291ae51004a38726c8b2d4af249f10d080fa4be4 Mon Sep 17 00:00:00 2001 From: Your Name Date: Tue, 4 Jun 2019 11:48:33 -0400 Subject: [PATCH 3/6] Added the State, Options, and StateHelper classes. 
Also added doxygen comments --- Doxyfile-mcss | 38 +++- ov_core/src/state/Options.h | 35 ++++ ov_core/src/state/State.h | 194 ++++++++++++++++-- ov_core/src/state/StateHelper.h | 343 ++++++++++++++++++++++++++++++++ ov_core/src/track/TrackBase.cpp | 2 +- ov_core/src/types/IMU.h | 163 +++++++++++---- ov_core/src/types/JPLQuat.h | 40 +++- ov_core/src/types/PoseJPL.h | 99 +++++++-- ov_core/src/types/Type.h | 61 +++++- ov_core/src/types/Vec.h | 25 ++- 10 files changed, 903 insertions(+), 97 deletions(-) create mode 100644 ov_core/src/state/Options.h create mode 100644 ov_core/src/state/StateHelper.h diff --git a/Doxyfile-mcss b/Doxyfile-mcss index 8b52b3966..fcc55d976 100644 --- a/Doxyfile-mcss +++ b/Doxyfile-mcss @@ -3,13 +3,39 @@ GENERATE_HTML = NO GENERATE_XML = YES XML_PROGRAMLISTING = NO -##! HTML_EXTRA_STYLESHEET = \ -##! https://fonts.googleapis.com/css?family=Libre+Baskerville:400,400i,700,700i%7CSource+Code+Pro:400,400i,600 \ -##! ../css/m-light+doxygen.compiled.css -##! M_THEME_COLOR = #cb4b16 -##! M_FAVICON = favicon-light.png +HTML_EXTRA_STYLESHEET = \ + https://fonts.googleapis.com/css?family=Source+Sans+Pro:400,400i,600,600i%7CSource+Code+Pro:400,400i,600&subset=latin-ext \ + ../css/m-dark+documentation.compiled.css + + +## No need to expose to-do list or bug list in public docs. +GENERATE_TODOLIST = NO +GENERATE_BUGLIST = NO + +## Add nice shorthand for code comments +## https://mcss.mosra.cz/documentation/doxygen/#code-highlighting +ALIASES += \ + "cb{1}=@code{\1}" \ + "ce=@endcode" \ + "cpp=@code{.cpp}" \ + "cmake=@code{.cmake}" \ + "m_div{1}=@xmlonly@endxmlonly" \ + "m_enddiv=@xmlonly@endxmlonly" \ + "m_span{1}=@xmlonly@endxmlonly" \ + "m_endspan=@xmlonly@endxmlonly" \ + "m_class{1}=@xmlonly@endxmlonly" \ + "m_footernavigation=@xmlonly@endxmlonly" \ + "m_examplenavigation{2}=@xmlonly@endxmlonly" \ + "m_keywords{1}=@xmlonly@endxmlonly" \ + "m_keyword{3}=@xmlonly@endxmlonly" \ + "m_enum_values_as_keywords=@xmlonly@endxmlonly" + + +##! 
M_THEME_COLOR = #22272e +##! M_FAVICON = favicon-dark.png ##! M_LINKS_NAVBAR1 = pages annotated files ##! M_LINKS_NAVBAR2 = \ -##! "GitHub" \ No newline at end of file +##! "GitHub" +##! M_SEARCH_DOWNLOAD_BINARY = YES \ No newline at end of file diff --git a/ov_core/src/state/Options.h b/ov_core/src/state/Options.h new file mode 100644 index 000000000..9f907a2c5 --- /dev/null +++ b/ov_core/src/state/Options.h @@ -0,0 +1,35 @@ +// +// Created by keck on 6/4/19. +// + +#ifndef OPEN_VINS_OPTIONS_H +#define OPEN_VINS_OPTIONS_H + +/**@brief Struct which stores all our filter options + */ + +struct Options{ + + /// Bool to determine whether or not to calibrate imu-to-camera pose + bool do_calib_camera_pose; + + /// Bool to determine whether or not to calibrate camera intrinsics + bool do_calib_camera_intrinsics; + + /// Bool to determine whether or not to calibrate camera to IMU time offset + bool do_calib_camera_timeoffset; + + /// Max clone size of sliding window + size_t max_clone_size; + + /// Max number of estimated SLAM features + size_t max_slam_features; + + /// Number of cameras + size_t num_cameras; + +}; + + + +#endif //OPEN_VINS_OPTIONS_H diff --git a/ov_core/src/state/State.h b/ov_core/src/state/State.h index 21611a3e0..20899f29f 100644 --- a/ov_core/src/state/State.h +++ b/ov_core/src/state/State.h @@ -8,47 +8,215 @@ #include "types/IMU.h" #include "types/Vec.h" #include "types/PoseJPL.h" +#include "Options.h" #include class Landmark; +/** @brief Class which manages the filter state +*/ class State{ +public: - //Return pointer to IMU + /** @brief Default Constructor + * @param options_ Options structure containing filter options + */ + State(Options &options_) : _options(options_){} + + ~State(){} + + /** + * @brief Initializes pointers and covariance + * TODO: Read initial values and covariance from options + */ + void initialize_variables(){ + + double current_id = 0; + _imu = new IMU(); + _imu->set_local_id(current_id); + insert_variable(_imu); + + 
current_id += 15; + + //Camera to IMU time offset + _calib_dt_CAMtoIMU = new Vec(1); + if (_options.do_calib_camera_timeoffset){ + _calib_dt_CAMtoIMU->set_local_id(current_id); + insert_variable(_calib_dt_CAMtoIMU); + current_id++; + } + + for (size_t i = 0; i < _options.num_cameras; i++){ + //Allocate pose + PoseJPL *pose = new PoseJPL(); + //Allocate intrinsics + Vec *intrin = new Vec(8); + + //Add these to the corresponding maps + _calib_IMUtoCAM.insert({i, pose}); + _cam_intrinsics.insert({i, intrin}); + + //If calibrating camera-imu pose, add to variables + if (_options.do_calib_camera_pose){ + pose->set_local_id(current_id); + current_id += 6; + insert_variable(pose); + } + //If calibrating camera intrinsics, add to variables + if (_options.do_calib_camera_intrinsics){ + intrin->set_local_id(current_id); + current_id += 8; + insert_variable(intrin); + } + } + + _Cov = Eigen::MatrixXd::Zero(current_id, current_id); + + + + } + + + /** @brief Access IMU pointer + * + */ IMU *imu(){ return _imu; } - //Return reference to covariance + /** @brief Access reference to covariance + */ Eigen::MatrixXd& Cov(){ return _Cov; } + /** @brief Access variables in state + */ + std::vector & variables(){ + return _variables; + } + + /** @brief Insert new variable + * @param newType Variable to insert + */ + void insert_variable(Type* newType){ + _variables.push_back(newType); + } + + /** + * @brief For a given set of variables, this will this will calculate a smaller covariance + * That only includes the ones specified with all cross terms + * @param small_variables Vector of variables whose marginal covariance is desired + */ + Eigen::MatrixXd get_marginal_covariance(const std::vector &small_variables){ + + // Calculate the marginal covariance size we need ot make our matrix + int cov_size=0; + for (size_t i=0; i < small_variables.size(); i++){ + cov_size += small_variables[i]->size(); + } + + // Construct our return covariance + Eigen::MatrixXd Small_cov(cov_size,cov_size); 
+ + // For each variable, lets copy over all other variable cross terms + // Note: this copies over itself to when i_index=k_index + int i_index=0; + for (size_t i=0; i < small_variables.size(); i++){ + int k_index=0; + for (size_t k=0; k < small_variables.size(); k++){ + Small_cov.block(i_index, k_index, small_variables[i]->size(),small_variables[k]->size()) = + _Cov.block(small_variables[i]->id(), small_variables[k]->id(), small_variables[i]->size(), + small_variables[k]->size()); + k_index += small_variables[k]->size(); + } + i_index += small_variables[i]->size(); + } + + // Return the covariance + return Small_cov; + } + + /** + * @brief Given an update vector, updates each variable + * @param dx Correction vector for the entire filter state + */ + void update(const Eigen::MatrixXd dx){ + for (size_t i=0; i < _variables.size(); i++){ + _variables[i]->update(dx.block(_variables[i]->id(),0,_variables[i]->size(),1)); + } + } + + /** + * @brief Insert new clone + * @param timestamp Timestamp associated with new clone + * @param pose Pointer to new clone pose + */ + void insert_clone(double timestamp, PoseJPL* pose){ + _clones_IMU.insert({timestamp, pose}); + } + + /// Access current timestamp + double timestamp(){ + return _timestamp; + } + + //Access options + Options& options(){ + return _options; + } + + //Access imu-camera time offset pointer + Vec* calib_dt_CAMtoIMU(){ + return _calib_dt_CAMtoIMU; + } + + /** + * @brief Get clone at a given timestamp + * @param timestamp + */ + PoseJPL* get_clone(double timestamp){ + return _clones_IMU[timestamp]; + } + protected: - //Covariance of the state - Eigen::MatrixXd _Cov; + /// Current timestamp + double _timestamp; + + ///Structure containing filter options + Options _options; + - //Pointer to the "active" IMU state + /// Covariance of the state + Eigen::MatrixXd _Cov; + /// Pointer to the "active" IMU state IMU *_imu; - // Calibration poses for each camera (R_ItoC, p_IinC) - std::map calib_IMUtoCAM; + /// 
Calibration poses for each camera (R_ItoC, p_IinC) + std::map _calib_IMUtoCAM; + + + /// Our current set of SLAM features (3d positions) + std::map _features_SLAM; + + + /// Map between imaging times and clone poses + std::map _clones_IMU; + /// Time offset base IMU to camera + Vec* _calib_dt_CAMtoIMU; - // Our current set of SLAM features (3d positions) - std::map features_SLAM; + /// Camera intrinsics + std::map _cam_intrinsics; - //Time offset base IMU to camera - Vec* calib_dt_CAMtoIMU; - //Camera intrinsics - std::map cam_intrinsics; + std::vector _variables; }; diff --git a/ov_core/src/state/StateHelper.h b/ov_core/src/state/StateHelper.h new file mode 100644 index 000000000..48a53b466 --- /dev/null +++ b/ov_core/src/state/StateHelper.h @@ -0,0 +1,343 @@ +// +// Created by keck on 6/4/19. +// + +#ifndef OPEN_VINS_STATEHELPER_H +#define OPEN_VINS_STATEHELPER_H + +/** @brief Class where all the static functions used to manipulate the covariance are implemented + */ + +#include "State.h" + +class StateHelper{ + +public: + + /** + * @brief Clones "variable to clone" and places it at end of covariance + * @param state Pointer to state + * @param variable_to_clone Pointer to variable that will be cloned + */ + static Type* clone(State* state, Type* variable_to_clone) { + + //Get total size of new cloned variables, and the old covariance size + int total_size = variable_to_clone->size(); + int old_size = (int)state->Cov().rows(); + int new_loc = (int)state->Cov().rows(); + + // Resize both our covariance to the new size + state->Cov().conservativeResizeLike(Eigen::MatrixXd::Zero(state->Cov().rows()+total_size,state->Cov().rows()+total_size)); + + // What is the new state, and variable we inserted + const std::vector new_variables = state->variables(); + Type* new_clone = nullptr; + + // Loop through all variables, and find the variable that we are going to clone + for (size_t k=0; k < state->variables().size(); k++){ + + // Skip this if it is not the same + Type* 
type_check = state->variables()[k]->check_if_same_variable(variable_to_clone); + if (type_check == nullptr) + continue; + + //So we will clone this one + int old_loc = type_check->id(); + + // Copy the covariance elements + state->Cov().block(new_loc,new_loc,total_size,total_size) = state->Cov().block(old_loc,old_loc,total_size,total_size); + state->Cov().block(0,new_loc,old_size,total_size) = state->Cov().block(0,old_loc,old_size,total_size); + state->Cov().block(new_loc,0,total_size,old_size) = state->Cov().block(old_loc,0,total_size,old_size); + + //Create clone from the type being cloned + new_clone = type_check->clone(); + new_clone->set_local_id(new_loc); + + //Add to variable list + state->insert_variable(new_clone); + break; + + } + + // Check if the current state has the GPS enabled + if(new_clone == nullptr) { + std::cerr << "CovManager::clone() - Called on variable is not in the state" << std::endl; + std::cerr << "CovManager::clone() - Ensure that the variable specified is a variable, or sub-variable.." 
<< std::endl; + std::exit(EXIT_FAILURE); + } + + return new_clone; + + } + + + /** @brief Performs EKF Update + * @param state Pointer to state + * @param H_order Variable ordering used in the compressed Jacobian + * @param H Condensed Jacobian of updating measurement + * @param res residual of updating measurement + * @param R updating measurement covariance + * + */ + static void EKFUpdate(State* state, const std::vector &H_order, const Eigen::MatrixXd &H, + const Eigen::VectorXd &res, const Eigen::MatrixXd &R){ + + //========================================================== + //========================================================== + // Part of the Kalman Gain K = M*S^{-1} + + assert(res.rows() == R.rows()); + assert(H.rows() == res.rows()); + + + Eigen::MatrixXd M_a = Eigen::MatrixXd::Zero(state->Cov().rows(), res.rows()); + + std::vector H_id; + std::vector H_is_active; + int current_it=0; + + // Get the location in small jacobian for each measuring variable + for (Type* meas_var: H_order){ + H_id.push_back(current_it); + current_it+=meas_var->size(); + } + + auto Cov = state->Cov(); + + //========================================================== + //========================================================== + // For each active variable find its M = P*H^T + for (Type* var: state->variables()) { + // Sum up effect of each subjacobian= K_i= \sum_m (P_im Hm^T) + Eigen::MatrixXd M_i = Eigen::MatrixXd::Zero(var->size(), res.rows()); + for (size_t i = 0; i < H_order.size(); i++) { + Type *meas_var = H_order[i]; + M_i.noalias() += Cov.block(var->id(), meas_var->id(), var->size(), meas_var->size()) * + H.block(0, H_id[i], H.rows(), meas_var->size()).transpose(); + } + M_a.block(var->id(), 0, var->size(), res.rows()) = M_i; + } + + //========================================================== + //========================================================== + //Get S + Eigen::MatrixXd P_small = state->get_marginal_covariance(H_order); + + // S = H*Cov*H' + R + 
Eigen::MatrixXd S(R.rows(), R.rows()); + S.triangularView() = H*P_small*H.transpose(); + S.triangularView() += R; + //S = 0.5*(S+S.transpose()); + + // Inverse our S (should we use a more stable method here??) + Eigen::MatrixXd Sinv = Eigen::MatrixXd::Identity(R.rows(), R.rows()); + S.selfadjointView().llt().solveInPlace(Sinv); + Eigen::MatrixXd K = M_a*Sinv.selfadjointView(); + + // Update Covariance + Cov.triangularView() -= K*M_a.transpose(); + Cov = Cov.selfadjointView(); + + // Calculate our delta and pass it to update all our state variables + state->update(K*res); + + } + + + /** + * @brief Initializes new variable into covariance + * @param state Pointer to state + * @param new_variable Pointer to variable to be initialized + * @param H_order Vector of pointers in order they are contained in the condensed state Jacobian + * @param H_R Jacobian of initializing measurements wrt variables in H_order + * @param H_L Jacobian of initializing measurements wrt new variable + * @param R Covariance of initializing measurements + * @param res Residual of initializing measurements + */ + static void invertible_initialize(State* state, Type* new_variable, const std::vector &H_order, const Eigen::MatrixXd &H_R, + const Eigen::MatrixXd &H_L, const Eigen::MatrixXd &R, const Eigen::VectorXd &res){ + + //========================================================== + //========================================================== + // Part of the Kalman Gain K = M*S^{-1} + + assert(H_L.rows() == H_L.cols()); + assert(new_variable->size() == H_L.rows()); + + Eigen::MatrixXd H_Linv = H_L.inverse(); + + Eigen::MatrixXd M_a = Eigen::MatrixXd::Zero(state->Cov().rows(), res.rows()); + + std::vector H_id; + std::vector H_is_active; + int current_it=0; + + // Get the location in small jacobian for each measuring variable + for (Type* meas_var: H_order){ + H_id.push_back(current_it); + current_it+=meas_var->size(); + } + + //========================================================== + 
//========================================================== + // For each active variable find its M = P*H^T + for (Type* var: state->variables()) { + // Sum up effect of each subjacobian= K_i= \sum_m (P_im Hm^T) + Eigen::MatrixXd M_i = Eigen::MatrixXd::Zero(var->size(), res.rows()); + for (size_t i = 0; i < H_order.size(); i++) { + Type *meas_var = H_order[i]; + M_i += state->Cov().block(var->id(), meas_var->id(), var->size(), meas_var->size()) * + H_R.block(0, H_id[i], H_R.rows(), meas_var->size()).transpose(); + } + M_a.block(var->id(), 0, var->size(), res.rows()) = M_i; + } + + + //========================================================== + //========================================================== + //Get covariance of this small jacobian + Eigen::MatrixXd P_small= state->get_marginal_covariance(H_order); + + // M = H_R*Cov*H_R' + R + Eigen::MatrixXd M(H_R.rows(), H_R.rows()); + M.triangularView() = H_R*P_small*H_R.transpose(); + M.triangularView() += R; + + // Covariance of the variable/landmark that will be initialized + Eigen::MatrixXd P_LL = H_Linv*M.selfadjointView()*H_Linv.transpose(); + + size_t oldSize = state->Cov().rows(); + + state->Cov().conservativeResizeLike(Eigen::MatrixXd::Zero(state->Cov().rows()+new_variable->size(), + state->Cov().rows()+new_variable->size())); + + state->Cov().block(0, oldSize, oldSize, new_variable->size()).noalias() = -M_a*H_Linv.transpose(); + state->Cov().block(oldSize, 0, new_variable->size(), oldSize) = state->Cov().block(0, oldSize, oldSize, new_variable->size()).transpose(); + state->Cov().block(oldSize, oldSize, new_variable->size(), new_variable->size()) = P_LL; + + + // Update the variable that will be initialized (invertible systems can only update the new variable. 
However this + // Update should be almost zero if we already used a conditional Gauss-Newton to solve for the initial estimate + new_variable->update(H_Linv*res); + + // Now collect results, and add it to the state variables + new_variable->set_local_id((int)(state->Cov().rows()-new_variable->size())); + state->insert_variable(new_variable); + + + } + + /** + * @brief Initializes new variable into covariance. Uses Givens to separate into updating and + * initializing systems (therefore system must be fed as isotropic) + * @param state Pointer to state + * @param new_variable Pointer to variable to be initialized + * @param H_order Vector of pointers in order they are contained in the condensed state Jacobian + * @param H_R Jacobian of initializing measurements wrt variables in H_order + * @param H_L Jacobian of initializing measurements wrt new variable + * @param R Covariance of initializing measurements + * @param res Residual of initializing measurements + */ + static void initialize(State* state, Type* new_variable, const std::vector &H_order, Eigen::MatrixXd &H_R, + Eigen::MatrixXd &H_L, Eigen::MatrixXd &R, Eigen::VectorXd &res){ + + //========================================================== + //========================================================== + // Part of the Kalman Gain K = M*S^{-1} + + size_t new_var_size = new_variable->size(); + assert (new_var_size == H_L.cols()); + + Eigen::JacobiRotation tempHo_GR; + for (int n = 0; n < H_L.cols(); ++n) { + for (int m = (int) H_L.rows() - 1; m > n; m--) { + // Givens matrix G + tempHo_GR.makeGivens(H_L(m - 1, n), H_L(m, n)); + // Multiply G to the corresponding lines (m-1,m) in each matrix + // Note: we only apply G to the nonzero cols [n:Ho.cols()-n-1], while + // it is equivalent to applying G to the entire cols [0:Ho.cols()-1]. 
+ (H_L.block(m - 1, n, 2, H_L.cols() - n)).applyOnTheLeft(0, 1, tempHo_GR.adjoint()); + (res.block(m - 1, 0, 2, 1)).applyOnTheLeft(0, 1, tempHo_GR.adjoint()); + (H_R.block(m - 1, 0, 2, H_R.cols())).applyOnTheLeft(0, 1, tempHo_GR.adjoint()); + } + } + + + //Separate into initializing and updating portions + Eigen::MatrixXd Hxinit = H_R.block(0, 0, new_var_size, H_R.cols()); + Eigen::MatrixXd Hup = H_R.block(new_var_size, 0, H_R.rows() - new_var_size, H_R.cols()); + + Eigen::MatrixXd H_finit = H_L.block(0, 0, new_var_size, new_var_size); + + Eigen::VectorXd resinit = res.block(0, 0, new_var_size, 1); + Eigen::VectorXd resup = res.block(new_var_size, 0, res.rows() - new_var_size, 1); + + Eigen::MatrixXd Rinit = R.block(0,0,new_var_size, new_var_size); + Eigen::MatrixXd Rup = R.block(new_var_size,new_var_size, R.rows()- new_var_size, R.rows() - new_var_size); + + //=========================================== + // Finally, initialize it in our state + invertible_initialize(state, new_variable, H_order, Hxinit, H_finit, + Rinit, resinit); + + //Update with updating portion + if (Hup.rows() > 0) { + StateHelper::EKFUpdate(state, H_order, Hup, resup, Rup); + } + + + } + + /** + * + * @param state Pointer to state + * @param last_w The estimated angular velocity at cloning time (used to estimate imu-cam time offset) + */ + + static void augment_clone(State* state, Eigen::Matrix last_w) { + + auto imu = state->imu(); + auto Cov = state->Cov(); + + // Call on our marginalizer to clone, it will add it to our vector of types + // NOTE: this will clone the clone pose to the END of the covariance... 
+ Type* posetemp = StateHelper::clone(state, imu->pose()); + + // Cast to a JPL pose type + PoseJPL* pose = dynamic_cast(posetemp); + + // Check that it was a valid cast + if(pose == nullptr) { + ROS_ERROR("INVALID OBJECT RETURNED FROM MARGINALIZER, EXITING!#!@#!@#"); + exit(EXIT_FAILURE); + } + + // Append the new clone to our clone vector + state->insert_clone(state->timestamp(), pose); + + // If we are doing time calibration, then our clones are a function of the time offset + // Logic is based on Mingyang Li and Anastasios I. Mourikis paper: + // http://journals.sagepub.com/doi/pdf/10.1177/0278364913515286 + if (state->options().do_calib_camera_timeoffset){ + // Jacobian to augment by + Eigen::Matrix dnc_dt = Eigen::MatrixXd::Zero(6,1); + dnc_dt.block(0,0,3,1)= last_w; + dnc_dt.block(3,0,3,1)= imu->vel(); + // Augment covariance with time offset Jacobian + Cov.block(0,pose->id(),Cov.rows(),6) += Cov.block(0,state->calib_dt_CAMtoIMU()->id(), + Cov.rows(),1)*dnc_dt.transpose(); + Cov.block(pose->id(),0,6,Cov.rows()) += dnc_dt*Cov.block(state->calib_dt_CAMtoIMU()->id(),0,1,Cov.rows()); + Cov.block(pose->id(),pose->id(),6,6) += dnc_dt*Cov(state->calib_dt_CAMtoIMU()->id(), + state->calib_dt_CAMtoIMU()->id())*dnc_dt.transpose(); + } + + } + + + + +}; + + + +#endif //OPEN_VINS_STATEHELPER_H diff --git a/ov_core/src/track/TrackBase.cpp b/ov_core/src/track/TrackBase.cpp index 82cad91ac..b5712eb07 100644 --- a/ov_core/src/track/TrackBase.cpp +++ b/ov_core/src/track/TrackBase.cpp @@ -1,5 +1,5 @@ #include "TrackBase.h" -#include "state/State.h" +#include "state/StateHelper.h" void TrackBase::display_active(cv::Mat &img_out, int r1, int g1, int b1, int r2, int g2, int b2) { diff --git a/ov_core/src/types/IMU.h b/ov_core/src/types/IMU.h index 7c58a3a64..feeb3a953 100644 --- a/ov_core/src/types/IMU.h +++ b/ov_core/src/types/IMU.h @@ -10,19 +10,38 @@ #include "utils/quat_ops.h" #include "PoseJPL.h" +/** + * @brief Derived Type class that implements an IMU state using a 
JPLPose, a vector velocity, vector gyro bias, + * and vector accel bias + * +*/ + class IMU : public Type{ public: IMU() : Type(15){ - pose = new PoseJPL(); - v = new Vec(3); - bg = new Vec(3); - ba = new Vec(3); + _pose = new PoseJPL(); + _v = new Vec(3); + _bg = new Vec(3); + _ba = new Vec(3); + + Eigen::Matrix imu0; + imu0.setZero(); + imu0(3) = 1.0; + + set_value(imu0); + set_fej(imu0); } ~IMU() { } + /** + * @brief Performs update operation using JPLQuat update for orientation, then vector updates for + * position, velocity, gyro bias, and accel bias (in that order) + * @param dx 15 DOF vector encoding update using the following order (q, p, v, bg, ba) + */ + void update(const Eigen::VectorXd dx){ assert(dx.rows()== _size); @@ -44,24 +63,28 @@ class IMU : public Type{ } - //Sets the value of the estimate + /**@brief Sets the value of the estimate + * @param new_value + */ void set_value(const Eigen::VectorXd new_value){ - pose->set_value(new_value.block(0,0,7,1)); - v->set_value(new_value.block(7,0,3,1)); - bg->set_value(new_value.block(10,0,3,1)); - ba->set_value(new_value.block(13,0,3,1)); + _pose->set_value(new_value.block(0,0,7,1)); + _v->set_value(new_value.block(7,0,3,1)); + _bg->set_value(new_value.block(10,0,3,1)); + _ba->set_value(new_value.block(13,0,3,1)); _value = new_value; } - //Sets the value of the estimate + /**@brief Sets the value of the first estimate + * @param new_value + */ void set_fej(const Eigen::VectorXd new_value){ - pose->set_fej_value(new_value.block(0,0,7,1)); - v->set_fej(new_value.block(7,0,3,1)); - bg->set_fej(new_value.block(10,0,3,1)); - ba->set_fej(new_value.block(13,0,3,1)); + _pose->set_fej_value(new_value.block(0,0,7,1)); + _v->set_fej(new_value.block(7,0,3,1)); + _bg->set_fej(new_value.block(10,0,3,1)); + _ba->set_fej(new_value.block(13,0,3,1)); _fej = new_value; } @@ -73,72 +96,128 @@ class IMU : public Type{ return Clone; } - //Rotation access + /// Rotation access Eigen::Matrix Rot() const{ - return pose->Rot(); + 
return _pose->Rot(); } - //FEJ Rotation access + /// FEJ Rotation access Eigen::Matrix Rot_fej() const{ - return pose->Rot_fej(); + return _pose->Rot_fej(); } - //Rotation access + /// Rotation access quat Eigen::Matrix quat() const{ - return pose->quat(); + return _pose->quat(); } - //FEJ Rotation access + /// FEJ Rotation access quat Eigen::Matrix quat_fej() const{ - return pose->quat_fej(); + return _pose->quat_fej(); } - //position access + /// Position access Eigen::Matrix pos() const{ - return pose->pos(); + return _pose->pos(); } - //FEJ position access + /// FEJ position access Eigen::Matrix pos_fej() const{ - return pose->pos_fej(); + return _pose->pos_fej(); } - //Velocity access + /// Velocity access Eigen::Matrix vel() const{ - return v->value(); + return _v->value(); } - //FEJ position access + // FEJ velocity access Eigen::Matrix vel_fej() const{ - return v->fej(); + return _v->fej(); } - //gyro bias access + /// Gyro bias access Eigen::Matrix bias_g() const{ - return bg->value(); + return _bg->value(); } - //FEJ gyro bias access + /// FEJ gyro bias access Eigen::Matrix bias_g_fej() const{ - return bg->fej(); + return _bg->fej(); } - //accel bias access + /// Accel bias access Eigen::Matrix bias_a() const{ - return ba->value(); + return _ba->value(); } - //FEJ accel bias access + // FEJ accel bias access Eigen::Matrix bias_a_fej() const{ - return ba->fej(); + return _ba->fej(); + } + + /** @brief Used to find the components inside the IMU if needed + * @param check variable to find + */ + Type* check_if_same_variable(Type* check){ + if (check== this){ + return this; + } + else if (check == _pose){ + return _pose; + } + else if (check == q()){ + return q(); + } + else if (check == p()){ + return p(); + } + else if (check == _v){ + return _v; + } + else if (check == _bg){ + return bg(); + } + else if (check == _ba){ + return ba(); + } + else{ + return nullptr; + } + } + + PoseJPL *pose(){ + return _pose; + } + JPLQuat *q(){ + return _pose->q(); + } + 
Vec *p(){ + return _pose->p(); + } + Vec *v(){ + return _v; + } + Vec *bg(){ + return _bg; + } + Vec *ba(){ + return _ba; } protected: - PoseJPL *pose; - Vec *v; - Vec *bg; - Vec *ba; + /// Pose subvariable + PoseJPL *_pose; + + /// Velocity subvariable + Vec *_v; + + /// Gyro bias subvariable + Vec *_bg; + + /// Accel bias subvariable + Vec *_ba; }; \ No newline at end of file diff --git a/ov_core/src/types/JPLQuat.h b/ov_core/src/types/JPLQuat.h index fe4599102..3eadcd8de 100644 --- a/ov_core/src/types/JPLQuat.h +++ b/ov_core/src/types/JPLQuat.h @@ -10,27 +10,54 @@ #include "utils/quat_ops.h" #include "Type.h" +/** + * @brief Derived Type class that implements the JPL convention quaternion with left-multiplicative error state + * +*/ + class JPLQuat : public Type{ public: - JPLQuat() : Type(3){} + /** + * @brief Quaternion constructor + * + */ + + JPLQuat() : Type(3){ + Eigen::Matrix q0; + q0.setZero(); + q0(3) = 1.0; + set_value(q0); + set_fej(q0); + } ~JPLQuat() { } + /** + * @brief Implements update operation by left-multiplying the current quaternion with a quaternion built + * from a small axis-angle perturbation + * @param dx Axis-angle representation of the perturbing quaternion + */ + void update(const Eigen::VectorXd dx){ assert(dx.rows()== _size); + //Build perturbing quaternion Eigen::Matrix dq; dq << .5*dx , 1.0; dq= dq/dq.norm(); + //Update estimate and recompute R set_value(quat_multiply(dq, _value)); } - //Sets the value of the estimate + /** + * @brief Sets the value of the estimate and recompute the rotation matrix + * @param new_value New value for the quaternion estimate + */ void set_value(const Eigen::VectorXd new_value){ assert(new_value.rows() == 7); @@ -41,7 +68,10 @@ class JPLQuat : public Type{ _R = quat_2_Rot(new_value); } - //Sets the value of the estimate + /** + * @brief Sets the fej value and recomputes the fej rotation matrix + * @param new_value New value for the quaternion estimate + */ void set_fej_value(const Eigen::VectorXd 
new_value){ assert(new_value.rows() == 7); @@ -58,12 +88,12 @@ class JPLQuat : public Type{ return Clone; } - //Rotation access + ///Rotation access Eigen::Matrix Rot() const{ return _R; } - //FEJ Rotation access + ///FEJ Rotation access Eigen::Matrix Rot_fej() const{ return _Rfej; } diff --git a/ov_core/src/types/PoseJPL.h b/ov_core/src/types/PoseJPL.h index 8337b3fd4..20dfdc726 100644 --- a/ov_core/src/types/PoseJPL.h +++ b/ov_core/src/types/PoseJPL.h @@ -9,17 +9,36 @@ #include "JPLQuat.h" #include "Vec.h" + +/** + * @brief Derived Type class that implements a pose (orientation plus position) with a JPL quaternion representation for + * the orientation + * +*/ + class PoseJPL : public Type{ public: PoseJPL() : Type(6){ - q = new JPLQuat(); - p = new Vec(3); + + //Initialize subvariables + _q = new JPLQuat(); + _p = new Vec(3); + + Eigen::Matrix pose0; + pose0.setZero(); + pose0(3) = 1.0; + set_value(pose0); + set_fej(pose0); } ~PoseJPL() { } + /** + *@brief Update q and p using a the JPLQuat update for orientation and vector update for position + * @param dx Correction vector (orientation then position) + */ void update(const Eigen::VectorXd dx){ assert(dx.rows()== _size); @@ -30,7 +49,10 @@ class PoseJPL : public Type{ dq << .5*dx.block(0,0,3,1) , 1.0; dq= dq/dq.norm(); + //Update orientation newX.block(0,0,4,1) = quat_multiply(dq, quat()); + + //Update position newX.block(4,0,3,1) += dx.block(3,0,3,1); set_value(newX); @@ -42,18 +64,24 @@ class PoseJPL : public Type{ assert(new_value.rows() == 7); - q->set_value(new_value.block(0,0,4,1)); - p->set_value(new_value.block(4,0,3,1)); + //Set orientation value + _q->set_value(new_value.block(0,0,4,1)); + + //Set position value + _p->set_value(new_value.block(4,0,3,1)); _value = new_value; } - //Sets the value of the estimate + //Sets the value of the first estimate void set_fej_value(const Eigen::VectorXd new_value){ assert(new_value.rows() == 7); - q->set_fej(new_value.block(0,0,4,1)); - 
p->set_fej(new_value.block(4,0,3,1)); + //Set orientation fej value + _q->set_fej(new_value.block(0,0,4,1)); + + //Set position fej value + _p->set_fej(new_value.block(4,0,3,1)); _fej = new_value; } @@ -65,41 +93,70 @@ class PoseJPL : public Type{ return Clone; } - //Rotation access + /// Rotation access Eigen::Matrix Rot() const{ - return q->Rot(); + return _q->Rot(); } - //FEJ Rotation access + /// FEJ Rotation access Eigen::Matrix Rot_fej() const{ - return q->Rot_fej();; + return _q->Rot_fej();; } - //Rotation access + /// Rotation access as quaternion Eigen::Matrix quat() const{ - return q->value(); + return _q->value(); } - //FEJ Rotation access + /// FEJ Rotation access as quaternion Eigen::Matrix quat_fej() const{ - return q->fej(); + return _q->fej(); } - //position access + /// Position access Eigen::Matrix pos() const{ - return p->value(); + return _p->value(); } - //FEJ position access + // FEJ position access Eigen::Matrix pos_fej() const{ - return p->fej(); + return _p->fej(); + } + + /** @brief Used to find the components inside the Pose if needed + * @param check variable to find + */ + Type* check_if_same_variable(Type* check){ + if (check== this){ + return this; + } + else if (check == _q){ + return q(); + } + else if (check == _p){ + return p(); + } + else{ + return nullptr; + } + } + + JPLQuat * q(){ + return _q; + } + + Vec * p(){ + return _p; } protected: - JPLQuat *q; - Vec *p; + ///Subvariable containing orientation + JPLQuat *_q; + + ///Subvariable containg position + Vec * _p; }; diff --git a/ov_core/src/types/Type.h b/ov_core/src/types/Type.h index 6eafe8495..a5ccf4dfb 100644 --- a/ov_core/src/types/Type.h +++ b/ov_core/src/types/Type.h @@ -7,54 +7,93 @@ #include +/** + * @brief Base class for estimated variables. 
+ * + * This class is used how variables are represented or updated (e.g., vectors or quaternions) + * +*/ class Type { public: + /** + * @brief Default constructor for our Type + * @param size_ degrees of freedom of variable (i.e., the size of the error state) + */ + + Type(int size_){_size= size_;} virtual ~Type() { }; - //Set id of variable in covariance + /** + * @brief Sets id used to track location of variable in the filter covariance + * @param new_id entry in filter covariance corresponding to this variable + */ virtual void set_local_id(int new_id){ _id= new_id; } + /** + * @brief Access to variable id + */ int id(){ return _id; } + /** + * @brief Access to variable size + */ int size(){ return _size; } - //Update estimate using local perturbation 0 + /** + * @brief Update variable due to perturbation of error state + * @param dx Perturbation used to update the variable through a defined "boxplus" operation + */ virtual void update(const Eigen::VectorXd dx) = 0; - //Returns the estimate + /** + * @brief Access variable's estimate + */ virtual Eigen::VectorXd value() const{ return _value; } - //Returns the first estimate for fej + /** + * @brief Access variable's first-estimate + */ virtual Eigen::VectorXd fej() const{ return _fej; } - //Sets the value of the estimate + /** + * @brief Overwrite value of state's estimate + * @param new_value New value that will overwrite state's value + */ virtual void set_value(const Eigen::VectorXd new_value){ _value = new_value; } - //Sets the value of the estimate + /** + * @brief Overwrite value of first-estimate + * @param new_value New value that will overwrite state's fej + */ virtual void set_fej(const Eigen::VectorXd new_value){ _fej = new_value; } + /** + * @brief Create a clone of this variable + */ virtual Type* clone()=0; - //Used to find if variable is this one. Returns nullptr if not. 
- // Overloaded to find subvariables as well + /** + * @brief Determine if "check" is the same variable + * @param check Type pointer to compare to + */ virtual Type* check_if_same_variable(const Type* check){ if (check == this){ return this; @@ -66,10 +105,16 @@ class Type { protected: + /// First-estimate Eigen::VectorXd _fej; + + /// Current best estimate Eigen::VectorXd _value; + /// Location of error state in covariance int _id = -1; + + /// Dimension of error state int _size = -1; diff --git a/ov_core/src/types/Vec.h b/ov_core/src/types/Vec.h index 37732fbb4..643d0d964 100644 --- a/ov_core/src/types/Vec.h +++ b/ov_core/src/types/Vec.h @@ -7,20 +7,43 @@ #include "Type.h" +/** + * @brief Derived Type class that implements vector variables + * +*/ + class Vec: public Type { public: - Vec(int dim) : Type(dim){} + + /** + * @brief Default constructor for Vec + * @param dim Size of the vector (will be same as error state) + */ + Vec(int dim) : Type(dim){ + _value = Eigen::VectorXd::Zero(dim); + _fej = Eigen::VectorXd::Zero(dim); + } ~Vec() { } + + /** + * @brief Implements the update operation through standard vector addition + * @param dx Additive error state correction + */ void update(const Eigen::VectorXd dx){ assert(dx.rows() == _size); set_value(_value + dx); } + + + /** + * @brief Performs all the cloning + */ Type* clone(){ Type* Clone= new Vec(_size); Clone->set_value(value()); From bf68f30b89ff41d29231fb5daed908ebff41feb8 Mon Sep 17 00:00:00 2001 From: Your Name Date: Tue, 4 Jun 2019 14:58:50 -0400 Subject: [PATCH 4/6] Added propagator --- ov_core/CMakeLists.txt | 4 +- ov_core/src/state/Options.h | 18 +- ov_core/src/state/Propagator.cpp | 291 +++++++++++++++++++++++++++++++ ov_core/src/state/Propagator.h | 149 ++++++++++++++++ ov_core/src/state/State.cpp | 81 +++++++++ ov_core/src/state/State.h | 127 ++++++-------- ov_core/src/state/StateHelper.h | 113 ++++++++++-- ov_core/src/track/TrackBase.cpp | 2 +- ov_core/src/types/IMU.h | 2 +- 
ov_core/src/utils/quat_ops.h | 54 ++++++ 10 files changed, 743 insertions(+), 98 deletions(-) create mode 100644 ov_core/src/state/Propagator.cpp create mode 100644 ov_core/src/state/Propagator.h create mode 100644 ov_core/src/state/State.cpp diff --git a/ov_core/CMakeLists.txt b/ov_core/CMakeLists.txt index d3e965de8..057be67e1 100644 --- a/ov_core/CMakeLists.txt +++ b/ov_core/CMakeLists.txt @@ -67,7 +67,9 @@ add_library(ov_core src/track/TrackAruco.cpp src/track/TrackDescriptor.cpp src/track/TrackKLT.cpp - src/types/Type.h src/types/Vec.h src/types/JPLQuat.h src/types/PoseJPL.h src/types/IMU.h src/state/State.h) + src/state/State.cpp + src/state/Propagator.cpp + ) target_link_libraries(ov_core ${thirdparty_libraries}) diff --git a/ov_core/src/state/Options.h b/ov_core/src/state/Options.h index 9f907a2c5..16464f1b6 100644 --- a/ov_core/src/state/Options.h +++ b/ov_core/src/state/Options.h @@ -10,23 +10,29 @@ struct Options{ + /// Bool to determine whether or not to do fej + bool do_fej = false; + + /// Bool to determine whether or not use imu message averaging + bool imu_avg = false; + /// Bool to determine whether or not to calibrate imu-to-camera pose - bool do_calib_camera_pose; + bool do_calib_camera_pose = false; /// Bool to determine whether or not to calibrate camera intrinsics - bool do_calib_camera_intrinsics; + bool do_calib_camera_intrinsics = false; /// Bool to determine whether or not to calibrate camera to IMU time offset - bool do_calib_camera_timeoffset; + bool do_calib_camera_timeoffset = false; /// Max clone size of sliding window - size_t max_clone_size; + size_t max_clone_size = 8; /// Max number of estimated SLAM features - size_t max_slam_features; + size_t max_slam_features = 0; /// Number of cameras - size_t num_cameras; + size_t num_cameras = 1; }; diff --git a/ov_core/src/state/Propagator.cpp b/ov_core/src/state/Propagator.cpp new file mode 100644 index 000000000..8d94675b9 --- /dev/null +++ b/ov_core/src/state/Propagator.cpp @@ -0,0 
+1,291 @@ +// +// Created by keck on 6/4/19. +// + +#include "Propagator.h" + + +bool Propagator::propagateAndClone(State* state, double timestamp){ + + // First lets construct an IMU vector of measurements we need + vector prop_data; + + // Get what our IMU-camera offset should be (t_imu = t_cam + calib_dt) + double t_off_new = state->calib_dt_CAMtoIMU()->value()(0,0); + + //=================================================================================== + //=================================================================================== + //=================================================================================== + + // Ensure we have some measurements in the first place! + if(imu_data.empty()) { + //ROS_ERROR("[TIME-SYNC]: There are no IMU measurements!!!!!"); + //ROS_ERROR("[TIME-SYNC]: IMU-CAMERA are likely messed up, check time offset value!!!"); + //ROS_ERROR("%s on line %d",__FILE__,__LINE__); + //std::exit(EXIT_FAILURE); + state->set_timestamp(timestamp); + last_prop_time_offset = t_off_new; + return false; + } + + // If the difference between the current update time and state is zero + // We should crash, as this means we would have two clones at the same time!!!! 
+ if(state->timestamp() == timestamp) { + //ROS_ERROR("[TIME-SYNC]: Propagation called again at same timestep at last update timestep!!!!"); + //ROS_ERROR("[TIME-SYNC]: %.15f vs %.15f timestamps",state->timestamp,timestamp); + //ROS_ERROR("%s on line %d",__FILE__,__LINE__); + std::exit(EXIT_FAILURE); + } + + // Loop through and find all the needed measurements to propagate with + // Note we split measurements based on the given state time, and the update timestamp + for(size_t i=0; i state->timestamp()+last_prop_time_offset && imu_data.at(i).timestamp <= + state->timestamp()+last_prop_time_offset) { + IMUDATA data = interpolate_data(imu_data.at(i),imu_data.at(i+1),state->timestamp()+last_prop_time_offset); + prop_data.push_back(data); + //ROS_INFO("propagation #%d = CASE 1 = %.3f => %.3f", + // (int)i,data.timestamp-prop_data.at(0).timestamp,state->timestamp-prop_data.at(0).timestamp); + continue; + } + + // MIDDLE OF INTEGRATION PERIOD + // If our imu measurement is right in the middle of our propagation period + // Then we should just append the whole measurement time to our propagation vector + if(imu_data.at(i).timestamp >= state->timestamp()+last_prop_time_offset && imu_data.at(i+1).timestamp <= + timestamp+t_off_new) { + prop_data.push_back(imu_data.at(i)); + //ROS_INFO("propagation #%d = CASE 2 = %.3f",(int)i,imu_data.at(i).timestamp-prop_data.at(0).timestamp); + continue; + } + + // END OF THE INTEGRATION PERIOD + // If the current timestamp is greater then our update time + // We should just "split" the NEXT IMU measurement to the update time, + // NOTE: we add the current time, and then the time at the end of the inverval (so we can get a dt) + // NOTE: we also break out of this loop, as this is the last IMU measurement we need! 
+ if(imu_data.at(i+1).timestamp > timestamp+t_off_new) { + prop_data.push_back(imu_data.at(i)); + // If the added IMU message doesn't end exactly at the camera time + // Then we need to add another one that is right at the ending time + if(prop_data.at(prop_data.size()-1).timestamp != timestamp+t_off_new) { + IMUDATA data = interpolate_data(imu_data.at(i), imu_data.at(i + 1), timestamp + t_off_new); + prop_data.push_back(data); + } + //ROS_INFO("propagation #%d = CASE 3 = %.3f => %.3f", + // (int)i,data.timestamp-prop_data.at(0).timestamp,data.timestamp-prop_data.at(0).timestamp); + break; + } + + } + + // Check that we have at least one measurement to propagate with + if(prop_data.empty()) { + //ROS_ERROR("[TIME-SYNC]: There are not enough measurements to propagate with %d of 2",(int)prop_data.size()); + //ROS_ERROR("[TIME-SYNC]: IMU-CAMERA are likely messed up, check time offset value!!!"); + //ROS_ERROR("%s on line %d",__FILE__,__LINE__); + //std::exit(EXIT_FAILURE); + state->set_timestamp(timestamp); + last_prop_time_offset = t_off_new; + return false; + } + + + // If we did not reach the whole integration period (i.e., the last inertial measurement we have is smaller then the time we want to reach) + // Then we should just "stretch" the last measurement to be the whole period + if(imu_data.at(imu_data.size()-1).timestamp < timestamp+t_off_new) { + IMUDATA data = interpolate_data(imu_data.at(imu_data.size()-2),imu_data.at(imu_data.size()-1),timestamp+t_off_new); + prop_data.push_back(data); + //ROS_INFO("propagation #%d = CASE 4 = %.3f",(int)(imu_data.size()-1),data.timestamp-prop_data.at(0).timestamp); + } + + //ROS_INFO("end_prop - start_prop %.3f",prop_data.at(prop_data.size()-1).timestamp-prop_data.at(0).timestamp); + //ROS_INFO("imu_data_last - state %.3f",imu_data.at(imu_data.size()-1).timestamp-state->timestamp()); + //ROS_INFO("update_time - state %.3f",timestamp-state->timestamp()); + + // Loop through and ensure we do not have an zero dt values + // 
This would cause the noise covariance to be Infinity + for (size_t i=0; i < prop_data.size()-1; i++){ + //cout << "dti- " << (prop_data.at(i+1).timestamp-prop_data.at(i).timestamp) << endl; + if ((prop_data.at(i+1).timestamp-prop_data.at(i).timestamp) < 1e-8){ + //ROS_ERROR("[TIME-SYNC]: Zero DT between %d and %d measurements (dt = %.8f)",(int)i,(int)(i+1),(prop_data.at(i+1).timestamp-prop_data.at(i).timestamp)); + return false; + prop_data.erase(prop_data.begin()+i); + i--; + } + } + + + // Check that we have at least one measurement to propagate with + if(prop_data.empty()) { + //ROS_ERROR("[TIME-SYNC]: There are not enough measurements to propagate with %d of 2",(int)prop_data.size()); + //ROS_ERROR("[TIME-SYNC]: IMU-CAMERA are likely messed up, check time offset value!!!"); + //ROS_ERROR("%s on line %d",__FILE__,__LINE__); + //std::exit(EXIT_FAILURE); + state->set_timestamp(timestamp); + last_prop_time_offset = t_off_new; + return false; + } + + + Eigen::Matrix Phi_summed = Eigen::Matrix::Identity(); + Eigen::Matrix Qd_summed = Eigen::Matrix::Zero(); + + for(size_t i=0; i last_w = prop_data.at(prop_data.size()-1).wm - state->imu()->bias_g(); + + auto Cov = state->Cov(); + + size_t imu_id = 0; + // Do the update to the covariance with our "summed" state transition and IMU noise addition... 
+ Cov.block(imu_id,0,15,state->nVars()) = Phi_summed*Cov.block(imu_id,0,15,state->nVars()); + Cov.block(0,imu_id,state->nVars(),15) = Cov.block(0,imu_id,state->nVars(),15)*Phi_summed.transpose(); + Cov.block(imu_id,imu_id,15,15) += Qd_summed; + + // Ensure the covariance is symmetric + Cov = 0.5*(Cov+Cov.transpose()); + + //Set timestamp data + state->set_timestamp(timestamp); + last_prop_time_offset = t_off_new; + + + //Now perform stochastic cloning + StateHelper::augment_clone(state, last_w); + +} + +void Propagator::predictAndcomputeJacobians(State *state, const IMUDATA data_minus, const IMUDATA data_plus, + Eigen::Matrix &F, Eigen::Matrix &Qd){ + + auto imu = state->imu(); + + // Time elapsed over interval + double dt = data_plus.timestamp-data_minus.timestamp; + + Eigen::Matrix w_hat = data_minus.wm - imu->bias_g(); + Eigen::Matrix a_hat = data_minus.am - imu->bias_a(); + + + if (state->options().imu_avg){ + Eigen::Matrix w_hat2 = data_plus.wm - imu->bias_g(); + Eigen::Matrix a_hat2 = data_plus.am - imu->bias_a(); + + w_hat = .5*(w_hat+w_hat2); + a_hat = .5*(a_hat+a_hat2); + } + + // Pre-compute things + double w_norm = w_hat.norm(); + Eigen::Matrix I_4x4 = Eigen::Matrix::Identity(); + + // Orientation: Equation (101) and (103) and of Trawny indirect TR + Eigen::Matrix bigO; + if(w_norm > 1e-3) { + bigO = cos(0.5*w_norm*dt)*I_4x4 + 1/w_norm*sin(0.5*w_norm*dt)*Omega(w_hat); + } else { + bigO = I_4x4 + 0.5*dt*Omega(w_hat); + } + Eigen::Matrix new_q = quatnorm(bigO*imu->quat()); + + Eigen::Matrix R_Gtoi = imu->Rot(); + + // Velocity: just the acceleration in the local frame, minus global gravity + Eigen::Matrix new_v = imu->vel() + R_Gtoi.transpose()*a_hat*dt - _gravity*dt; + + // Position: just velocity times dt, with the acceleration integrated twice + Eigen::Matrix new_p = imu->pos() + imu->vel()*dt + 0.5*R_Gtoi.transpose()*a_hat*dt*dt - + 0.5*_gravity*dt*dt; + + //Indexing within Jacobian + int th_id = 0; + int p_id = 3; + int v_id = 6; + int bg_id = 9; + 
int ba_id = 12; + + //Allocate noise Jacobian + Eigen::Matrix G = Eigen::Matrix::Zero(); + + + //Now compute Jacobian of new state wrt old state and noise + if (state->options().do_fej) { + + Eigen::Matrix Rfej = imu->Rot_fej(); + Eigen::Matrix dR = quat_2_Rot(new_q)*Rfej.transpose(); + + Eigen::Matrix v_fej = imu->vel_fej(); + Eigen::Matrix p_fej = imu->pos_fej(); + + + F.block(th_id, th_id, 3, 3) = dR; + F.block(th_id, bg_id, 3, 3).noalias() = -dR * Jr_so3(-w_hat * dt) * dt; + F.block(bg_id, bg_id, 3, 3).setIdentity(); + F.block(v_id, th_id, 3, 3).noalias() = -skew_x(new_v-v_fej+_gravity*dt)*Rfej.transpose(); + F.block(v_id, v_id, 3, 3).setIdentity(); + F.block(v_id, ba_id, 3, 3) = -Rfej.transpose() * dt; + F.block(ba_id, ba_id, 3, 3).setIdentity(); + F.block(p_id, th_id, 3, 3).noalias() = + -skew_x(new_p-p_fej-v_fej*dt+.5*_gravity*dt*dt)*Rfej.transpose(); + F.block(p_id, v_id, 3, 3) = Eigen::Matrix::Identity() * dt; + F.block(p_id, ba_id, 3, 3) = -0.5 * Rfej.transpose() * dt * dt; + F.block(p_id, p_id, 3, 3).setIdentity(); + + G.block(th_id, 0, 3, 3) = -dR * Jr_so3(-w_hat * dt) * dt; + G.block(bg_id, 3, 3, 3).setIdentity(); + G.block(v_id, 6, 3, 3) = -Rfej.transpose() * dt; + G.block(ba_id, 9, 3, 3).setIdentity(); + G.block(p_id, 6, 3, 3) = -.5 * Rfej.transpose() * dt * dt; + + } else { + + F.block(th_id, th_id, 3, 3) = Exp(-w_hat * dt); + F.block(th_id, bg_id, 3, 3).noalias() = -Exp(-w_hat * dt) * Jr_so3(-w_hat * dt) * dt; + F.block(bg_id, bg_id, 3, 3).setIdentity(); + F.block(v_id, th_id, 3, 3).noalias() = -R_Gtoi.transpose() * skew_x(a_hat * dt); + F.block(v_id, v_id, 3, 3).setIdentity(); + F.block(v_id, ba_id, 3, 3) = -R_Gtoi.transpose() * dt; + F.block(ba_id, ba_id, 3, 3).setIdentity(); + F.block(p_id, th_id, 3, 3).noalias() = + -0.5 *R_Gtoi.transpose() * skew_x(a_hat * dt * dt); + F.block(p_id, v_id, 3, 3) = Eigen::Matrix::Identity() * dt; + F.block(p_id, ba_id, 3, 3) = -0.5 * R_Gtoi.transpose() * dt * dt; + F.block(p_id, p_id, 3, 3).setIdentity(); + + 
G.block(th_id, 0, 3, 3) = -Exp(-w_hat * dt) * Jr_so3(-w_hat * dt) * dt; + G.block(bg_id, 3, 3, 3).setIdentity(); + G.block(v_id, 6, 3, 3) = -imu->Rot().transpose() * dt; + G.block(ba_id, 9, 3, 3).setIdentity(); + G.block(p_id, 6, 3, 3) = -.5 * imu->Rot().transpose() * dt * dt; + } + + + // Get noise covariance + Eigen::Matrix Qc = Eigen::Matrix::Zero(); + Qc.block(0,0,3,3) = _noises.sigma_w_2/dt*Eigen::Matrix::Identity(); + Qc.block(3,3,3,3) = _noises.sigma_wb_2*dt*Eigen::Matrix::Identity(); + Qc.block(6,6,3,3) = _noises.sigma_a_2/dt*Eigen::Matrix::Identity(); + Qc.block(9,9,3,3) = _noises.sigma_ab_2*dt*Eigen::Matrix::Identity(); + + // Compute the noise injected into the state over the interval + Qd = G*Qc*G.transpose(); + + //Now replace imu estimate and fej with propagated values + Eigen::Matrix imu_x = imu->value(); + imu_x.block(0,0,4,1) = new_q; + imu_x.block(4,0,3,1) = new_p; + imu_x.block(7,0,3,1) = new_v; + imu->set_value(imu_x); + imu->set_fej(imu_x); + +} diff --git a/ov_core/src/state/Propagator.h b/ov_core/src/state/Propagator.h new file mode 100644 index 000000000..7789dda88 --- /dev/null +++ b/ov_core/src/state/Propagator.h @@ -0,0 +1,149 @@ +// +// Created by keck on 6/4/19. 
+// + +#ifndef OPEN_VINS_PROPAGATOR_H +#define OPEN_VINS_PROPAGATOR_H + +#include "state/StateHelper.h" +#include "utils/quat_ops.h" + + +/**@brief Structure containing the IMU data + */ +struct IMUDATA { + /// timestamp of the reading + double timestamp; + + //Gyroscope reading + Eigen::Matrix wm; + + //Accellerometer reading + Eigen::Matrix am; +}; + +/**@brief Structure containing the IMU noise parameters + */ +struct NoiseManager{ + + /// Gyro white noise covariance + double sigma_w_2; + + /// Gyro random walk covariance + double sigma_wb_2; + + /// Accel white noise covariance + double sigma_a_2; + + /// Accel random walk covariance + double sigma_ab_2; + +}; + +/**@brief Performs the covariance and mean propagation using IMU measurements + */ +class Propagator{ + +public: + + Propagator(NoiseManager noises_, Eigen::Matrix gravity_) : _noises(noises_), + _gravity(gravity_){} + + + /** + * Stores incoming readings + * @param timestamp Timestamp of reading + * @param wm Gyro reading + * @param am Accelerometer reading + */ + void feed_imu(double timestamp, Eigen::Matrix wm, Eigen::Matrix am) { + + // Create our imu data object + IMUDATA data; + data.timestamp = timestamp; + data.wm = wm; + data.am = am; + + // Append it to our vector + imu_data.emplace_back(data); + + } + + + /** @brief Propagate state up to given timestamp and then clone + * @param state Pointer to state + * @timestamp Pointer to progate to and clone at + */ + bool propagateAndClone(State* state, double timestamp); + +protected: + + /// Estimate for time offset at last propagation time + double last_prop_time_offset = 0; + + /** + * @brief Propagates the state forward using the IMU data and computes the noise covariance and state-transition + * matrix of this interval. 
This function can be replaced with analytical/numerical integration or when using a + * different state representation + * @param imu Pointer to imu + * @param data_minus IMU readings at beginning of interval + * @param data_plus IMU readings at end of interval + * @param F State-transition matrix over the interval + * @param Qd Discrete-time noise covariance over the interval + */ + void predictAndcomputeJacobians(State *state, const IMUDATA data_minus, const IMUDATA data_plus, + Eigen::Matrix &F, Eigen::Matrix &Qd); + + + void propagateSingleInterval(State *state, const IMUDATA &data_minus, const IMUDATA &data_plus, + Eigen::Matrix &Phi, Eigen::Matrix &Qd){ + + Eigen::Matrix F = Eigen::Matrix::Zero(); + Eigen::Matrix Qdi = Eigen::Matrix::Zero(); + + predictAndcomputeJacobians(state, data_minus, data_plus, F, Qdi); + + Phi = F*Phi; + Qd = F*Qd*F.transpose()+Qdi; + } + + /// Container for the noise values + NoiseManager _noises; + + /// Our history of IMU messages (time, angular, linear) + std::vector imu_data; + + /// Gravity vector + + Eigen::Matrix _gravity; + + + /** + * @brief Nice helper function that will linearly interpolate between two imu messages + * This should be used instead of just "cutting" imu messages that bound the camera times + * Give better time offset if we use this function.... 
@param imu_1 IMU at beginning of interpolation interval
_imu->set_local_id(current_id); + insert_variable(_imu); + + current_id += 15; + + //Camera to IMU time offset + _calib_dt_CAMtoIMU = new Vec(1); + if (_options.do_calib_camera_timeoffset){ + _calib_dt_CAMtoIMU->set_local_id(current_id); + insert_variable(_calib_dt_CAMtoIMU); + current_id++; + } + + for (size_t i = 0; i < _options.num_cameras; i++){ + //Allocate pose + PoseJPL *pose = new PoseJPL(); + //Allocate intrinsics + Vec *intrin = new Vec(8); + + //Add these to the corresponding maps + _calib_IMUtoCAM.insert({i, pose}); + _cam_intrinsics.insert({i, intrin}); + + //If calibrating camera-imu pose, add to variables + if (_options.do_calib_camera_pose){ + pose->set_local_id(current_id); + current_id += 6; + insert_variable(pose); + } + //If calibrating camera intrinsics, add to variables + if (_options.do_calib_camera_intrinsics){ + intrin->set_local_id(current_id); + current_id += 8; + insert_variable(intrin); + } + } + + _Cov = Eigen::MatrixXd::Zero(current_id, current_id); + + + +} \ No newline at end of file diff --git a/ov_core/src/state/State.h b/ov_core/src/state/State.h index 20899f29f..21471793c 100644 --- a/ov_core/src/state/State.h +++ b/ov_core/src/state/State.h @@ -11,11 +11,11 @@ #include "Options.h" #include -class Landmark; - /** @brief Class which manages the filter state */ +class Landmark; + class State{ public: @@ -31,52 +31,7 @@ class State{ * @brief Initializes pointers and covariance * TODO: Read initial values and covariance from options */ - void initialize_variables(){ - - double current_id = 0; - _imu = new IMU(); - _imu->set_local_id(current_id); - insert_variable(_imu); - - current_id += 15; - - //Camera to IMU time offset - _calib_dt_CAMtoIMU = new Vec(1); - if (_options.do_calib_camera_timeoffset){ - _calib_dt_CAMtoIMU->set_local_id(current_id); - insert_variable(_calib_dt_CAMtoIMU); - current_id++; - } - - for (size_t i = 0; i < _options.num_cameras; i++){ - //Allocate pose - PoseJPL *pose = new PoseJPL(); - //Allocate intrinsics 
- Vec *intrin = new Vec(8); - - //Add these to the corresponding maps - _calib_IMUtoCAM.insert({i, pose}); - _cam_intrinsics.insert({i, intrin}); - - //If calibrating camera-imu pose, add to variables - if (_options.do_calib_camera_pose){ - pose->set_local_id(current_id); - current_id += 6; - insert_variable(pose); - } - //If calibrating camera intrinsics, add to variables - if (_options.do_calib_camera_intrinsics){ - intrin->set_local_id(current_id); - current_id += 8; - insert_variable(intrin); - } - } - - _Cov = Eigen::MatrixXd::Zero(current_id, current_id); - - - - } + void initialize_variables(); /** @brief Access IMU pointer @@ -99,6 +54,13 @@ class State{ return _variables; } + /** @brief Access variables in state by index in variables vector + * Index in variables vector + */ + Type* variables(size_t i){ + return _variables[i]; + } + /** @brief Insert new variable * @param newType Variable to insert */ @@ -111,34 +73,7 @@ class State{ * That only includes the ones specified with all cross terms * @param small_variables Vector of variables whose marginal covariance is desired */ - Eigen::MatrixXd get_marginal_covariance(const std::vector &small_variables){ - - // Calculate the marginal covariance size we need ot make our matrix - int cov_size=0; - for (size_t i=0; i < small_variables.size(); i++){ - cov_size += small_variables[i]->size(); - } - - // Construct our return covariance - Eigen::MatrixXd Small_cov(cov_size,cov_size); - - // For each variable, lets copy over all other variable cross terms - // Note: this copies over itself to when i_index=k_index - int i_index=0; - for (size_t i=0; i < small_variables.size(); i++){ - int k_index=0; - for (size_t k=0; k < small_variables.size(); k++){ - Small_cov.block(i_index, k_index, small_variables[i]->size(),small_variables[k]->size()) = - _Cov.block(small_variables[i]->id(), small_variables[k]->id(), small_variables[i]->size(), - small_variables[k]->size()); - k_index += small_variables[k]->size(); - } - 
i_index += small_variables[i]->size(); - } - - // Return the covariance - return Small_cov; - } + Eigen::MatrixXd get_marginal_covariance(const std::vector &small_variables); /** * @brief Given an update vector, updates each variable @@ -159,6 +94,7 @@ class State{ _clones_IMU.insert({timestamp, pose}); } + /// Access current timestamp double timestamp(){ return _timestamp; @@ -182,6 +118,43 @@ class State{ return _clones_IMU[timestamp]; } + /// Get size of covariance + size_t nVars(){ + return _Cov.rows(); + } + + /// Get current number of clones + size_t nClones(){ + return _clones_IMU.size(); + } + + /// Get marginal timestep + double margtimestep() { + double time = INFINITY; + for(std::pair& clone_imu : _clones_IMU) { + if(clone_imu.first < time) { + time = clone_imu.first; + } + } + return time; + } + + /** + * Erases clone associated with timestamp + * @param timestamp Timestamp of clone to erase + */ + void erase_clone(double timestamp){ + _clones_IMU.erase(timestamp); + } + + /** + * Set the current timestamp of the filter + * @param timestamp New timestamp + */ + void set_timestamp(double timestamp){ + _timestamp = timestamp; + } + protected: /// Current timestamp @@ -215,7 +188,7 @@ class State{ /// Camera intrinsics std::map _cam_intrinsics; - + /// Vector of variables std::vector _variables; diff --git a/ov_core/src/state/StateHelper.h b/ov_core/src/state/StateHelper.h index 48a53b466..7756f28ab 100644 --- a/ov_core/src/state/StateHelper.h +++ b/ov_core/src/state/StateHelper.h @@ -14,6 +14,95 @@ class StateHelper{ public: + /** @brief Marginalizes Type marg, properly modifying the ordering/covariances in the state + * @param state Pointer to state + * @param marg Pointer to variable to marginalize + */ + static void marginalize(State* state, Type* marg){ + + // Check if the current state has the GPS enabled + if(std::find(state->variables().begin(),state->variables().end(),marg) == state->variables().end()) { + std::cerr << "CovManager::marginalize() - 
Called on variable that is not in the state" << std::endl; + std::cerr << "CovManager::marginalize() - Marginalization, does NOT work on sub-variables yet..." << std::endl; + std::exit(EXIT_FAILURE); + } + + //Generic covariance has this form for x_1, x_m, x_2. If we want to remove x_m: + // + // P_(x_1,x_1) P(x_1,x_m) P(x_1,x_2) + // P_(x_m,x_1) P(x_m,x_m) P(x_m,x_2) + // P_(x_2,x_1) P(x_2,x_m) P(x_2,x_2) + // + // to + // + // P_(x_1,x_1) P(x_1,x_2) + // P_(x_2,x_1) P(x_2,x_2) + // + // i.e. x_1 goes from 0 to marg_id, x_2 goes from marg_id+marg_size to Cov.rows() + //in the original covariance + + int marg_size= marg->size(); + int marg_id= marg->id(); + + Eigen::MatrixXd Cov_new( state->nVars()-marg_size, state->nVars()-marg_size); + + int x2_size= state->nVars()-marg_id- marg_size; + + //P_(x_1,x_1) + Cov_new.block(0,0,marg_id,marg_id)= state->Cov().block(0,0,marg_id,marg_id); + + //P_(x_1,x_2) + Cov_new.block(0,marg_id,marg_id,x2_size)= state->Cov().block(0,marg_id+marg_size,marg_id,x2_size); + + //P_(x_2,x_1) + Cov_new.block(marg_id,0,x2_size,marg_id)= Cov_new.block(0,marg_id,marg_id,x2_size).transpose(); + + //P(x_2,x_2) + Cov_new.block(marg_id,marg_id,x2_size,x2_size)= state->Cov().block(marg_id+marg_size,marg_id+marg_size,x2_size,x2_size); + + + //Now set new covariance + state->Cov() = Cov_new; + + + //Now we keep the remaining variables and update their ordering + //Note: DOES NOT SUPPORT MARGINALIZING SUBVARIABLES YET!!!!!!! 
+ std::vector remaining_variables; + for (size_t i=0; i < state->variables().size(); i++){ + //Only keep non-marginal states + if (state->variables(i) != marg){ + if (state->variables(i)->id() > marg_id){ + //If the variable is "beyond" the marginal one in ordering, need + //to "move it forward" + state->variables(i)->set_local_id(state->variables(i)->id()- marg_size); + } + remaining_variables.push_back(state->variables(i)); + } + } + + delete marg; + + + //Now set variables as the remaining ones + state->variables() = remaining_variables; + + } + + /** @brief Remove the oldest clone, if we have more then the max clone count!! + * Note: the marginalizer should have already deleted the clone + * Note: so we just need to remove the pointer to it + * @param state Pointer to state + */ + static void marginalize_old_clone(State* state) { + + if (state->nClones() > state->options().max_clone_size) { + double margTime = state->margtimestep(); + StateHelper::marginalize(state, state->get_clone(margTime)); + state->erase_clone(margTime); + } + + } + /** * @brief Clones "variable to clone" and places it at end of covariance * @param state Pointer to state @@ -23,11 +112,11 @@ class StateHelper{ //Get total size of new cloned variables, and the old covariance size int total_size = variable_to_clone->size(); - int old_size = (int)state->Cov().rows(); - int new_loc = (int)state->Cov().rows(); + int old_size = (int)state->nVars(); + int new_loc = (int)state->nVars(); // Resize both our covariance to the new size - state->Cov().conservativeResizeLike(Eigen::MatrixXd::Zero(state->Cov().rows()+total_size,state->Cov().rows()+total_size)); + state->Cov().conservativeResizeLike(Eigen::MatrixXd::Zero(state->nVars()+total_size,state->nVars()+total_size)); // What is the new state, and variable we inserted const std::vector new_variables = state->variables(); @@ -37,7 +126,7 @@ class StateHelper{ for (size_t k=0; k < state->variables().size(); k++){ // Skip this if it is not the same - 
Type* type_check = state->variables()[k]->check_if_same_variable(variable_to_clone); + Type* type_check = state->variables(k)->check_if_same_variable(variable_to_clone); if (type_check == nullptr) continue; @@ -90,7 +179,7 @@ class StateHelper{ assert(H.rows() == res.rows()); - Eigen::MatrixXd M_a = Eigen::MatrixXd::Zero(state->Cov().rows(), res.rows()); + Eigen::MatrixXd M_a = Eigen::MatrixXd::Zero(state->nVars(), res.rows()); std::vector H_id; std::vector H_is_active; @@ -166,7 +255,7 @@ class StateHelper{ Eigen::MatrixXd H_Linv = H_L.inverse(); - Eigen::MatrixXd M_a = Eigen::MatrixXd::Zero(state->Cov().rows(), res.rows()); + Eigen::MatrixXd M_a = Eigen::MatrixXd::Zero(state->nVars(), res.rows()); std::vector H_id; std::vector H_is_active; @@ -206,10 +295,10 @@ class StateHelper{ // Covariance of the variable/landmark that will be initialized Eigen::MatrixXd P_LL = H_Linv*M.selfadjointView()*H_Linv.transpose(); - size_t oldSize = state->Cov().rows(); + size_t oldSize = state->nVars(); - state->Cov().conservativeResizeLike(Eigen::MatrixXd::Zero(state->Cov().rows()+new_variable->size(), - state->Cov().rows()+new_variable->size())); + state->Cov().conservativeResizeLike(Eigen::MatrixXd::Zero(state->nVars()+new_variable->size(), + state->nVars()+new_variable->size())); state->Cov().block(0, oldSize, oldSize, new_variable->size()).noalias() = -M_a*H_Linv.transpose(); state->Cov().block(oldSize, 0, new_variable->size(), oldSize) = state->Cov().block(0, oldSize, oldSize, new_variable->size()).transpose(); @@ -221,7 +310,7 @@ class StateHelper{ new_variable->update(H_Linv*res); // Now collect results, and add it to the state variables - new_variable->set_local_id((int)(state->Cov().rows()-new_variable->size())); + new_variable->set_local_id((int)(state->nVars()-new_variable->size())); state->insert_variable(new_variable); @@ -277,7 +366,7 @@ class StateHelper{ //=========================================== // Finally, initialize it in our state - 
invertible_initialize(state, new_variable, H_order, Hxinit, H_finit, + StateHelper::invertible_initialize(state, new_variable, H_order, Hxinit, H_finit, Rinit, resinit); //Update with updating portion @@ -308,7 +397,7 @@ class StateHelper{ // Check that it was a valid cast if(pose == nullptr) { - ROS_ERROR("INVALID OBJECT RETURNED FROM MARGINALIZER, EXITING!#!@#!@#"); + //ROS_ERROR("INVALID OBJECT RETURNED FROM MARGINALIZER, EXITING!#!@#!@#"); exit(EXIT_FAILURE); } diff --git a/ov_core/src/track/TrackBase.cpp b/ov_core/src/track/TrackBase.cpp index b5712eb07..b321c058d 100644 --- a/ov_core/src/track/TrackBase.cpp +++ b/ov_core/src/track/TrackBase.cpp @@ -1,5 +1,5 @@ #include "TrackBase.h" -#include "state/StateHelper.h" +#include "state/Propagator.h" void TrackBase::display_active(cv::Mat &img_out, int r1, int g1, int b1, int r2, int g2, int b2) { diff --git a/ov_core/src/types/IMU.h b/ov_core/src/types/IMU.h index feeb3a953..c9fa972d1 100644 --- a/ov_core/src/types/IMU.h +++ b/ov_core/src/types/IMU.h @@ -26,7 +26,7 @@ class IMU : public Type{ _bg = new Vec(3); _ba = new Vec(3); - Eigen::Matrix imu0; + Eigen::Matrix imu0; imu0.setZero(); imu0(3) = 1.0; diff --git a/ov_core/src/utils/quat_ops.h b/ov_core/src/utils/quat_ops.h index faabb05fb..0c04abc63 100644 --- a/ov_core/src/utils/quat_ops.h +++ b/ov_core/src/utils/quat_ops.h @@ -303,6 +303,60 @@ inline Eigen::Matrix Inv(Eigen::Matrix q){ } +/** + * \brief equation (48) of trawny tech report + */ +inline Eigen::Matrix Omega(Eigen::Matrix w ){ + Eigen::Matrix mat; + mat.block(0,0,3,3) = -skew_x(w); + mat.block(3,0,1,3) = -w.transpose(); + mat.block(0,3,3,1) = w; + mat(3,3) = 0; + return mat; +}; + +/** + * \brief Normalizes a quaternion to make sure it is unit norm + */ +inline Eigen::Matrix quatnorm(Eigen::Matrix q_t) { + if (q_t(3,0) <0){ + q_t*=-1; + } + return q_t/q_t.norm(); +}; + +/** + * Computes left Jacobian of SO(3) + * @param w axis-angle + */ + +inline Eigen::Matrix Jl_so3(Eigen::Matrix w ){ + + double 
theta= w.norm(); + + if (theta < 1e-12){ + return Eigen::MatrixXd::Identity(3,3); + } + else{ + Eigen::Matrix a= w/theta; + Eigen::Matrix J= sin(theta)/theta*Eigen::MatrixXd::Identity(3,3)+ (1-sin(theta)/theta)*a*a.transpose()+ + ((1-cos(theta))/theta)*skew_x(a); + return J; + + } +}; + +/** + * Computes right Jacobian of SO(3) + * @param w axis-angle + */ +inline Eigen::Matrix Jr_so3(Eigen::Matrix w ){ + + return Jl_so3(-w); +}; + + + #endif /* OV_CORE_QUAT_OPS_H */ \ No newline at end of file From 55980814fdeafce8799420511135c321dadb948a Mon Sep 17 00:00:00 2001 From: Patrick Geneva Date: Wed, 5 Jun 2019 11:58:20 -0400 Subject: [PATCH 5/6] moved to msckf package, cleaned up documentation for types/state/prop --- CMakeLists.txt | 2 +- Doxyfile-mcss | 6 +- ov_core/CMakeLists.txt | 4 +- ov_core/src/cpi/CpiBase.h | 271 +++--- ov_core/src/cpi/CpiV1.h | 679 +++++++------- ov_core/src/cpi/CpiV2.h | 875 +++++++++--------- ov_core/src/state/Options.h | 41 - ov_core/src/state/Propagator.h | 149 --- ov_core/src/state/State.h | 199 ---- ov_core/src/state/StateHelper.h | 432 --------- ov_core/src/test_tracking.cpp | 2 + ov_core/src/track/Feature.h | 60 +- ov_core/src/track/FeatureDatabase.h | 427 ++++----- ov_core/src/track/Grider_DOG.h | 244 ++--- ov_core/src/track/Grider_FAST.h | 168 ++-- ov_core/src/track/TrackAruco.cpp | 3 + ov_core/src/track/TrackAruco.h | 187 ++-- ov_core/src/track/TrackBase.cpp | 4 +- ov_core/src/track/TrackBase.h | 440 ++++----- ov_core/src/track/TrackDescriptor.cpp | 2 + ov_core/src/track/TrackDescriptor.h | 292 +++--- ov_core/src/track/TrackKLT.cpp | 3 + ov_core/src/track/TrackKLT.h | 240 ++--- ov_core/src/types/IMU.h | 223 ----- ov_core/src/types/JPLQuat.h | 110 --- ov_core/src/types/PoseJPL.h | 163 ---- ov_core/src/types/Type.h | 123 --- ov_core/src/types/Vec.h | 57 -- ov_core/src/utils/dataset_reader.h | 182 ++-- ov_core/src/utils/quat_ops.h | 569 ++++++------ ov_msckf/CMakeLists.txt | 79 ++ ov_msckf/package.xml | 48 + 
.../src/state/Propagator.cpp | 188 ++-- ov_msckf/src/state/Propagator.h | 172 ++++ {ov_core => ov_msckf}/src/state/State.cpp | 40 +- ov_msckf/src/state/State.h | 217 +++++ ov_msckf/src/state/StateHelper.cpp | 366 ++++++++ ov_msckf/src/state/StateHelper.h | 146 +++ ov_msckf/src/state/StateOptions.h | 46 + ov_msckf/src/test_msckf.cpp | 0 ov_msckf/src/types/IMU.h | 245 +++++ ov_msckf/src/types/JPLQuat.h | 122 +++ ov_msckf/src/types/PoseJPL.h | 181 ++++ ov_msckf/src/types/Type.h | 134 +++ ov_msckf/src/types/Vec.h | 61 ++ 45 files changed, 4319 insertions(+), 3883 deletions(-) delete mode 100644 ov_core/src/state/Options.h delete mode 100644 ov_core/src/state/Propagator.h delete mode 100644 ov_core/src/state/State.h delete mode 100644 ov_core/src/state/StateHelper.h delete mode 100644 ov_core/src/types/IMU.h delete mode 100644 ov_core/src/types/JPLQuat.h delete mode 100644 ov_core/src/types/PoseJPL.h delete mode 100644 ov_core/src/types/Type.h delete mode 100644 ov_core/src/types/Vec.h create mode 100644 ov_msckf/CMakeLists.txt create mode 100644 ov_msckf/package.xml rename {ov_core => ov_msckf}/src/state/Propagator.cpp (57%) create mode 100644 ov_msckf/src/state/Propagator.h rename {ov_core => ov_msckf}/src/state/State.cpp (73%) create mode 100644 ov_msckf/src/state/State.h create mode 100644 ov_msckf/src/state/StateHelper.cpp create mode 100644 ov_msckf/src/state/StateHelper.h create mode 100644 ov_msckf/src/state/StateOptions.h create mode 100644 ov_msckf/src/test_msckf.cpp create mode 100644 ov_msckf/src/types/IMU.h create mode 100644 ov_msckf/src/types/JPLQuat.h create mode 100644 ov_msckf/src/types/PoseJPL.h create mode 100644 ov_msckf/src/types/Type.h create mode 100644 ov_msckf/src/types/Vec.h diff --git a/CMakeLists.txt b/CMakeLists.txt index 5199811e3..4ce03b978 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -1,5 +1,5 @@ cmake_minimum_required(VERSION 2.8.8) add_subdirectory(ov_core) -#add_subdirectory(ov_msckf) +add_subdirectory(ov_msckf) diff --git 
a/Doxyfile-mcss b/Doxyfile-mcss index fcc55d976..3afd52f05 100644 --- a/Doxyfile-mcss +++ b/Doxyfile-mcss @@ -35,7 +35,7 @@ ALIASES += \ ##! M_FAVICON = favicon-dark.png -##! M_LINKS_NAVBAR1 = pages annotated files +##! M_LINKS_NAVBAR1 = pages namespaceov__core namespaceov__msckf annotated ##! M_LINKS_NAVBAR2 = \ -##! "GitHub" -##! M_SEARCH_DOWNLOAD_BINARY = YES \ No newline at end of file +##! "GitHub" +##! M_SEARCH_DOWNLOAD_BINARY = NO \ No newline at end of file diff --git a/ov_core/CMakeLists.txt b/ov_core/CMakeLists.txt index 057be67e1..95be44640 100644 --- a/ov_core/CMakeLists.txt +++ b/ov_core/CMakeLists.txt @@ -67,9 +67,7 @@ add_library(ov_core src/track/TrackAruco.cpp src/track/TrackDescriptor.cpp src/track/TrackKLT.cpp - src/state/State.cpp - src/state/Propagator.cpp - ) +) target_link_libraries(ov_core ${thirdparty_libraries}) diff --git a/ov_core/src/cpi/CpiBase.h b/ov_core/src/cpi/CpiBase.h index b1cf941fe..466618bc6 100644 --- a/ov_core/src/cpi/CpiBase.h +++ b/ov_core/src/cpi/CpiBase.h @@ -1,5 +1,6 @@ #ifndef CPI_BASE_H #define CPI_BASE_H + /* * MIT License * Copyright (c) 2018 Kevin Eckenhoff @@ -29,145 +30,149 @@ #include #include "utils/quat_ops.h" - /** - * @brief Base class for continuous preintegration integrators. - * - * This is the base class that both continuous-time preintegrators extend. - * Please take a look at the derived classes CpiV1 and CpiV2 for the actual implementation. - * Please see the following publication for details on the theory: - * > Continuous Preintegration Theory for Graph-based Visual-Inertial Navigation - * > Authors: Kevin Eckenhoff, Patrick Geneva, and Guoquan Huang - * > http://udel.edu/~ghuang/papers/tr_cpi.pdf - * - * The steps to use this preintegration class are as follows: - * 1. call setLinearizationPoints() to set the bias/orientation linearization point - * 2. call feed_IMU() will all IMU measurements you want to precompound over - * 3. 
access public varibles, to get means, Jacobians, and measurement covariance + * @namespace ov_core + * @brief Core algorithms for Open VINS */ -class CpiBase { - -public: - +namespace ov_core { /** - * @brief Default constructor - * @param sigma_w gyroscope white noise density (rad/s/sqrt(hz)) - * @param sigma_wb gyroscope random walk (rad/s^2/sqrt(hz)) - * @param sigma_a accelerometer white noise density (m/s^2/sqrt(hz)) - * @param sigma_ab accelerometer random walk (m/s^3/sqrt(hz)) - * @param imu_avg_ if we want to average the imu measurements (IJRR paper did not do this) - */ - CpiBase(double sigma_w, double sigma_wb, double sigma_a, double sigma_ab, bool imu_avg_= false) { - // Calculate our covariance matrix - Q_c.block(0,0,3,3) = std::pow(sigma_w,2)*eye3; - Q_c.block(3,3,3,3) = std::pow(sigma_wb,2)*eye3; - Q_c.block(6,6,3,3) = std::pow(sigma_a,2)*eye3; - Q_c.block(9,9,3,3) = std::pow(sigma_ab,2)*eye3; - imu_avg = imu_avg_; - // Calculate our unit vectors, and their skews (used in bias jacobian calcs) - e_1 << 1,0,0; - e_2 << 0,1,0; - e_3 << 0,0,1; - e_1x = skew_x(e_1); - e_2x = skew_x(e_2); - e_3x = skew_x(e_3); - } - - - /** - * @brief Set linearization points of the integration. - * @param[in] b_w_lin_ gyroscope bias linearization point - * @param[in] b_a_lin_ accelerometer bias linearization point - * @param[in] q_k_lin_ orientation linearization point (only model 2 uses) - * @param[in] grav_ global gravity at the current timestep + * @brief Base class for continuous preintegration integrators. * - * This function sets the linearization points we are to preintegrate about. - * For model 2 we will also pass the q_GtoK and current gravity estimate. 
- */ - void setLinearizationPoints(Eigen::Matrix b_w_lin_, Eigen::Matrix b_a_lin_, - Eigen::Matrix q_k_lin_ = Eigen::Matrix::Zero(), - Eigen::Matrix grav_ = Eigen::Matrix::Zero()) { - b_w_lin = b_w_lin_; - b_a_lin = b_a_lin_; - q_k_lin = q_k_lin_; - grav = grav_; - } - - - /** - * @brief Main function that will sequentially compute the preintegration measurement. - * @param[in] t_0 first IMU timestamp - * @param[in] t_1 second IMU timestamp - * @param[in] w_m_0 first imu gyroscope measurement - * @param[in] a_m_0 first imu acceleration measurement - * @param[in] w_m_1 second imu gyroscope measurement - * @param[in] a_m_1 second imu acceleration measurement + * This is the base class that both continuous-time preintegrators extend. + * Please take a look at the derived classes CpiV1 and CpiV2 for the actual implementation. + * Please see the following publication for details on the theory: + * > Continuous Preintegration Theory for Graph-based Visual-Inertial Navigation + * > Authors: Kevin Eckenhoff, Patrick Geneva, and Guoquan Huang + * > http://udel.edu/~ghuang/papers/tr_cpi.pdf * - * This new IMU messages and will precompound our measurements, jacobians, and measurement covariance. - * Please see both CpiV1 and CpiV2 classes for implementation details on how this works. + * The steps to use this preintegration class are as follows: + * 1. call setLinearizationPoints() to set the bias/orientation linearization point + * 2. call feed_IMU() will all IMU measurements you want to precompound over + * 3. 
access public variables, to get means, Jacobians, and measurement covariance
Eigen::Matrix::Identity(); - - // Simple unit vectors (used in bias jacobian calculations) - Eigen::Matrix e_1;// = Eigen::Matrix::Constant(1,0,0); - Eigen::Matrix e_2;// = Eigen::Matrix::Constant(0,1,0); - Eigen::Matrix e_3;// = Eigen::Matrix::Constant(0,0,1); - - // Calculate the skew-symetric of our unit vectors - Eigen::Matrix e_1x;// = skew_x(e_1); - Eigen::Matrix e_2x;// = skew_x(e_2); - Eigen::Matrix e_3x;// = skew_x(e_3); - - -}; - - + class CpiBase { + + public: + + + /** + * @brief Default constructor + * @param sigma_w gyroscope white noise density (rad/s/sqrt(hz)) + * @param sigma_wb gyroscope random walk (rad/s^2/sqrt(hz)) + * @param sigma_a accelerometer white noise density (m/s^2/sqrt(hz)) + * @param sigma_ab accelerometer random walk (m/s^3/sqrt(hz)) + * @param imu_avg_ if we want to average the imu measurements (IJRR paper did not do this) + */ + CpiBase(double sigma_w, double sigma_wb, double sigma_a, double sigma_ab, bool imu_avg_ = false) { + // Calculate our covariance matrix + Q_c.block(0, 0, 3, 3) = std::pow(sigma_w, 2) * eye3; + Q_c.block(3, 3, 3, 3) = std::pow(sigma_wb, 2) * eye3; + Q_c.block(6, 6, 3, 3) = std::pow(sigma_a, 2) * eye3; + Q_c.block(9, 9, 3, 3) = std::pow(sigma_ab, 2) * eye3; + imu_avg = imu_avg_; + // Calculate our unit vectors, and their skews (used in bias jacobian calcs) + e_1 << 1, 0, 0; + e_2 << 0, 1, 0; + e_3 << 0, 0, 1; + e_1x = skew_x(e_1); + e_2x = skew_x(e_2); + e_3x = skew_x(e_3); + } + + + /** + * @brief Set linearization points of the integration. + * @param[in] b_w_lin_ gyroscope bias linearization point + * @param[in] b_a_lin_ accelerometer bias linearization point + * @param[in] q_k_lin_ orientation linearization point (only model 2 uses) + * @param[in] grav_ global gravity at the current timestep + * + * This function sets the linearization points we are to preintegrate about. + * For model 2 we will also pass the q_GtoK and current gravity estimate. 
+ */ + void setLinearizationPoints(Eigen::Matrix b_w_lin_, Eigen::Matrix b_a_lin_, + Eigen::Matrix q_k_lin_ = Eigen::Matrix::Zero(), + Eigen::Matrix grav_ = Eigen::Matrix::Zero()) { + b_w_lin = b_w_lin_; + b_a_lin = b_a_lin_; + q_k_lin = q_k_lin_; + grav = grav_; + } + + + /** + * @brief Main function that will sequentially compute the preintegration measurement. + * @param[in] t_0 first IMU timestamp + * @param[in] t_1 second IMU timestamp + * @param[in] w_m_0 first imu gyroscope measurement + * @param[in] a_m_0 first imu acceleration measurement + * @param[in] w_m_1 second imu gyroscope measurement + * @param[in] a_m_1 second imu acceleration measurement + * + * This new IMU messages and will precompound our measurements, jacobians, and measurement covariance. + * Please see both CpiV1 and CpiV2 classes for implementation details on how this works. + */ + virtual void feed_IMU(double t_0, double t_1, Eigen::Matrix w_m_0, Eigen::Matrix a_m_0, + Eigen::Matrix w_m_1 = Eigen::Matrix::Zero(), + Eigen::Matrix a_m_1 = Eigen::Matrix::Zero()) = 0; + + + // Flag if we should perform IMU averaging or not + // For version 1 we should average the measurement + // For version 2 we average the local true + bool imu_avg = false; + + + // Measurement Means + double DT = 0; ///< measurement integration time + Eigen::Matrix alpha_tau = Eigen::Matrix::Zero(); ///< alpha measurement mean + Eigen::Matrix beta_tau = Eigen::Matrix::Zero(); ///< beta measurement mean + Eigen::Matrix q_k2tau; ///< orientation measurement mean + Eigen::Matrix R_k2tau = Eigen::Matrix::Identity(); ///< orientation measurement mean + + // Jacobians + Eigen::Matrix J_q = Eigen::Matrix::Zero(); ///< orientation Jacobian wrt b_w + Eigen::Matrix J_a = Eigen::Matrix::Zero(); ///< alpha Jacobian wrt b_w + Eigen::Matrix J_b = Eigen::Matrix::Zero(); ///< beta Jacobian wrt b_w + Eigen::Matrix H_a = Eigen::Matrix::Zero(); ///< alpha Jacobian wrt b_a + Eigen::Matrix H_b = Eigen::Matrix::Zero(); ///< beta Jacobian wrt 
b_a + + // Linearization points + Eigen::Matrix b_w_lin; ///< b_w linearization point (gyroscope) + Eigen::Matrix b_a_lin; ///< b_a linearization point (accelerometer) + Eigen::Matrix q_k_lin; ///< q_k linearization point (only model 2 uses) + + /// Global gravity + Eigen::Matrix grav = Eigen::Matrix::Zero(); + + /// Our continous-time measurement noise matrix (computed from contructor noise values) + Eigen::Matrix Q_c = Eigen::Matrix::Zero(); + + /// Our final measurement covariance + Eigen::Matrix P_meas = Eigen::Matrix::Zero(); + + + //========================================================================== + // HELPER VARIABLES + //========================================================================== + + // 3x3 identity matrix + Eigen::Matrix eye3 = Eigen::Matrix::Identity(); + + // Simple unit vectors (used in bias jacobian calculations) + Eigen::Matrix e_1;// = Eigen::Matrix::Constant(1,0,0); + Eigen::Matrix e_2;// = Eigen::Matrix::Constant(0,1,0); + Eigen::Matrix e_3;// = Eigen::Matrix::Constant(0,0,1); + + // Calculate the skew-symetric of our unit vectors + Eigen::Matrix e_1x;// = skew_x(e_1); + Eigen::Matrix e_2x;// = skew_x(e_2); + Eigen::Matrix e_3x;// = skew_x(e_3); + + + }; + +} #endif /* CPI_BASE_H */ \ No newline at end of file diff --git a/ov_core/src/cpi/CpiV1.h b/ov_core/src/cpi/CpiV1.h index 4848f0b90..42ae11b6d 100644 --- a/ov_core/src/cpi/CpiV1.h +++ b/ov_core/src/cpi/CpiV1.h @@ -1,5 +1,6 @@ #ifndef CPI_V1_H #define CPI_V1_H + /* * MIT License * Copyright (c) 2018 Kevin Eckenhoff @@ -30,356 +31,362 @@ #include "CpiBase.h" #include "utils/quat_ops.h" - /** - * @brief Model 1 of continuous preintegration. - * - * This model is the "piecewise constant measurement assumption" which was first presented in: - * > Eckenhoff, Kevin, Patrick Geneva, and Guoquan Huang. - * > "High-accuracy preintegration for visual inertial navigation." - * > International Workshop on the Algorithmic Foundations of Robotics. 2016. 
- * Please see the following publication for details on the theory: - * > Continuous Preintegration Theory for Graph-based Visual-Inertial Navigation - * > Authors: Kevin Eckenhoff, Patrick Geneva, and Guoquan Huang - * > http://udel.edu/~ghuang/papers/tr_cpi.pdf - * - * The steps to use this preintegration class are as follows: - * 1. call setLinearizationPoints() to set the bias/orientation linearization point - * 2. call feed_IMU() will all IMU measurements you want to precompound over - * 3. access public varibles, to get means, Jacobians, and measurement covariance + * @namespace ov_core + * @brief Core algorithms for Open VINS */ -class CpiV1: public CpiBase { - -public: - - /** - * @brief Default constructor for our Model 1 preintegration (piecewise constant measurement assumption) - * @param sigma_w gyroscope white noise density (rad/s/sqrt(hz)) - * @param sigma_wb gyroscope random walk (rad/s^2/sqrt(hz)) - * @param sigma_a accelerometer white noise density (m/s^2/sqrt(hz)) - * @param sigma_ab accelerometer random walk (m/s^3/sqrt(hz)) - * @param imu_avg_ if we want to average the imu measurements (IJRR paper did not do this) - */ - CpiV1(double sigma_w, double sigma_wb, double sigma_a, double sigma_ab, bool imu_avg_= false): - CpiBase(sigma_w, sigma_wb, sigma_a, sigma_ab, imu_avg_){} +namespace ov_core { /** - * @brief Our precompound function for Model 1 - * @param[in] t_0 first IMU timestamp - * @param[in] t_1 second IMU timestamp - * @param[in] w_m_0 first imu gyroscope measurement - * @param[in] a_m_0 first imu acceleration measurement - * @param[in] w_m_1 second imu gyroscope measurement - * @param[in] a_m_1 second imu acceleration measurement + * @brief Model 1 of continuous preintegration. + * + * This model is the "piecewise constant measurement assumption" which was first presented in: + * > Eckenhoff, Kevin, Patrick Geneva, and Guoquan Huang. + * > "High-accuracy preintegration for visual inertial navigation." 
+ * > International Workshop on the Algorithmic Foundations of Robotics. 2016. + * Please see the following publication for details on the theory: + * > Continuous Preintegration Theory for Graph-based Visual-Inertial Navigation + * > Authors: Kevin Eckenhoff, Patrick Geneva, and Guoquan Huang + * > http://udel.edu/~ghuang/papers/tr_cpi.pdf * - * We will first analytically integrate our meansurements and Jacobians. - * Then we perform numerical integration for our measurement covariance. + * The steps to use this preintegration class are as follows: + * 1. call setLinearizationPoints() to set the bias/orientation linearization point + * 2. call feed_IMU() will all IMU measurements you want to precompound over + * 3. access public varibles, to get means, Jacobians, and measurement covariance */ - void feed_IMU(double t_0, double t_1, Eigen::Matrix w_m_0, Eigen::Matrix a_m_0, - Eigen::Matrix w_m_1 = Eigen::Matrix::Zero(), - Eigen::Matrix a_m_1 = Eigen::Matrix::Zero()) { + class CpiV1 : public CpiBase { + + public: + + /** + * @brief Default constructor for our Model 1 preintegration (piecewise constant measurement assumption) + * @param sigma_w gyroscope white noise density (rad/s/sqrt(hz)) + * @param sigma_wb gyroscope random walk (rad/s^2/sqrt(hz)) + * @param sigma_a accelerometer white noise density (m/s^2/sqrt(hz)) + * @param sigma_ab accelerometer random walk (m/s^3/sqrt(hz)) + * @param imu_avg_ if we want to average the imu measurements (IJRR paper did not do this) + */ + CpiV1(double sigma_w, double sigma_wb, double sigma_a, double sigma_ab, bool imu_avg_ = false) : + CpiBase(sigma_w, sigma_wb, sigma_a, sigma_ab, imu_avg_) {} + + + /** + * @brief Our precompound function for Model 1 + * @param[in] t_0 first IMU timestamp + * @param[in] t_1 second IMU timestamp + * @param[in] w_m_0 first imu gyroscope measurement + * @param[in] a_m_0 first imu acceleration measurement + * @param[in] w_m_1 second imu gyroscope measurement + * @param[in] a_m_1 second imu 
acceleration measurement + * + * We will first analytically integrate our meansurements and Jacobians. + * Then we perform numerical integration for our measurement covariance. + */ + void feed_IMU(double t_0, double t_1, Eigen::Matrix w_m_0, Eigen::Matrix a_m_0, + Eigen::Matrix w_m_1 = Eigen::Matrix::Zero(), + Eigen::Matrix a_m_1 = Eigen::Matrix::Zero()) { + + + // Get time difference + double delta_t = t_1 - t_0; + DT += delta_t; + + //If no time has passed do nothing + if (delta_t == 0) { + return; + } + + // Get estimated imu readings + Eigen::Matrix w_hat = w_m_0 - b_w_lin; + Eigen::Matrix a_hat = a_m_0 - b_a_lin; + + // If averaging, average + if (imu_avg) { + w_hat += w_m_1 - b_w_lin; + w_hat = 0.5 * w_hat; + a_hat += a_m_1 - b_a_lin; + a_hat = .5 * a_hat; + } + + // Get angle change w*dt + Eigen::Matrix w_hatdt = w_hat * delta_t; + + // Get entries of w_hat + double w_1 = w_hat(0, 0); + double w_2 = w_hat(1, 0); + double w_3 = w_hat(2, 0); + + // Get magnitude of w and wdt + double mag_w = w_hat.norm(); + double w_dt = mag_w * delta_t; + + // Threshold to determine if equations will be unstable + bool small_w = (mag_w < 0.008726646); + + // Get some of the variables used in the preintegration equations + double dt_2 = pow(delta_t, 2); + double cos_wt = cos(w_dt); + double sin_wt = sin(w_dt); + + Eigen::Matrix w_x = skew_x(w_hat); + Eigen::Matrix a_x = skew_x(a_hat); + Eigen::Matrix w_tx = skew_x(w_hatdt); + Eigen::Matrix w_x_2 = w_x * w_x; + + + //========================================================================== + // MEASUREMENT MEANS + //========================================================================== + + // Get relative rotation + Eigen::Matrix R_tau2tau1 = small_w ? 
eye3 - delta_t * w_x + (pow(delta_t, 2) / 2) * w_x_2 : + eye3 - (sin_wt / mag_w) * w_x + ((1.0 - cos_wt) / (pow(mag_w, 2.0))) * w_x_2; + + // Updated rotation and its transpose + Eigen::Matrix R_k2tau1 = R_tau2tau1 * R_k2tau; + Eigen::Matrix R_tau12k = R_k2tau1.transpose(); + + //Intermediate variables for evaluating the measurement/bias Jacobian update + double f_1; + double f_2; + double f_3; + double f_4; + + if (small_w) { + f_1 = -(pow(delta_t, 3) / 3); + f_2 = (pow(delta_t, 4) / 8); + f_3 = -(pow(delta_t, 2) / 2); + f_4 = (pow(delta_t, 3) / 6); + } else { + f_1 = (w_dt * cos_wt - sin_wt) / (pow(mag_w, 3)); + f_2 = (pow(w_dt, 2) - 2 * cos_wt - 2 * w_dt * sin_wt + 2) / (2 * pow(mag_w, 4)); + f_3 = -(1 - cos_wt) / pow(mag_w, 2); + f_4 = (w_dt - sin_wt) / pow(mag_w, 3); + } + + // Compute the main part of our analytical means + Eigen::Matrix alpha_arg = ((dt_2 / 2.0) * eye3 + f_1 * w_x + f_2 * w_x_2); + Eigen::Matrix Beta_arg = (delta_t * eye3 + f_3 * w_x + f_4 * w_x_2); + + // Matrices that will multiply the a_hat in the update expressions + Eigen::MatrixXd H_al = R_tau12k * alpha_arg; + Eigen::MatrixXd H_be = R_tau12k * Beta_arg; + + // Update the measurement means + alpha_tau += beta_tau * delta_t + H_al * a_hat; + beta_tau += H_be * a_hat; + + + //========================================================================== + // BIAS JACOBIANS (ANALYTICAL) + //========================================================================== + + // Get right Jacobian + Eigen::Matrix J_r_tau1 = small_w ? 
eye3 - .5 * w_tx + (1.0 / 6.0) * w_tx * w_tx : + eye3 - ((1 - cos_wt) / (pow((w_dt), 2.0))) * w_tx + + ((w_dt - sin_wt) / (pow(w_dt, 3.0))) * w_tx * w_tx; + + // Update orientation in respect to gyro bias Jacobians + J_q = R_tau2tau1 * J_q + J_r_tau1 * delta_t; + + // Update alpha and beta in respect to accel bias Jacobians + H_a -= H_al; + H_a += delta_t * H_b; + H_b -= H_be; + + // Derivatives of R_tau12k wrt bias_w entries + Eigen::MatrixXd d_R_bw_1 = -R_tau12k * skew_x(J_q * e_1); + Eigen::MatrixXd d_R_bw_2 = -R_tau12k * skew_x(J_q * e_2); + Eigen::MatrixXd d_R_bw_3 = -R_tau12k * skew_x(J_q * e_3); + + // Now compute the gyro bias Jacobian terms + double df_1_dbw_1; + double df_1_dbw_2; + double df_1_dbw_3; + + double df_2_dbw_1; + double df_2_dbw_2; + double df_2_dbw_3; + + double df_3_dbw_1; + double df_3_dbw_2; + double df_3_dbw_3; + + double df_4_dbw_1; + double df_4_dbw_2; + double df_4_dbw_3; + + if (small_w) { + double df_1_dw_mag = -(pow(delta_t, 5) / 15); + df_1_dbw_1 = w_1 * df_1_dw_mag; + df_1_dbw_2 = w_2 * df_1_dw_mag; + df_1_dbw_3 = w_3 * df_1_dw_mag; + + double df_2_dw_mag = (pow(delta_t, 6) / 72); + df_2_dbw_1 = w_1 * df_2_dw_mag; + df_2_dbw_2 = w_2 * df_2_dw_mag; + df_2_dbw_3 = w_3 * df_2_dw_mag; + + double df_3_dw_mag = -(pow(delta_t, 4) / 12); + df_3_dbw_1 = w_1 * df_3_dw_mag; + df_3_dbw_2 = w_2 * df_3_dw_mag; + df_3_dbw_3 = w_3 * df_3_dw_mag; + + double df_4_dw_mag = (pow(delta_t, 5) / 60); + df_4_dbw_1 = w_1 * df_4_dw_mag; + df_4_dbw_2 = w_2 * df_4_dw_mag; + df_4_dbw_3 = w_3 * df_4_dw_mag; + } else { + double df_1_dw_mag = (pow(w_dt, 2) * sin_wt - 3 * sin_wt + 3 * w_dt * cos_wt) / pow(mag_w, 5); + df_1_dbw_1 = w_1 * df_1_dw_mag; + df_1_dbw_2 = w_2 * df_1_dw_mag; + df_1_dbw_3 = w_3 * df_1_dw_mag; + + + double df_2_dw_mag = (pow(w_dt, 2) - 4 * cos_wt - 4 * w_dt * sin_wt + pow(w_dt, 2) * cos_wt + 4) / (pow(mag_w, 6)); + df_2_dbw_1 = w_1 * df_2_dw_mag; + df_2_dbw_2 = w_2 * df_2_dw_mag; + df_2_dbw_3 = w_3 * df_2_dw_mag; + + double df_3_dw_mag = 
(2 * (cos_wt - 1) + w_dt * sin_wt) / (pow(mag_w, 4)); + df_3_dbw_1 = w_1 * df_3_dw_mag; + df_3_dbw_2 = w_2 * df_3_dw_mag; + df_3_dbw_3 = w_3 * df_3_dw_mag; + + double df_4_dw_mag = (2 * w_dt + w_dt * cos_wt - 3 * sin_wt) / (pow(mag_w, 5)); + df_4_dbw_1 = w_1 * df_4_dw_mag; + df_4_dbw_2 = w_2 * df_4_dw_mag; + df_4_dbw_3 = w_3 * df_4_dw_mag; + } + + // Update alpha and beta gyro bias Jacobians + J_a += J_b * delta_t; + J_a.block(0, 0, 3, 1) += (d_R_bw_1 * alpha_arg + R_tau12k * (df_1_dbw_1 * w_x - f_1 * e_1x + + df_2_dbw_1 * w_x_2 - + f_2 * (e_1x * w_x + w_x * e_1x))) * a_hat; + J_a.block(0, 1, 3, 1) += (d_R_bw_2 * alpha_arg + R_tau12k * (df_1_dbw_2 * w_x - f_1 * e_2x + + df_2_dbw_2 * w_x_2 - + f_2 * (e_2x * w_x + w_x * e_2x))) * a_hat; + J_a.block(0, 2, 3, 1) += (d_R_bw_3 * alpha_arg + R_tau12k * (df_1_dbw_3 * w_x - f_1 * e_3x + + df_2_dbw_3 * w_x_2 - + f_2 * (e_3x * w_x + w_x * e_3x))) * a_hat; + J_b.block(0, 0, 3, 1) += (d_R_bw_1 * Beta_arg + R_tau12k * (df_3_dbw_1 * w_x - f_3 * e_1x + + df_4_dbw_1 * w_x_2 - + f_4 * (e_1x * w_x + w_x * e_1x))) * a_hat; + J_b.block(0, 1, 3, 1) += (d_R_bw_2 * Beta_arg + R_tau12k * (df_3_dbw_2 * w_x - f_3 * e_2x + + df_4_dbw_2 * w_x_2 - + f_4 * (e_2x * w_x + w_x * e_2x))) * a_hat; + J_b.block(0, 2, 3, 1) += (d_R_bw_3 * Beta_arg + R_tau12k * (df_3_dbw_3 * w_x - f_3 * e_3x + + df_4_dbw_3 * w_x_2 - + f_4 * (e_3x * w_x + w_x * e_3x))) * a_hat; + + + //========================================================================== + // MEASUREMENT COVARIANCE + //========================================================================== + + // Going to need orientation at intermediate time i.e. at .5*dt; + Eigen::Matrix R_mid = small_w ? 
eye3 - .5 * delta_t * w_x + (pow(.5 * delta_t, 2) / 2) * w_x_2 : + eye3 - (sin(mag_w * .5 * delta_t) / mag_w) * w_x + ((1.0 - cos(mag_w * .5 * delta_t)) / (pow(mag_w, 2.0))) * w_x_2; + R_mid = R_mid * R_k2tau; + + + //Compute covariance (in this implementation, we use RK4) + //k1------------------------------------------------------------------------------------------------- + + // Build state Jacobian + Eigen::Matrix F_k1 = Eigen::Matrix::Zero(); + F_k1.block(0, 0, 3, 3) = -w_x; + F_k1.block(0, 3, 3, 3) = -eye3; + F_k1.block(6, 0, 3, 3) = -R_k2tau.transpose() * a_x; + F_k1.block(6, 9, 3, 3) = -R_k2tau.transpose(); + F_k1.block(12, 6, 3, 3) = eye3; + + // Build noise Jacobian + Eigen::Matrix G_k1 = Eigen::Matrix::Zero(); + G_k1.block(0, 0, 3, 3) = -eye3; + G_k1.block(3, 3, 3, 3) = eye3; + G_k1.block(6, 6, 3, 3) = -R_k2tau.transpose(); + G_k1.block(9, 9, 3, 3) = eye3; + + // Get covariance derivative + Eigen::Matrix P_dot_k1 = F_k1 * P_meas + P_meas * F_k1.transpose() + G_k1 * Q_c * G_k1.transpose(); + + + //k2------------------------------------------------------------------------------------------------- + + // Build state Jacobian + Eigen::Matrix F_k2 = Eigen::Matrix::Zero(); + F_k2.block(0, 0, 3, 3) = -w_x; + F_k2.block(0, 3, 3, 3) = -eye3; + F_k2.block(6, 0, 3, 3) = -R_mid.transpose() * a_x; + F_k2.block(6, 9, 3, 3) = -R_mid.transpose(); + F_k2.block(12, 6, 3, 3) = eye3; + + // Build noise Jacobian + Eigen::Matrix G_k2 = Eigen::Matrix::Zero(); + G_k2.block(0, 0, 3, 3) = -eye3; + G_k2.block(3, 3, 3, 3) = eye3; + G_k2.block(6, 6, 3, 3) = -R_mid.transpose(); + G_k2.block(9, 9, 3, 3) = eye3; + + // Get covariance derivative + Eigen::Matrix P_k2 = P_meas + P_dot_k1 * delta_t / 2.0; + Eigen::Matrix P_dot_k2 = F_k2 * P_k2 + P_k2 * F_k2.transpose() + G_k2 * Q_c * G_k2.transpose(); + + //k3------------------------------------------------------------------------------------------------- + + // Our state and noise Jacobians are the same as k2 + // Since k2 and k3 
correspond to the same estimates for the midpoint + Eigen::Matrix F_k3 = F_k2; + Eigen::Matrix G_k3 = G_k2; + + // Get covariance derivative + Eigen::Matrix P_k3 = P_meas + P_dot_k2 * delta_t / 2.0; + Eigen::Matrix P_dot_k3 = F_k3 * P_k3 + P_k3 * F_k3.transpose() + G_k3 * Q_c * G_k3.transpose(); + + //k4------------------------------------------------------------------------------------------------- + + // Build state Jacobian + Eigen::Matrix F_k4 = Eigen::Matrix::Zero(); + F_k4.block(0, 0, 3, 3) = -w_x; + F_k4.block(0, 3, 3, 3) = -eye3; + F_k4.block(6, 0, 3, 3) = -R_k2tau1.transpose() * a_x; + F_k4.block(6, 9, 3, 3) = -R_k2tau1.transpose(); + F_k4.block(12, 6, 3, 3) = eye3; + + // Build noise Jacobian + Eigen::Matrix G_k4 = Eigen::Matrix::Zero(); + G_k4.block(0, 0, 3, 3) = -eye3; + G_k4.block(3, 3, 3, 3) = eye3; + G_k4.block(6, 6, 3, 3) = -R_k2tau1.transpose(); + G_k4.block(9, 9, 3, 3) = eye3; + + // Get covariance derivative + Eigen::Matrix P_k4 = P_meas + P_dot_k3 * delta_t; + Eigen::Matrix P_dot_k4 = F_k4 * P_k4 + P_k4 * F_k4.transpose() + G_k4 * Q_c * G_k4.transpose(); + + + //done------------------------------------------------------------------------------------------------- + + // Collect covariance solution + // Ensure it is positive definite + P_meas += (delta_t / 6.0) * (P_dot_k1 + 2.0 * P_dot_k2 + 2.0 * P_dot_k3 + P_dot_k4); + P_meas = 0.5 * (P_meas + P_meas.transpose()); + // Update rotation mean + // Note we had to wait to do this, since we use the old orientation in our covariance calculation + R_k2tau = R_k2tau1; + q_k2tau = rot_2_quat(R_k2tau); - // Get time difference - double delta_t = t_1-t_0; - DT += delta_t; - - //If no time has passed do nothing - if (delta_t == 0){ - return; - } - - // Get estimated imu readings - Eigen::Matrix w_hat = w_m_0 - b_w_lin; - Eigen::Matrix a_hat = a_m_0 - b_a_lin; - // If averaging, average - if (imu_avg){ - w_hat += w_m_1- b_w_lin; - w_hat = 0.5*w_hat; - a_hat += a_m_1- b_a_lin; - a_hat = .5*a_hat; } - // Get 
angle change w*dt - Eigen::Matrix w_hatdt = w_hat * delta_t; - - // Get entries of w_hat - double w_1 = w_hat(0, 0); - double w_2 = w_hat(1, 0); - double w_3 = w_hat(2, 0); - - // Get magnitude of w and wdt - double mag_w = w_hat.norm(); - double w_dt = mag_w * delta_t; - - // Threshold to determine if equations will be unstable - bool small_w = (mag_w < 0.008726646); - - // Get some of the variables used in the preintegration equations - double dt_2 = pow(delta_t,2); - double cos_wt = cos(w_dt); - double sin_wt = sin(w_dt); - - Eigen::Matrix w_x = skew_x(w_hat); - Eigen::Matrix a_x = skew_x(a_hat); - Eigen::Matrix w_tx = skew_x(w_hatdt); - Eigen::Matrix w_x_2 = w_x * w_x; - - - //========================================================================== - // MEASUREMENT MEANS - //========================================================================== - - // Get relative rotation - Eigen::Matrix R_tau2tau1 = small_w? eye3 - delta_t * w_x + (pow(delta_t, 2) / 2) * w_x_2: - eye3 - (sin_wt / mag_w) * w_x + ((1.0 - cos_wt) / (pow(mag_w, 2.0))) * w_x_2; - - // Updated rotation and its transpose - Eigen::Matrix R_k2tau1 = R_tau2tau1*R_k2tau; - Eigen::Matrix R_tau12k = R_k2tau1.transpose(); - - //Intermediate variables for evaluating the measurement/bias Jacobian update - double f_1; - double f_2; - double f_3; - double f_4; - - if (small_w) { - f_1 = -(pow(delta_t, 3) / 3); - f_2 = (pow(delta_t, 4) / 8); - f_3 = -(pow(delta_t, 2) / 2); - f_4 = (pow(delta_t, 3) / 6); - } else { - f_1 = (w_dt * cos_wt - sin_wt) / (pow(mag_w, 3)); - f_2 = (pow(w_dt, 2) - 2 * cos_wt - 2 * w_dt * sin_wt + 2) / (2 * pow(mag_w, 4)); - f_3 = -(1 - cos_wt) / pow(mag_w, 2); - f_4 = (w_dt - sin_wt) / pow(mag_w, 3); - } - - // Compute the main part of our analytical means - Eigen::Matrix alpha_arg = ((dt_2 / 2.0) * eye3 + f_1 * w_x + f_2 * w_x_2); - Eigen::Matrix Beta_arg = (delta_t * eye3 + f_3 * w_x + f_4 * w_x_2); - - // Matrices that will multiply the a_hat in the update expressions - 
Eigen::MatrixXd H_al = R_tau12k * alpha_arg; - Eigen::MatrixXd H_be = R_tau12k * Beta_arg; - - // Update the measurement means - alpha_tau += beta_tau * delta_t + H_al * a_hat; - beta_tau += H_be * a_hat; - - - //========================================================================== - // BIAS JACOBIANS (ANALYTICAL) - //========================================================================== - - // Get right Jacobian - Eigen::Matrix J_r_tau1 = small_w? eye3 - .5 * w_tx + (1.0 / 6.0) * w_tx * w_tx: - eye3 - ((1 - cos_wt) / (pow((w_dt), 2.0))) * w_tx + - ((w_dt - sin_wt) / (pow(w_dt, 3.0))) * w_tx * w_tx; - - // Update orientation in respect to gyro bias Jacobians - J_q = R_tau2tau1 * J_q + J_r_tau1 * delta_t; - - // Update alpha and beta in respect to accel bias Jacobians - H_a -= H_al; - H_a += delta_t*H_b; - H_b -= H_be; - - // Derivatives of R_tau12k wrt bias_w entries - Eigen::MatrixXd d_R_bw_1 = -R_tau12k * skew_x(J_q * e_1); - Eigen::MatrixXd d_R_bw_2 = -R_tau12k * skew_x(J_q * e_2); - Eigen::MatrixXd d_R_bw_3 = -R_tau12k * skew_x(J_q * e_3); - - // Now compute the gyro bias Jacobian terms - double df_1_dbw_1; - double df_1_dbw_2; - double df_1_dbw_3; - - double df_2_dbw_1; - double df_2_dbw_2; - double df_2_dbw_3; - - double df_3_dbw_1; - double df_3_dbw_2; - double df_3_dbw_3; - - double df_4_dbw_1; - double df_4_dbw_2; - double df_4_dbw_3; - - if (small_w) { - double df_1_dw_mag = -(pow(delta_t, 5) / 15); - df_1_dbw_1 = w_1 * df_1_dw_mag; - df_1_dbw_2 = w_2 * df_1_dw_mag; - df_1_dbw_3 = w_3 * df_1_dw_mag; - - double df_2_dw_mag = (pow(delta_t, 6) / 72); - df_2_dbw_1 = w_1 * df_2_dw_mag; - df_2_dbw_2 = w_2 * df_2_dw_mag; - df_2_dbw_3 = w_3 * df_2_dw_mag; - - double df_3_dw_mag = -(pow(delta_t, 4) / 12); - df_3_dbw_1 = w_1 * df_3_dw_mag; - df_3_dbw_2 = w_2 * df_3_dw_mag; - df_3_dbw_3 = w_3 * df_3_dw_mag; - - double df_4_dw_mag = (pow(delta_t, 5) / 60); - df_4_dbw_1 = w_1 * df_4_dw_mag; - df_4_dbw_2 = w_2 * df_4_dw_mag; - df_4_dbw_3 = w_3 * df_4_dw_mag; - 
} - else { - double df_1_dw_mag = (pow(w_dt, 2) * sin_wt - 3 * sin_wt + 3 * w_dt * cos_wt) / pow(mag_w, 5); - df_1_dbw_1 = w_1 * df_1_dw_mag; - df_1_dbw_2 = w_2 * df_1_dw_mag; - df_1_dbw_3 = w_3 * df_1_dw_mag; - - - double df_2_dw_mag = (pow(w_dt, 2) - 4 * cos_wt - 4 * w_dt * sin_wt + pow(w_dt, 2) * cos_wt + 4) / (pow(mag_w, 6)); - df_2_dbw_1 = w_1 * df_2_dw_mag; - df_2_dbw_2 = w_2 * df_2_dw_mag; - df_2_dbw_3 = w_3 * df_2_dw_mag; - - double df_3_dw_mag = (2 * (cos_wt - 1) + w_dt * sin_wt) / (pow(mag_w, 4)); - df_3_dbw_1 = w_1 * df_3_dw_mag; - df_3_dbw_2 = w_2 * df_3_dw_mag; - df_3_dbw_3 = w_3 * df_3_dw_mag; - - double df_4_dw_mag = (2 * w_dt + w_dt * cos_wt - 3 * sin_wt) / (pow(mag_w, 5)); - df_4_dbw_1 = w_1 * df_4_dw_mag; - df_4_dbw_2 = w_2 * df_4_dw_mag; - df_4_dbw_3 = w_3 * df_4_dw_mag; - } - - // Update alpha and beta gyro bias Jacobians - J_a += J_b * delta_t; - J_a.block(0, 0, 3, 1) += (d_R_bw_1 * alpha_arg + R_tau12k * (df_1_dbw_1 * w_x - f_1 * e_1x + - df_2_dbw_1 * w_x_2 - - f_2 * (e_1x * w_x + w_x * e_1x))) * a_hat; - J_a.block(0, 1, 3, 1) += (d_R_bw_2 * alpha_arg + R_tau12k * (df_1_dbw_2 * w_x - f_1 * e_2x + - df_2_dbw_2 * w_x_2 - - f_2 * (e_2x * w_x + w_x * e_2x))) * a_hat; - J_a.block(0, 2, 3, 1) += (d_R_bw_3 * alpha_arg + R_tau12k * (df_1_dbw_3 * w_x - f_1 * e_3x + - df_2_dbw_3 * w_x_2 - - f_2 * (e_3x * w_x + w_x * e_3x))) * a_hat; - J_b.block(0, 0, 3, 1) += (d_R_bw_1 * Beta_arg + R_tau12k * (df_3_dbw_1 * w_x - f_3 * e_1x + - df_4_dbw_1 * w_x_2 - - f_4 * (e_1x * w_x + w_x * e_1x))) * a_hat; - J_b.block(0, 1, 3, 1) += (d_R_bw_2 * Beta_arg + R_tau12k * (df_3_dbw_2 * w_x - f_3 * e_2x + - df_4_dbw_2 * w_x_2 - - f_4 * (e_2x * w_x + w_x * e_2x))) * a_hat; - J_b.block(0, 2, 3, 1) += (d_R_bw_3 * Beta_arg + R_tau12k * (df_3_dbw_3 * w_x - f_3 * e_3x + - df_4_dbw_3 * w_x_2 - - f_4 * (e_3x * w_x + w_x * e_3x))) * a_hat; - - - //========================================================================== - // MEASUREMENT COVARIANCE - 
//========================================================================== - - // Going to need orientation at intermediate time i.e. at .5*dt; - Eigen::Matrix R_mid = small_w? eye3 - .5*delta_t * w_x + (pow(.5*delta_t, 2) / 2) * w_x_2: - eye3 - (sin(mag_w*.5*delta_t) / mag_w) * w_x + ((1.0 - cos(mag_w*.5*delta_t)) / (pow(mag_w, 2.0))) * w_x_2; - R_mid = R_mid*R_k2tau; - - - //Compute covariance (in this implementation, we use RK4) - //k1------------------------------------------------------------------------------------------------- - - // Build state Jacobian - Eigen::Matrix F_k1 = Eigen::Matrix::Zero(); - F_k1.block(0, 0, 3, 3) = -w_x; - F_k1.block(0, 3, 3, 3) = -eye3; - F_k1.block(6, 0, 3, 3) = -R_k2tau.transpose() * a_x; - F_k1.block(6, 9, 3, 3) = -R_k2tau.transpose(); - F_k1.block(12, 6, 3, 3) = eye3; - - // Build noise Jacobian - Eigen::Matrix G_k1 = Eigen::Matrix::Zero(); - G_k1.block(0, 0, 3, 3) = -eye3; - G_k1.block(3, 3, 3, 3) = eye3; - G_k1.block(6, 6, 3, 3) = -R_k2tau.transpose(); - G_k1.block(9, 9, 3, 3) = eye3; - - // Get covariance derivative - Eigen::Matrix P_dot_k1 = F_k1 * P_meas + P_meas * F_k1.transpose() + G_k1 * Q_c * G_k1.transpose(); - - - //k2------------------------------------------------------------------------------------------------- - - // Build state Jacobian - Eigen::Matrix F_k2 = Eigen::Matrix::Zero(); - F_k2.block(0, 0, 3, 3) = -w_x; - F_k2.block(0, 3, 3, 3) = -eye3; - F_k2.block(6, 0, 3, 3) = -R_mid.transpose() * a_x; - F_k2.block(6, 9, 3, 3) = -R_mid.transpose(); - F_k2.block(12, 6, 3, 3) = eye3; - - // Build noise Jacobian - Eigen::Matrix G_k2 = Eigen::Matrix::Zero(); - G_k2.block(0, 0, 3, 3) = -eye3; - G_k2.block(3, 3, 3, 3) = eye3; - G_k2.block(6, 6, 3, 3) = -R_mid.transpose(); - G_k2.block(9, 9, 3, 3) = eye3; - - // Get covariance derivative - Eigen::Matrix P_k2 = P_meas + P_dot_k1 * delta_t / 2.0; - Eigen::Matrix P_dot_k2 = F_k2 * P_k2 + P_k2 * F_k2.transpose() + G_k2 * Q_c * G_k2.transpose(); - - 
//k3------------------------------------------------------------------------------------------------- - - // Our state and noise Jacobians are the same as k2 - // Since k2 and k3 correspond to the same estimates for the midpoint - Eigen::Matrix F_k3 = F_k2; - Eigen::Matrix G_k3 = G_k2; - - // Get covariance derivative - Eigen::Matrix P_k3 = P_meas + P_dot_k2 * delta_t / 2.0; - Eigen::Matrix P_dot_k3 = F_k3 * P_k3 + P_k3 * F_k3.transpose() + G_k3 * Q_c * G_k3.transpose(); - - //k4------------------------------------------------------------------------------------------------- - - // Build state Jacobian - Eigen::Matrix F_k4 = Eigen::Matrix::Zero(); - F_k4.block(0, 0, 3, 3) = -w_x; - F_k4.block(0, 3, 3, 3) = -eye3; - F_k4.block(6, 0, 3, 3) = -R_k2tau1.transpose() * a_x; - F_k4.block(6, 9, 3, 3) = -R_k2tau1.transpose(); - F_k4.block(12, 6, 3, 3) = eye3; - - // Build noise Jacobian - Eigen::Matrix G_k4 = Eigen::Matrix::Zero(); - G_k4.block(0, 0, 3, 3) = -eye3; - G_k4.block(3, 3, 3, 3) = eye3; - G_k4.block(6, 6, 3, 3) = -R_k2tau1.transpose(); - G_k4.block(9, 9, 3, 3) = eye3; - - // Get covariance derivative - Eigen::Matrix P_k4 = P_meas + P_dot_k3 * delta_t; - Eigen::Matrix P_dot_k4 = F_k4 * P_k4 + P_k4 * F_k4.transpose() + G_k4 * Q_c * G_k4.transpose(); - - - //done------------------------------------------------------------------------------------------------- - - // Collect covariance solution - // Ensure it is positive definite - P_meas += (delta_t / 6.0) * (P_dot_k1 + 2.0 * P_dot_k2 + 2.0 * P_dot_k3 + P_dot_k4); - P_meas = 0.5 * (P_meas + P_meas.transpose()); - - // Update rotation mean - // Note we had to wait to do this, since we use the old orientation in our covariance calculation - R_k2tau = R_k2tau1; - q_k2tau = rot_2_quat(R_k2tau); - - - } - -}; + }; +} #endif /* CPI_V1_H */ \ No newline at end of file diff --git a/ov_core/src/cpi/CpiV2.h b/ov_core/src/cpi/CpiV2.h index 14690d68e..5144f333b 100644 --- a/ov_core/src/cpi/CpiV2.h +++ b/ov_core/src/cpi/CpiV2.h 
@@ -1,5 +1,6 @@ #ifndef CPI_V2_H #define CPI_V2_H + /* * MIT License * Copyright (c) 2018 Kevin Eckenhoff @@ -30,463 +31,467 @@ #include "CpiBase.h" #include "utils/quat_ops.h" - /** - * @brief Model 2 of continuous preintegration. - * - * This model is the "piecewise constant local acceleration assumption." - * Please see the following publication for details on the theory: - * > Continuous Preintegration Theory for Graph-based Visual-Inertial Navigation - * > Authors: Kevin Eckenhoff, Patrick Geneva, and Guoquan Huang - * > http://udel.edu/~ghuang/papers/tr_cpi.pdf - * - * The steps to use this preintegration class are as follows: - * 1. call setLinearizationPoints() to set the bias/orientation linearization point - * 2. call feed_IMU() will all IMU measurements you want to precompound over - * 3. access public varibles, to get means, Jacobians, and measurement covariance + * @namespace ov_core + * @brief Core algorithms for Open VINS */ -class CpiV2: public CpiBase { - -private: - - // Extended covariance used to handle the sampling model - Eigen::Matrix P_big = Eigen::Matrix::Zero(); - - // Our large compounded state transition Jacobian matrix - Eigen::Matrix Discrete_J_b = Eigen::Matrix::Identity(); - - -public: - - - /** - * @brief - * If we want to use analytical jacobians or not. - * In the paper we just numerically integrated the jacobians - * If set to false, we use a closed form version similar to model 1. 
- */ - bool state_transition_jacobians = true; - - - // Alpha and beta Jacobians wrt linearization orientation - Eigen::Matrix O_a= Eigen::Matrix::Zero(); - Eigen::Matrix O_b= Eigen::Matrix::Zero(); +namespace ov_core { /** - * @brief Default constructor for our Model 2 preintegration (piecewise constant local acceleration assumption) - * @param sigma_w gyroscope white noise density (rad/s/sqrt(hz)) - * @param sigma_wb gyroscope random walk (rad/s^2/sqrt(hz)) - * @param sigma_a accelerometer white noise density (m/s^2/sqrt(hz)) - * @param sigma_ab accelerometer random walk (m/s^3/sqrt(hz)) - * @param imu_avg_ if we want to average the imu measurements (IJRR paper did not do this) - */ - CpiV2(double sigma_w, double sigma_wb, double sigma_a, double sigma_ab, bool imu_avg_= false): - CpiBase(sigma_w, sigma_wb, sigma_a, sigma_ab, imu_avg_){ } - - - - /** - * @brief Our precompound function for Model 2 - * @param[in] t_0 first IMU timestamp - * @param[in] t_1 second IMU timestamp - * @param[in] w_m_0 first imu gyroscope measurement - * @param[in] a_m_0 first imu acceleration measurement - * @param[in] w_m_1 second imu gyroscope measurement - * @param[in] a_m_1 second imu acceleration measurement + * @brief Model 2 of continuous preintegration. + * + * This model is the "piecewise constant local acceleration assumption." + * Please see the following publication for details on the theory: + * > Continuous Preintegration Theory for Graph-based Visual-Inertial Navigation + * > Authors: Kevin Eckenhoff, Patrick Geneva, and Guoquan Huang + * > http://udel.edu/~ghuang/papers/tr_cpi.pdf * - * We will first analytically integrate our meansurement. - * We can numerically or analytically integrate our bias jacobians. - * Then we perform numerical integration for our measurement covariance. + * The steps to use this preintegration class are as follows: + * 1. call setLinearizationPoints() to set the bias/orientation linearization point + * 2. 
call feed_IMU() will all IMU measurements you want to precompound over + * 3. access public varibles, to get means, Jacobians, and measurement covariance */ - void feed_IMU(double t_0, double t_1, Eigen::Matrix w_m_0, Eigen::Matrix a_m_0, - Eigen::Matrix w_m_1 = Eigen::Matrix::Zero(), - Eigen::Matrix a_m_1 = Eigen::Matrix::Zero()) { - - //Get time difference - double delta_t= t_1-t_0; - DT += delta_t; - - // If no time has passed do nothing - if (delta_t == 0){ - return; - } - - // Get estimated imu readings - Eigen::Matrix w_hat= w_m_0 - b_w_lin; - Eigen::Matrix a_hat= a_m_0 - b_a_lin - R_k2tau*quat_2_Rot(q_k_lin)*grav; - - // If averaging, average - // Note: we will average the LOCAL acceleration after getting the relative rotation - if (imu_avg){ - w_hat += w_m_1- b_w_lin; - w_hat = 0.5*w_hat; - } - - // Get angle change w*dt - Eigen::Matrix w_hatdt = w_hat * delta_t; + class CpiV2 : public CpiBase { - // Get entries of w_hat - double w_1 = w_hat(0, 0); - double w_2 = w_hat(1, 0); - double w_3 = w_hat(2, 0); + private: + + // Extended covariance used to handle the sampling model + Eigen::Matrix P_big = Eigen::Matrix::Zero(); - //Get magnitude of w and wdt - double mag_w = w_hat.norm(); - double w_dt = mag_w * delta_t; + // Our large compounded state transition Jacobian matrix + Eigen::Matrix Discrete_J_b = Eigen::Matrix::Identity(); - //Threshold to determine if equations will be unstable - bool small_w= (mag_w < 0.008726646); - //Get some of the variables used in the preintegration equations - double dt_2= pow(delta_t,2); - double cos_wt = cos(w_dt); - double sin_wt = sin(w_dt); + public: + - Eigen::Matrix w_x = skew_x(w_hat); - Eigen::Matrix w_tx = skew_x(w_hatdt); - Eigen::Matrix w_x_2 = w_x * w_x; + /** + * @brief + * If we want to use analytical jacobians or not. + * In the paper we just numerically integrated the jacobians + * If set to false, we use a closed form version similar to model 1. 
+ */ + bool state_transition_jacobians = true; + + + // Alpha and beta Jacobians wrt linearization orientation + Eigen::Matrix O_a = Eigen::Matrix::Zero(); + Eigen::Matrix O_b = Eigen::Matrix::Zero(); + + + /** + * @brief Default constructor for our Model 2 preintegration (piecewise constant local acceleration assumption) + * @param sigma_w gyroscope white noise density (rad/s/sqrt(hz)) + * @param sigma_wb gyroscope random walk (rad/s^2/sqrt(hz)) + * @param sigma_a accelerometer white noise density (m/s^2/sqrt(hz)) + * @param sigma_ab accelerometer random walk (m/s^3/sqrt(hz)) + * @param imu_avg_ if we want to average the imu measurements (IJRR paper did not do this) + */ + CpiV2(double sigma_w, double sigma_wb, double sigma_a, double sigma_ab, bool imu_avg_ = false) : + CpiBase(sigma_w, sigma_wb, sigma_a, sigma_ab, imu_avg_) {} + + + /** + * @brief Our precompound function for Model 2 + * @param[in] t_0 first IMU timestamp + * @param[in] t_1 second IMU timestamp + * @param[in] w_m_0 first imu gyroscope measurement + * @param[in] a_m_0 first imu acceleration measurement + * @param[in] w_m_1 second imu gyroscope measurement + * @param[in] a_m_1 second imu acceleration measurement + * + * We will first analytically integrate our meansurement. + * We can numerically or analytically integrate our bias jacobians. + * Then we perform numerical integration for our measurement covariance. 
+ */ + void feed_IMU(double t_0, double t_1, Eigen::Matrix w_m_0, Eigen::Matrix a_m_0, + Eigen::Matrix w_m_1 = Eigen::Matrix::Zero(), + Eigen::Matrix a_m_1 = Eigen::Matrix::Zero()) { + + //Get time difference + double delta_t = t_1 - t_0; + DT += delta_t; + + // If no time has passed do nothing + if (delta_t == 0) { + return; + } + + // Get estimated imu readings + Eigen::Matrix w_hat = w_m_0 - b_w_lin; + Eigen::Matrix a_hat = a_m_0 - b_a_lin - R_k2tau * quat_2_Rot(q_k_lin) * grav; + + // If averaging, average + // Note: we will average the LOCAL acceleration after getting the relative rotation + if (imu_avg) { + w_hat += w_m_1 - b_w_lin; + w_hat = 0.5 * w_hat; + } + + // Get angle change w*dt + Eigen::Matrix w_hatdt = w_hat * delta_t; + + // Get entries of w_hat + double w_1 = w_hat(0, 0); + double w_2 = w_hat(1, 0); + double w_3 = w_hat(2, 0); + + //Get magnitude of w and wdt + double mag_w = w_hat.norm(); + double w_dt = mag_w * delta_t; + + //Threshold to determine if equations will be unstable + bool small_w = (mag_w < 0.008726646); + + //Get some of the variables used in the preintegration equations + double dt_2 = pow(delta_t, 2); + double cos_wt = cos(w_dt); + double sin_wt = sin(w_dt); + + Eigen::Matrix w_x = skew_x(w_hat); + Eigen::Matrix w_tx = skew_x(w_hatdt); + Eigen::Matrix w_x_2 = w_x * w_x; + + + //========================================================================== + // MEASUREMENT MEANS + //========================================================================== + + // Get relative rotation + Eigen::Matrix R_tau2tau1 = small_w ? 
eye3 - delta_t * w_x + (pow(delta_t, 2) / 2) * w_x_2 : + eye3 - (sin_wt / mag_w) * w_x + ((1.0 - cos_wt) / (pow(mag_w, 2.0))) * w_x_2; + + // Updated roation and its transpose + Eigen::Matrix R_k2tau1 = R_tau2tau1 * R_k2tau; + Eigen::Matrix R_tau12k = R_k2tau1.transpose(); + + // If averaging, average the LOCAL acceleration + if (imu_avg) { + a_hat += a_m_1 - b_a_lin - R_k2tau1 * quat_2_Rot(q_k_lin) * grav; + a_hat = 0.5 * a_hat; + } + Eigen::Matrix a_x = skew_x(a_hat); + + // Intermediate variables for evaluating the measurement/bias Jacobian update + double f_1; + double f_2; + double f_3; + double f_4; + + if (small_w) { + f_1 = -(pow(delta_t, 3) / 3); + f_2 = (pow(delta_t, 4) / 8); + f_3 = -(pow(delta_t, 2) / 2); + f_4 = (pow(delta_t, 3) / 6); + } else { + f_1 = (w_dt * cos_wt - sin_wt) / (pow(mag_w, 3)); + f_2 = (pow(w_dt, 2) - 2 * cos_wt - 2 * w_dt * sin_wt + 2) / (2 * pow(mag_w, 4)); + f_3 = -(1 - cos_wt) / pow(mag_w, 2); + f_4 = (w_dt - sin_wt) / pow(mag_w, 3); + } + + // Compute the main part of our analytical means + Eigen::Matrix alpha_arg = ((dt_2 / 2.0) * eye3 + f_1 * w_x + f_2 * w_x_2); + Eigen::Matrix Beta_arg = (delta_t * eye3 + f_3 * w_x + f_4 * w_x_2); + + // Matrices that will multiply the a_hat in the update expressions + Eigen::Matrix H_al = R_tau12k * alpha_arg; + Eigen::Matrix H_be = R_tau12k * Beta_arg; + + // Update the measurements + alpha_tau += beta_tau * delta_t + H_al * a_hat; + beta_tau += H_be * a_hat; + + + //========================================================================== + // BIAS JACOBIANS (ANALYTICAL) + //========================================================================== + + //Get right Jacobian + Eigen::Matrix J_r_tau1 = small_w ? 
eye3 - .5 * w_tx + (1.0 / 6.0) * w_tx * w_tx : + eye3 - ((1 - cos_wt) / (pow((w_dt), 2.0))) * w_tx + + ((w_dt - sin_wt) / (pow(w_dt, 3.0))) * w_tx * w_tx; + + // Update orientation in respect to gyro bias Jacobians + Eigen::Matrix J_save = J_q; + J_q = R_tau2tau1 * J_q + J_r_tau1 * delta_t; + + // Update alpha and beta in respect to accel bias Jacobian + H_a -= H_al; + H_a += delta_t * H_b; + H_b -= H_be; + + // Update alpha and beta in respect to q_GtoLIN Jacobian + Eigen::Matrix g_k = quat_2_Rot(q_k_lin) * grav; + O_a += delta_t * O_b; + O_a += -H_al * R_k2tau * skew_x(g_k); + O_b += -H_be * R_k2tau * skew_x(g_k); + + // Derivatives of R_tau12k wrt bias_w entries + Eigen::MatrixXd d_R_bw_1 = -R_tau12k * skew_x(J_q * e_1); + Eigen::MatrixXd d_R_bw_2 = -R_tau12k * skew_x(J_q * e_2); + Eigen::MatrixXd d_R_bw_3 = -R_tau12k * skew_x(J_q * e_3); + + + // Now compute the gyro bias Jacobian terms + double df_1_dbw_1; + double df_1_dbw_2; + double df_1_dbw_3; + + double df_2_dbw_1; + double df_2_dbw_2; + double df_2_dbw_3; + + double df_3_dbw_1; + double df_3_dbw_2; + double df_3_dbw_3; + + double df_4_dbw_1; + double df_4_dbw_2; + double df_4_dbw_3; + + + if (small_w) { + double df_1_dw_mag = -(pow(delta_t, 5) / 15); + df_1_dbw_1 = w_1 * df_1_dw_mag; + df_1_dbw_2 = w_2 * df_1_dw_mag; + df_1_dbw_3 = w_3 * df_1_dw_mag; + + double df_2_dw_mag = (pow(delta_t, 6) / 72); + df_2_dbw_1 = w_1 * df_2_dw_mag; + df_2_dbw_2 = w_2 * df_2_dw_mag; + df_2_dbw_3 = w_3 * df_2_dw_mag; + + double df_3_dw_mag = -(pow(delta_t, 4) / 12); + df_3_dbw_1 = w_1 * df_3_dw_mag; + df_3_dbw_2 = w_2 * df_3_dw_mag; + df_3_dbw_3 = w_3 * df_3_dw_mag; + + double df_4_dw_mag = (pow(delta_t, 5) / 60); + df_4_dbw_1 = w_1 * df_4_dw_mag; + df_4_dbw_2 = w_2 * df_4_dw_mag; + df_4_dbw_3 = w_3 * df_4_dw_mag; + } else { + double df_1_dw_mag = (pow(w_dt, 2) * sin_wt - 3 * sin_wt + 3 * w_dt * cos_wt) / pow(mag_w, 5); + df_1_dbw_1 = w_1 * df_1_dw_mag; + df_1_dbw_2 = w_2 * df_1_dw_mag; + df_1_dbw_3 = w_3 * df_1_dw_mag; + 
+ + double df_2_dw_mag = + (pow(w_dt, 2) - 4 * cos_wt - 4 * w_dt * sin_wt + pow(w_dt, 2) * cos_wt + 4) / (pow(mag_w, 6)); + df_2_dbw_1 = w_1 * df_2_dw_mag; + df_2_dbw_2 = w_2 * df_2_dw_mag; + df_2_dbw_3 = w_3 * df_2_dw_mag; + + double df_3_dw_mag = (2 * (cos_wt - 1) + w_dt * sin_wt) / (pow(mag_w, 4)); + df_3_dbw_1 = w_1 * df_3_dw_mag; + df_3_dbw_2 = w_2 * df_3_dw_mag; + df_3_dbw_3 = w_3 * df_3_dw_mag; + + double df_4_dw_mag = (2 * w_dt + w_dt * cos_wt - 3 * sin_wt) / (pow(mag_w, 5)); + df_4_dbw_1 = w_1 * df_4_dw_mag; + df_4_dbw_2 = w_2 * df_4_dw_mag; + df_4_dbw_3 = w_3 * df_4_dw_mag; + } + + // Gravity rotated into the "tau'th" frame (i.e. start of the measurement interval) + Eigen::Matrix g_tau = R_k2tau * quat_2_Rot(q_k_lin) * grav; + + + // Update gyro bias Jacobians + J_a += J_b * delta_t; + J_a.block(0, 0, 3, 1) += (d_R_bw_1 * alpha_arg + R_tau12k * (df_1_dbw_1 * w_x - f_1 * e_1x + + df_2_dbw_1 * w_x_2 - + f_2 * (e_1x * w_x + w_x * e_1x))) * a_hat + - H_al * skew_x((J_save * e_1)) * g_tau; + J_a.block(0, 1, 3, 1) += (d_R_bw_2 * alpha_arg + R_tau12k * (df_1_dbw_2 * w_x - f_1 * e_2x + + df_2_dbw_2 * w_x_2 - + f_2 * (e_2x * w_x + w_x * e_2x))) * a_hat + - H_al * skew_x((J_save * e_2)) * g_tau; + J_a.block(0, 2, 3, 1) += (d_R_bw_3 * alpha_arg + R_tau12k * (df_1_dbw_3 * w_x - f_1 * e_3x + + df_2_dbw_3 * w_x_2 - + f_2 * (e_3x * w_x + w_x * e_3x))) * a_hat + - H_al * skew_x((J_save * e_3)) * g_tau; + J_b.block(0, 0, 3, 1) += (d_R_bw_1 * Beta_arg + R_tau12k * (df_3_dbw_1 * w_x - f_3 * e_1x + + df_4_dbw_1 * w_x_2 - + f_4 * (e_1x * w_x + w_x * e_1x))) * a_hat - + -H_be * skew_x((J_save * e_1)) * g_tau; + J_b.block(0, 1, 3, 1) += (d_R_bw_2 * Beta_arg + R_tau12k * (df_3_dbw_2 * w_x - f_3 * e_2x + + df_4_dbw_2 * w_x_2 - + f_4 * (e_2x * w_x + w_x * e_2x))) * a_hat + - H_be * skew_x((J_save * e_2)) * g_tau; + J_b.block(0, 2, 3, 1) += (d_R_bw_3 * Beta_arg + R_tau12k * (df_3_dbw_3 * w_x - f_3 * e_3x + + df_4_dbw_3 * w_x_2 - + f_4 * (e_3x * w_x + w_x * e_3x))) * a_hat + - H_be 
* skew_x((J_save * e_3)) * g_tau; + + + + + //========================================================================== + // MEASUREMENT COVARIANCE + //========================================================================== + + // Going to need orientation at intermediate time i.e. at .5*dt; + Eigen::Matrix R_G_to_k = quat_2_Rot(q_k_lin); + double dt_mid = delta_t / 2.0; + double w_dt_mid = mag_w * dt_mid; + Eigen::Matrix R_mid; + + // The middle of this interval (i.e., rotation from k to mid) + R_mid = small_w ? eye3 - dt_mid * w_x + (pow(dt_mid, 2) / 2) * w_x_2 : + eye3 - (sin(w_dt_mid) / mag_w) * w_x + ((1.0 - cos(w_dt_mid)) / (pow(mag_w, 2.0))) * w_x_2; + R_mid = R_mid * R_k2tau; + + + // Compute covariance (in this implementation, we use RK4) + //k1------------------------------------------------------------------------------------------------- + + // Build state Jacobian + Eigen::Matrix F_k1 = Eigen::Matrix::Zero(); + F_k1.block(0, 0, 3, 3) = -w_x; + F_k1.block(0, 3, 3, 3) = -eye3; + F_k1.block(6, 0, 3, 3) = -R_k2tau.transpose() * a_x; + F_k1.block(6, 9, 3, 3) = -R_k2tau.transpose(); + F_k1.block(6, 15, 3, 3) = -R_k2tau.transpose() * skew_x(R_k2tau * R_G_to_k * grav); + F_k1.block(6, 18, 3, 3) = -R_k2tau.transpose() * R_k2tau * skew_x(R_G_to_k * grav); + F_k1.block(12, 6, 3, 3) = eye3; + + // Build noise Jacobian + Eigen::Matrix G_k1 = Eigen::Matrix::Zero(); + G_k1.block(0, 0, 3, 3) = -eye3; + G_k1.block(3, 3, 3, 3) = eye3; + G_k1.block(6, 6, 3, 3) = -R_k2tau.transpose(); + G_k1.block(9, 9, 3, 3) = eye3; + + // Get state transition and covariance derivative + Eigen::Matrix Phi_dot_k1 = F_k1; + Eigen::Matrix P_dot_k1 = F_k1 * P_big + P_big * F_k1.transpose() + G_k1 * Q_c * G_k1.transpose(); + + + //k2------------------------------------------------------------------------------------------------- + + // Build state Jacobian + Eigen::Matrix F_k2 = Eigen::Matrix::Zero(); + F_k2.block(0, 0, 3, 3) = -w_x; + F_k2.block(0, 3, 3, 3) = -eye3; + F_k2.block(6, 0, 3, 
3) = -R_mid.transpose() * a_x; + F_k2.block(6, 9, 3, 3) = -R_mid.transpose(); + F_k2.block(6, 15, 3, 3) = -R_mid.transpose() * skew_x(R_k2tau * R_G_to_k * grav); + F_k2.block(6, 18, 3, 3) = -R_mid.transpose() * R_k2tau * skew_x(R_G_to_k * grav); + F_k2.block(12, 6, 3, 3) = eye3; + + // Build noise Jacobian + Eigen::Matrix G_k2 = Eigen::Matrix::Zero(); + G_k2.block(0, 0, 3, 3) = -eye3; + G_k2.block(3, 3, 3, 3) = eye3; + G_k2.block(6, 6, 3, 3) = -R_mid.transpose(); + G_k2.block(9, 9, 3, 3) = eye3; + + // Get state transition and covariance derivative + Eigen::Matrix Phi_k2 = Eigen::Matrix::Identity() + Phi_dot_k1 * dt_mid; + Eigen::Matrix P_k2 = P_big + P_dot_k1 * dt_mid; + Eigen::Matrix Phi_dot_k2 = F_k2 * Phi_k2; + Eigen::Matrix P_dot_k2 = F_k2 * P_k2 + P_k2 * F_k2.transpose() + G_k2 * Q_c * G_k2.transpose(); + + + //k3------------------------------------------------------------------------------------------------- + + // Our state and noise Jacobians are the same as k2 + // Since k2 and k3 correspond to the same estimates for the midpoint + Eigen::Matrix F_k3 = F_k2; + Eigen::Matrix G_k3 = G_k2; + + // Get state transition and covariance derivative + Eigen::Matrix Phi_k3 = Eigen::Matrix::Identity() + Phi_dot_k2 * dt_mid; + Eigen::Matrix P_k3 = P_big + P_dot_k2 * dt_mid; + Eigen::Matrix Phi_dot_k3 = F_k3 * Phi_k3; + Eigen::Matrix P_dot_k3 = F_k3 * P_k3 + P_k3 * F_k3.transpose() + G_k3 * Q_c * G_k3.transpose(); + + + //k4------------------------------------------------------------------------------------------------- + + // Build state Jacobian + Eigen::Matrix F_k4 = Eigen::Matrix::Zero(); + F_k4.block(0, 0, 3, 3) = -w_x; + F_k4.block(0, 3, 3, 3) = -eye3; + F_k4.block(6, 0, 3, 3) = -R_k2tau1.transpose() * a_x; + F_k4.block(6, 9, 3, 3) = -R_k2tau1.transpose(); + F_k4.block(6, 15, 3, 3) = -R_k2tau1.transpose() * skew_x(R_k2tau * R_G_to_k * grav); + F_k4.block(6, 18, 3, 3) = -R_k2tau1.transpose() * R_k2tau * skew_x(R_G_to_k * grav); + F_k4.block(12, 6, 3, 3) = eye3; + 
// Build noise Jacobian + Eigen::Matrix G_k4 = Eigen::Matrix::Zero(); + G_k4.block(0, 0, 3, 3) = -eye3; + G_k4.block(3, 3, 3, 3) = eye3; + G_k4.block(6, 6, 3, 3) = -R_k2tau1.transpose(); + G_k4.block(9, 9, 3, 3) = eye3; + + // Get state transition and covariance derivative + Eigen::Matrix Phi_k4 = Eigen::Matrix::Identity() + Phi_dot_k3 * delta_t; + Eigen::Matrix P_k4 = P_big + P_dot_k3 * delta_t; + Eigen::Matrix Phi_dot_k4 = F_k4 * Phi_k4; + Eigen::Matrix P_dot_k4 = F_k4 * P_k4 + P_k4 * F_k4.transpose() + G_k4 * Q_c * G_k4.transpose(); + + + //done------------------------------------------------------------------------------------------------- + + // Collect covariance solution + // Ensure it is positive definite + P_big += (delta_t / 6.0) * (P_dot_k1 + 2.0 * P_dot_k2 + 2.0 * P_dot_k3 + P_dot_k4); + P_big = 0.5 * (P_big + P_big.transpose()); + + // Calculate the state transition from time k to tau + Eigen::Matrix Phi = Eigen::Matrix::Identity() + + (delta_t / 6.0) * (Phi_dot_k1 + 2.0 * Phi_dot_k2 + 2.0 * Phi_dot_k3 + Phi_dot_k4); + + + + + //========================================================================== + // CLONE TO NEW SAMPLE TIME AND MARGINALIZE OLD SAMPLE TIME + //========================================================================== + + // Matrix that performs the clone and mariginalization + Eigen::Matrix B_k = Eigen::Matrix::Identity(); + B_k.block(15, 15, 3, 3).setZero(); + B_k.block(15, 0, 3, 3) = Eigen::Matrix::Identity(); + + // Change our measurement covariance and state transition + P_big = B_k * P_big * B_k.transpose(); + P_big = 0.5 * (P_big + P_big.transpose()); + Discrete_J_b = B_k * Phi * Discrete_J_b; - //========================================================================== - // MEASUREMENT MEANS - //========================================================================== + // Our measurement covariance is the top 15x15 of our large covariance + P_meas = P_big.block(0, 0, 15, 15); - // Get relative rotation - Eigen::Matrix 
R_tau2tau1= small_w? eye3 - delta_t * w_x + (pow(delta_t, 2) / 2) * w_x_2: - eye3 - (sin_wt / mag_w) * w_x + ((1.0 - cos_wt) / (pow(mag_w, 2.0))) * w_x_2; + // If we are using the state transition Jacobian, then we should overwrite the analytical versions + // Note: we flip the sign for J_q to match the Model 1 derivation + if (state_transition_jacobians) { + J_q = -Discrete_J_b.block(0, 3, 3, 3); + J_a = Discrete_J_b.block(12, 3, 3, 3); + J_b = Discrete_J_b.block(6, 3, 3, 3); + H_a = Discrete_J_b.block(12, 9, 3, 3); + H_b = Discrete_J_b.block(6, 9, 3, 3); + O_a = Discrete_J_b.block(12, 18, 3, 3); + O_b = Discrete_J_b.block(6, 18, 3, 3); + } - // Updated roation and its transpose - Eigen::Matrix R_k2tau1= R_tau2tau1*R_k2tau; - Eigen::Matrix R_tau12k= R_k2tau1.transpose(); - // If averaging, average the LOCAL acceleration - if (imu_avg){ - a_hat += a_m_1- b_a_lin- R_k2tau1*quat_2_Rot(q_k_lin)*grav; - a_hat = 0.5*a_hat; - } - Eigen::Matrix a_x = skew_x(a_hat); - - // Intermediate variables for evaluating the measurement/bias Jacobian update - double f_1; - double f_2; - double f_3; - double f_4; - - if (small_w) { - f_1 = -(pow(delta_t, 3) / 3); - f_2 = (pow(delta_t, 4) / 8); - f_3 = -(pow(delta_t, 2) / 2); - f_4 = (pow(delta_t, 3) / 6); - } else { - f_1 = (w_dt * cos_wt - sin_wt) / (pow(mag_w, 3)); - f_2 = (pow(w_dt, 2) - 2 * cos_wt - 2 * w_dt * sin_wt + 2) / (2 * pow(mag_w, 4)); - f_3 = -(1 - cos_wt) / pow(mag_w, 2); - f_4 = (w_dt - sin_wt) / pow(mag_w, 3); - } - - // Compute the main part of our analytical means - Eigen::Matrix alpha_arg = ((dt_2 / 2.0) * eye3 + f_1 * w_x + f_2 * w_x_2); - Eigen::Matrix Beta_arg = (delta_t * eye3 + f_3 * w_x + f_4 * w_x_2); - - // Matrices that will multiply the a_hat in the update expressions - Eigen::Matrix H_al = R_tau12k * alpha_arg; - Eigen::Matrix H_be = R_tau12k * Beta_arg; - - // Update the measurements - alpha_tau += beta_tau * delta_t + H_al * a_hat; - beta_tau += H_be * a_hat; - - - 
//========================================================================== - // BIAS JACOBIANS (ANALYTICAL) - //========================================================================== - - //Get right Jacobian - Eigen::Matrix J_r_tau1 = small_w? eye3 - .5 * w_tx + (1.0 / 6.0) * w_tx * w_tx: - eye3 - ((1 - cos_wt) / (pow((w_dt), 2.0))) * w_tx + - ((w_dt - sin_wt) / (pow(w_dt, 3.0))) * w_tx * w_tx; - - // Update orientation in respect to gyro bias Jacobians - Eigen::Matrix J_save = J_q; - J_q = R_tau2tau1 * J_q + J_r_tau1 * delta_t; - - // Update alpha and beta in respect to accel bias Jacobian - H_a -= H_al; - H_a += delta_t*H_b; - H_b -= H_be; - - // Update alpha and beta in respect to q_GtoLIN Jacobian - Eigen::Matrix g_k = quat_2_Rot(q_k_lin) * grav; - O_a += delta_t * O_b; - O_a += -H_al * R_k2tau * skew_x(g_k); - O_b += -H_be * R_k2tau * skew_x(g_k); - - // Derivatives of R_tau12k wrt bias_w entries - Eigen::MatrixXd d_R_bw_1 = -R_tau12k * skew_x(J_q * e_1); - Eigen::MatrixXd d_R_bw_2 = -R_tau12k * skew_x(J_q * e_2); - Eigen::MatrixXd d_R_bw_3 = -R_tau12k * skew_x(J_q * e_3); - - - // Now compute the gyro bias Jacobian terms - double df_1_dbw_1; - double df_1_dbw_2; - double df_1_dbw_3; + // Update rotation mean + // Note we had to wait to do this, since we use the old orientation in our covariance calculation + R_k2tau = R_k2tau1; + q_k2tau = rot_2_quat(R_k2tau); - double df_2_dbw_1; - double df_2_dbw_2; - double df_2_dbw_3; - double df_3_dbw_1; - double df_3_dbw_2; - double df_3_dbw_3; - - double df_4_dbw_1; - double df_4_dbw_2; - double df_4_dbw_3; - - - if (small_w) { - double df_1_dw_mag = -(pow(delta_t, 5) / 15); - df_1_dbw_1 = w_1 * df_1_dw_mag; - df_1_dbw_2 = w_2 * df_1_dw_mag; - df_1_dbw_3 = w_3 * df_1_dw_mag; - - double df_2_dw_mag = (pow(delta_t, 6) / 72); - df_2_dbw_1 = w_1 * df_2_dw_mag; - df_2_dbw_2 = w_2 * df_2_dw_mag; - df_2_dbw_3 = w_3 * df_2_dw_mag; - - double df_3_dw_mag = -(pow(delta_t, 4) / 12); - df_3_dbw_1 = w_1 * df_3_dw_mag; - 
df_3_dbw_2 = w_2 * df_3_dw_mag; - df_3_dbw_3 = w_3 * df_3_dw_mag; - - double df_4_dw_mag = (pow(delta_t, 5) / 60); - df_4_dbw_1 = w_1 * df_4_dw_mag; - df_4_dbw_2 = w_2 * df_4_dw_mag; - df_4_dbw_3 = w_3 * df_4_dw_mag; - } - else { - double df_1_dw_mag = (pow(w_dt, 2) * sin_wt - 3 * sin_wt + 3 * w_dt * cos_wt) / pow(mag_w, 5); - df_1_dbw_1 = w_1 * df_1_dw_mag; - df_1_dbw_2 = w_2 * df_1_dw_mag; - df_1_dbw_3 = w_3 * df_1_dw_mag; - - - double df_2_dw_mag = - (pow(w_dt, 2) - 4 * cos_wt - 4 * w_dt * sin_wt + pow(w_dt, 2) * cos_wt + 4) / (pow(mag_w, 6)); - df_2_dbw_1 = w_1 * df_2_dw_mag; - df_2_dbw_2 = w_2 * df_2_dw_mag; - df_2_dbw_3 = w_3 * df_2_dw_mag; - - double df_3_dw_mag = (2 * (cos_wt - 1) + w_dt * sin_wt) / (pow(mag_w, 4)); - df_3_dbw_1 = w_1 * df_3_dw_mag; - df_3_dbw_2 = w_2 * df_3_dw_mag; - df_3_dbw_3 = w_3 * df_3_dw_mag; - - double df_4_dw_mag = (2 * w_dt + w_dt * cos_wt - 3 * sin_wt) / (pow(mag_w, 5)); - df_4_dbw_1 = w_1 * df_4_dw_mag; - df_4_dbw_2 = w_2 * df_4_dw_mag; - df_4_dbw_3 = w_3 * df_4_dw_mag; } - // Gravity rotated into the "tau'th" frame (i.e. 
start of the measurement interval) - Eigen::Matrix g_tau = R_k2tau*quat_2_Rot(q_k_lin)*grav; - - - // Update gyro bias Jacobians - J_a += J_b * delta_t; - J_a.block(0, 0, 3, 1) += (d_R_bw_1 * alpha_arg + R_tau12k * (df_1_dbw_1 * w_x - f_1 * e_1x + - df_2_dbw_1 * w_x_2 - - f_2 * (e_1x * w_x + w_x * e_1x))) * a_hat - -H_al*skew_x((J_save * e_1))*g_tau; - J_a.block(0, 1, 3, 1) += (d_R_bw_2 * alpha_arg + R_tau12k * (df_1_dbw_2 * w_x - f_1 * e_2x + - df_2_dbw_2 * w_x_2 - - f_2 * (e_2x * w_x + w_x * e_2x))) * a_hat - -H_al*skew_x((J_save * e_2))*g_tau; - J_a.block(0, 2, 3, 1) += (d_R_bw_3 * alpha_arg + R_tau12k * (df_1_dbw_3 * w_x - f_1 * e_3x + - df_2_dbw_3 * w_x_2 - - f_2 * (e_3x * w_x + w_x * e_3x))) * a_hat - -H_al*skew_x((J_save * e_3))*g_tau; - J_b.block(0, 0, 3, 1) += (d_R_bw_1 * Beta_arg + R_tau12k * (df_3_dbw_1 * w_x - f_3 * e_1x + - df_4_dbw_1 * w_x_2 - - f_4 * (e_1x * w_x + w_x * e_1x))) * a_hat- - -H_be*skew_x((J_save * e_1))*g_tau; - J_b.block(0, 1, 3, 1) += (d_R_bw_2 * Beta_arg + R_tau12k * (df_3_dbw_2 * w_x - f_3 * e_2x + - df_4_dbw_2 * w_x_2 - - f_4 * (e_2x * w_x + w_x * e_2x))) * a_hat - -H_be*skew_x((J_save * e_2))*g_tau; - J_b.block(0, 2, 3, 1) += (d_R_bw_3 * Beta_arg + R_tau12k * (df_3_dbw_3 * w_x - f_3 * e_3x + - df_4_dbw_3 * w_x_2 - - f_4 * (e_3x * w_x + w_x * e_3x))) * a_hat - -H_be*skew_x((J_save * e_3))*g_tau; - - - - - //========================================================================== - // MEASUREMENT COVARIANCE - //========================================================================== - - // Going to need orientation at intermediate time i.e. at .5*dt; - Eigen::Matrix R_G_to_k = quat_2_Rot(q_k_lin); - double dt_mid = delta_t / 2.0; - double w_dt_mid = mag_w * dt_mid; - Eigen::Matrix R_mid; - - // The middle of this interval (i.e., rotation from k to mid) - R_mid = small_w ? 
eye3 - dt_mid * w_x + (pow(dt_mid, 2) / 2) * w_x_2 : - eye3 - (sin(w_dt_mid) / mag_w) * w_x + ((1.0 - cos(w_dt_mid)) / (pow(mag_w, 2.0))) * w_x_2; - R_mid = R_mid * R_k2tau; - - - // Compute covariance (in this implementation, we use RK4) - //k1------------------------------------------------------------------------------------------------- - - // Build state Jacobian - Eigen::Matrix F_k1 = Eigen::Matrix::Zero(); - F_k1.block(0, 0, 3, 3) = -w_x; - F_k1.block(0, 3, 3, 3) = -eye3; - F_k1.block(6, 0, 3, 3) = -R_k2tau.transpose() * a_x; - F_k1.block(6, 9, 3, 3) = -R_k2tau.transpose(); - F_k1.block(6, 15, 3, 3) = -R_k2tau.transpose() * skew_x(R_k2tau * R_G_to_k * grav); - F_k1.block(6, 18, 3, 3) = -R_k2tau.transpose() * R_k2tau * skew_x(R_G_to_k * grav); - F_k1.block(12, 6, 3, 3) = eye3; - - // Build noise Jacobian - Eigen::Matrix G_k1 = Eigen::Matrix::Zero(); - G_k1.block(0, 0, 3, 3) = -eye3; - G_k1.block(3, 3, 3, 3) = eye3; - G_k1.block(6, 6, 3, 3) = -R_k2tau.transpose(); - G_k1.block(9, 9, 3, 3) = eye3; - - // Get state transition and covariance derivative - Eigen::Matrix Phi_dot_k1 = F_k1; - Eigen::Matrix P_dot_k1 = F_k1 * P_big + P_big * F_k1.transpose() + G_k1 * Q_c * G_k1.transpose(); - - - //k2------------------------------------------------------------------------------------------------- - - // Build state Jacobian - Eigen::Matrix F_k2 = Eigen::Matrix::Zero(); - F_k2.block(0, 0, 3, 3) = -w_x; - F_k2.block(0, 3, 3, 3) = -eye3; - F_k2.block(6, 0, 3, 3) = -R_mid.transpose() * a_x; - F_k2.block(6, 9, 3, 3) = -R_mid.transpose(); - F_k2.block(6, 15, 3, 3) = -R_mid.transpose() * skew_x(R_k2tau * R_G_to_k * grav); - F_k2.block(6, 18, 3, 3) = -R_mid.transpose() * R_k2tau * skew_x(R_G_to_k * grav); - F_k2.block(12, 6, 3, 3) = eye3; - - // Build noise Jacobian - Eigen::Matrix G_k2 = Eigen::Matrix::Zero(); - G_k2.block(0, 0, 3, 3) = -eye3; - G_k2.block(3, 3, 3, 3) = eye3; - G_k2.block(6, 6, 3, 3) = -R_mid.transpose(); - G_k2.block(9, 9, 3, 3) = eye3; - - // Get state 
transition and covariance derivative - Eigen::Matrix Phi_k2 = Eigen::Matrix::Identity() + Phi_dot_k1 * dt_mid; - Eigen::Matrix P_k2 = P_big + P_dot_k1 * dt_mid; - Eigen::Matrix Phi_dot_k2 = F_k2 * Phi_k2; - Eigen::Matrix P_dot_k2 = F_k2 * P_k2 + P_k2 * F_k2.transpose() + G_k2 * Q_c * G_k2.transpose(); - - - //k3------------------------------------------------------------------------------------------------- - - // Our state and noise Jacobians are the same as k2 - // Since k2 and k3 correspond to the same estimates for the midpoint - Eigen::Matrix F_k3 = F_k2; - Eigen::Matrix G_k3 = G_k2; - - // Get state transition and covariance derivative - Eigen::Matrix Phi_k3 = Eigen::Matrix::Identity() + Phi_dot_k2 * dt_mid; - Eigen::Matrix P_k3 = P_big + P_dot_k2 * dt_mid; - Eigen::Matrix Phi_dot_k3 = F_k3 * Phi_k3; - Eigen::Matrix P_dot_k3 = F_k3 * P_k3 + P_k3 * F_k3.transpose() + G_k3 * Q_c * G_k3.transpose(); - - - //k4------------------------------------------------------------------------------------------------- - - // Build state Jacobian - Eigen::Matrix F_k4 = Eigen::Matrix::Zero(); - F_k4.block(0, 0, 3, 3) = -w_x; - F_k4.block(0, 3, 3, 3) = -eye3; - F_k4.block(6, 0, 3, 3) = -R_k2tau1.transpose() * a_x; - F_k4.block(6, 9, 3, 3) = -R_k2tau1.transpose(); - F_k4.block(6, 15, 3, 3) = -R_k2tau1.transpose() * skew_x(R_k2tau * R_G_to_k * grav); - F_k4.block(6, 18, 3, 3) = -R_k2tau1.transpose() * R_k2tau * skew_x(R_G_to_k * grav); - F_k4.block(12, 6, 3, 3) = eye3; - - // Build noise Jacobian - Eigen::Matrix G_k4 = Eigen::Matrix::Zero(); - G_k4.block(0, 0, 3, 3) = -eye3; - G_k4.block(3, 3, 3, 3) = eye3; - G_k4.block(6, 6, 3, 3) = -R_k2tau1.transpose(); - G_k4.block(9, 9, 3, 3) = eye3; - - // Get state transition and covariance derivative - Eigen::Matrix Phi_k4 = Eigen::Matrix::Identity() + Phi_dot_k3 * delta_t; - Eigen::Matrix P_k4 = P_big + P_dot_k3 * delta_t; - Eigen::Matrix Phi_dot_k4 = F_k4 * Phi_k4; - Eigen::Matrix P_dot_k4 = F_k4 * P_k4 + P_k4 * F_k4.transpose() + G_k4 
* Q_c * G_k4.transpose(); - - - //done------------------------------------------------------------------------------------------------- - - // Collect covariance solution - // Ensure it is positive definite - P_big += (delta_t / 6.0) * (P_dot_k1 + 2.0 * P_dot_k2 + 2.0 * P_dot_k3 + P_dot_k4); - P_big = 0.5 * (P_big + P_big.transpose()); - - // Calculate the state transition from time k to tau - Eigen::Matrix Phi = Eigen::Matrix::Identity() - + (delta_t / 6.0) * (Phi_dot_k1 + 2.0 * Phi_dot_k2 + 2.0 * Phi_dot_k3 + Phi_dot_k4); - - - - - //========================================================================== - // CLONE TO NEW SAMPLE TIME AND MARGINALIZE OLD SAMPLE TIME - //========================================================================== - - // Matrix that performs the clone and mariginalization - Eigen::Matrix B_k = Eigen::Matrix::Identity(); - B_k.block(15,15,3,3).setZero(); - B_k.block(15,0,3,3) = Eigen::Matrix::Identity(); - - // Change our measurement covariance and state transition - P_big = B_k * P_big * B_k.transpose(); - P_big = 0.5 * (P_big + P_big.transpose()); - Discrete_J_b = B_k * Phi * Discrete_J_b; - - // Our measurement covariance is the top 15x15 of our large covariance - P_meas = P_big.block(0,0,15,15); - - // If we are using the state transition Jacobian, then we should overwrite the analytical versions - // Note: we flip the sign for J_q to match the Model 1 derivation - if (state_transition_jacobians){ - J_q = -Discrete_J_b.block(0,3,3,3); - J_a = Discrete_J_b.block(12,3,3,3); - J_b = Discrete_J_b.block(6,3,3,3); - H_a = Discrete_J_b.block(12,9,3,3); - H_b = Discrete_J_b.block(6,9,3,3); - O_a = Discrete_J_b.block(12,18,3,3); - O_b = Discrete_J_b.block(6,18,3,3); - } - - - // Update rotation mean - // Note we had to wait to do this, since we use the old orientation in our covariance calculation - R_k2tau = R_k2tau1; - q_k2tau = rot_2_quat(R_k2tau); - - - } - - -}; + }; +} #endif /* CPI_V2_H */ \ No newline at end of file diff --git 
a/ov_core/src/state/Options.h b/ov_core/src/state/Options.h deleted file mode 100644 index 16464f1b6..000000000 --- a/ov_core/src/state/Options.h +++ /dev/null @@ -1,41 +0,0 @@ -// -// Created by keck on 6/4/19. -// - -#ifndef OPEN_VINS_OPTIONS_H -#define OPEN_VINS_OPTIONS_H - -/**@brief Struct which stores all our filter options - */ - -struct Options{ - - /// Bool to determine whether or not to do fej - bool do_fej = false; - - /// Bool to determine whether or not use imu message averaging - bool imu_avg = false; - - /// Bool to determine whether or not to calibrate imu-to-camera pose - bool do_calib_camera_pose = false; - - /// Bool to determine whether or not to calibrate camera intrinsics - bool do_calib_camera_intrinsics = false; - - /// Bool to determine whether or not to calibrate camera to IMU time offset - bool do_calib_camera_timeoffset = false; - - /// Max clone size of sliding window - size_t max_clone_size = 8; - - /// Max number of estimated SLAM features - size_t max_slam_features = 0; - - /// Number of cameras - size_t num_cameras = 1; - -}; - - - -#endif //OPEN_VINS_OPTIONS_H diff --git a/ov_core/src/state/Propagator.h b/ov_core/src/state/Propagator.h deleted file mode 100644 index 7789dda88..000000000 --- a/ov_core/src/state/Propagator.h +++ /dev/null @@ -1,149 +0,0 @@ -// -// Created by keck on 6/4/19. 
-// - -#ifndef OPEN_VINS_PROPAGATOR_H -#define OPEN_VINS_PROPAGATOR_H - -#include "state/StateHelper.h" -#include "utils/quat_ops.h" - - -/**@brief Structure containing the IMU data - */ -struct IMUDATA { - /// timestamp of the reading - double timestamp; - - //Gyroscope reading - Eigen::Matrix wm; - - //Accellerometer reading - Eigen::Matrix am; -}; - -/**@brief Structure containing the IMU noise parameters - */ -struct NoiseManager{ - - /// Gyro white noise covariance - double sigma_w_2; - - /// Gyro random walk covariance - double sigma_wb_2; - - /// Accel white noise covariance - double sigma_a_2; - - /// Accel random walk covariance - double sigma_ab_2; - -}; - -/**@brief Performs the covariance and mean propagation using IMU measurements - */ -class Propagator{ - -public: - - Propagator(NoiseManager noises_, Eigen::Matrix gravity_) : _noises(noises_), - _gravity(gravity_){} - - - /** - * Stores incoming readings - * @param timestamp Timestamp of reading - * @param wm Gyro reading - * @param am Accelerometer reading - */ - void feed_imu(double timestamp, Eigen::Matrix wm, Eigen::Matrix am) { - - // Create our imu data object - IMUDATA data; - data.timestamp = timestamp; - data.wm = wm; - data.am = am; - - // Append it to our vector - imu_data.emplace_back(data); - - } - - - /** @brief Propagate state up to given timestamp and then clone - * @param state Pointer to state - * @timestamp Pointer to progate to and clone at - */ - bool propagateAndClone(State* state, double timestamp); - -protected: - - /// Estimate for time offset at last propagation time - double last_prop_time_offset = 0; - - /** - * @brief Propagates the state forward using the IMU data and computes the noise covariance and state-transition - * matrix of this interval. 
This function can be replaced with analytical/numerical integration or when using a - * different state representation - * @param imu Pointer to imu - * @param data_minus IMU readings at beginning of interval - * @param data_plus IMU readings at end of interval - * @param F State-transition matrix over the interval - * @param Qd Discrete-time noise covariance over the interval - */ - void predictAndcomputeJacobians(State *state, const IMUDATA data_minus, const IMUDATA data_plus, - Eigen::Matrix &F, Eigen::Matrix &Qd); - - - void propagateSingleInterval(State *state, const IMUDATA &data_minus, const IMUDATA &data_plus, - Eigen::Matrix &Phi, Eigen::Matrix &Qd){ - - Eigen::Matrix F = Eigen::Matrix::Zero(); - Eigen::Matrix Qdi = Eigen::Matrix::Zero(); - - predictAndcomputeJacobians(state, data_minus, data_plus, F, Qdi); - - Phi = F*Phi; - Qd = F*Qd*F.transpose()+Qdi; - } - - /// Container for the noise values - NoiseManager _noises; - - /// Our history of IMU messages (time, angular, linear) - std::vector imu_data; - - /// Gravity vector - - Eigen::Matrix _gravity; - - - /** - * @brief Nice helper function that will linearly interpolate between two imu messages - * This should be used instead of just "cutting" imu messages that bound the camera times - * Give better time offset if we use this function.... 
- * @param imu_1 IMU at beggining of interpolation interval - * @param imu_2 IMU at end of interpolation interval - * @param timestamp Timestamp being interpolated to - */ - IMUDATA interpolate_data(const IMUDATA imu_1, const IMUDATA imu_2, double timestamp) { - // time-distance lambda - double lambda = (timestamp-imu_1.timestamp)/(imu_2.timestamp-imu_1.timestamp); - //cout << "lambda - " << lambda << endl; - // interpolate between the two times - IMUDATA data; - data.timestamp = timestamp; - data.am = (1-lambda)*imu_1.am+lambda*imu_2.am; - data.wm = (1-lambda)*imu_1.wm+lambda*imu_2.wm; - return data; - } - - - - -}; - - - - -#endif //OPEN_VINS_PROPAGATOR_H diff --git a/ov_core/src/state/State.h b/ov_core/src/state/State.h deleted file mode 100644 index 21471793c..000000000 --- a/ov_core/src/state/State.h +++ /dev/null @@ -1,199 +0,0 @@ -// -// Created by keck on 6/3/19. -// - -#ifndef PROJECT_STATE_H -#define PROJECT_STATE_H - -#include "types/IMU.h" -#include "types/Vec.h" -#include "types/PoseJPL.h" -#include "Options.h" -#include - -/** @brief Class which manages the filter state -*/ - -class Landmark; - -class State{ - -public: - - /** @brief Default Constructor - * @param options_ Options structure containing filter options - */ - State(Options &options_) : _options(options_){} - - ~State(){} - - /** - * @brief Initializes pointers and covariance - * TODO: Read initial values and covariance from options - */ - void initialize_variables(); - - - /** @brief Access IMU pointer - * - */ - IMU *imu(){ - return _imu; - } - - - /** @brief Access reference to covariance - */ - Eigen::MatrixXd& Cov(){ - return _Cov; - } - - /** @brief Access variables in state - */ - std::vector & variables(){ - return _variables; - } - - /** @brief Access variables in state by index in variables vector - * Index in variables vector - */ - Type* variables(size_t i){ - return _variables[i]; - } - - /** @brief Insert new variable - * @param newType Variable to insert - */ - void 
insert_variable(Type* newType){ - _variables.push_back(newType); - } - - /** - * @brief For a given set of variables, this will this will calculate a smaller covariance - * That only includes the ones specified with all cross terms - * @param small_variables Vector of variables whose marginal covariance is desired - */ - Eigen::MatrixXd get_marginal_covariance(const std::vector &small_variables); - - /** - * @brief Given an update vector, updates each variable - * @param dx Correction vector for the entire filter state - */ - void update(const Eigen::MatrixXd dx){ - for (size_t i=0; i < _variables.size(); i++){ - _variables[i]->update(dx.block(_variables[i]->id(),0,_variables[i]->size(),1)); - } - } - - /** - * @brief Insert new clone - * @param timestamp Timestamp associated with new clone - * @param pose Pointer to new clone pose - */ - void insert_clone(double timestamp, PoseJPL* pose){ - _clones_IMU.insert({timestamp, pose}); - } - - - /// Access current timestamp - double timestamp(){ - return _timestamp; - } - - //Access options - Options& options(){ - return _options; - } - - //Access imu-camera time offset pointer - Vec* calib_dt_CAMtoIMU(){ - return _calib_dt_CAMtoIMU; - } - - /** - * @brief Get clone at a given timestamp - * @param timestamp - */ - PoseJPL* get_clone(double timestamp){ - return _clones_IMU[timestamp]; - } - - /// Get size of covariance - size_t nVars(){ - return _Cov.rows(); - } - - /// Get current number of clones - size_t nClones(){ - return _clones_IMU.size(); - } - - /// Get marginal timestep - double margtimestep() { - double time = INFINITY; - for(std::pair& clone_imu : _clones_IMU) { - if(clone_imu.first < time) { - time = clone_imu.first; - } - } - return time; - } - - /** - * Erases clone associated with timestamp - * @param timestamp Timestamp of clone to erase - */ - void erase_clone(double timestamp){ - _clones_IMU.erase(timestamp); - } - - /** - * Set the current timestamp of the filter - * @param timestamp New timestamp - */ 
- void set_timestamp(double timestamp){ - _timestamp = timestamp; - } - -protected: - - /// Current timestamp - double _timestamp; - - ///Structure containing filter options - Options _options; - - - /// Covariance of the state - Eigen::MatrixXd _Cov; - - /// Pointer to the "active" IMU state - IMU *_imu; - - - /// Calibration poses for each camera (R_ItoC, p_IinC) - std::map _calib_IMUtoCAM; - - - /// Our current set of SLAM features (3d positions) - std::map _features_SLAM; - - - /// Map between imaging times and clone poses - std::map _clones_IMU; - - /// Time offset base IMU to camera - Vec* _calib_dt_CAMtoIMU; - - /// Camera intrinsics - std::map _cam_intrinsics; - - /// Vector of variables - std::vector _variables; - - -}; - - - -#endif //PROJECT_STATE_H diff --git a/ov_core/src/state/StateHelper.h b/ov_core/src/state/StateHelper.h deleted file mode 100644 index 7756f28ab..000000000 --- a/ov_core/src/state/StateHelper.h +++ /dev/null @@ -1,432 +0,0 @@ -// -// Created by keck on 6/4/19. -// - -#ifndef OPEN_VINS_STATEHELPER_H -#define OPEN_VINS_STATEHELPER_H - -/** @brief Class where all the static functions used to manipulate the covariance are implemented - */ - -#include "State.h" - -class StateHelper{ - -public: - - /** @brief Marginalizes Type marg, properly modifying the ordering/covariances in the state - * @param state Pointer to state - * @param marg Pointer to variable to marginalize - */ - static void marginalize(State* state, Type* marg){ - - // Check if the current state has the GPS enabled - if(std::find(state->variables().begin(),state->variables().end(),marg) == state->variables().end()) { - std::cerr << "CovManager::marginalize() - Called on variable that is not in the state" << std::endl; - std::cerr << "CovManager::marginalize() - Marginalization, does NOT work on sub-variables yet..." << std::endl; - std::exit(EXIT_FAILURE); - } - - //Generic covariance has this form for x_1, x_m, x_2. 
If we want to remove x_m: - // - // P_(x_1,x_1) P(x_1,x_m) P(x_1,x_2) - // P_(x_m,x_1) P(x_m,x_m) P(x_m,x_2) - // P_(x_2,x_1) P(x_2,x_m) P(x_2,x_2) - // - // to - // - // P_(x_1,x_1) P(x_1,x_2) - // P_(x_2,x_1) P(x_2,x_2) - // - // i.e. x_1 goes from 0 to marg_id, x_2 goes from marg_id+marg_size to Cov.rows() - //in the original covariance - - int marg_size= marg->size(); - int marg_id= marg->id(); - - Eigen::MatrixXd Cov_new( state->nVars()-marg_size, state->nVars()-marg_size); - - int x2_size= state->nVars()-marg_id- marg_size; - - //P_(x_1,x_1) - Cov_new.block(0,0,marg_id,marg_id)= state->Cov().block(0,0,marg_id,marg_id); - - //P_(x_1,x_2) - Cov_new.block(0,marg_id,marg_id,x2_size)= state->Cov().block(0,marg_id+marg_size,marg_id,x2_size); - - //P_(x_2,x_1) - Cov_new.block(marg_id,0,x2_size,marg_id)= Cov_new.block(0,marg_id,marg_id,x2_size).transpose(); - - //P(x_2,x_2) - Cov_new.block(marg_id,marg_id,x2_size,x2_size)= state->Cov().block(marg_id+marg_size,marg_id+marg_size,x2_size,x2_size); - - - //Now set new covariance - state->Cov() = Cov_new; - - - //Now we keep the remaining variables and update their ordering - //Note: DOES NOT SUPPORT MARGINALIZING SUBVARIABLES YET!!!!!!! - std::vector remaining_variables; - for (size_t i=0; i < state->variables().size(); i++){ - //Only keep non-marginal states - if (state->variables(i) != marg){ - if (state->variables(i)->id() > marg_id){ - //If the variable is "beyond" the marginal one in ordering, need - //to "move it forward" - state->variables(i)->set_local_id(state->variables(i)->id()- marg_size); - } - remaining_variables.push_back(state->variables(i)); - } - } - - delete marg; - - - //Now set variables as the remaining ones - state->variables() = remaining_variables; - - } - - /** @brief Remove the oldest clone, if we have more then the max clone count!! 
- * Note: the marginalizer should have already deleted the clone - * Note: so we just need to remove the pointer to it - * @param state Pointer to state - */ - static void marginalize_old_clone(State* state) { - - if (state->nClones() > state->options().max_clone_size) { - double margTime = state->margtimestep(); - StateHelper::marginalize(state, state->get_clone(margTime)); - state->erase_clone(margTime); - } - - } - - /** - * @brief Clones "variable to clone" and places it at end of covariance - * @param state Pointer to state - * @param variable_to_clone Pointer to variable that will be cloned - */ - static Type* clone(State* state, Type* variable_to_clone) { - - //Get total size of new cloned variables, and the old covariance size - int total_size = variable_to_clone->size(); - int old_size = (int)state->nVars(); - int new_loc = (int)state->nVars(); - - // Resize both our covariance to the new size - state->Cov().conservativeResizeLike(Eigen::MatrixXd::Zero(state->nVars()+total_size,state->nVars()+total_size)); - - // What is the new state, and variable we inserted - const std::vector new_variables = state->variables(); - Type* new_clone = nullptr; - - // Loop through all variables, and find the variable that we are going to clone - for (size_t k=0; k < state->variables().size(); k++){ - - // Skip this if it is not the same - Type* type_check = state->variables(k)->check_if_same_variable(variable_to_clone); - if (type_check == nullptr) - continue; - - //So we will clone this one - int old_loc = type_check->id(); - - // Copy the covariance elements - state->Cov().block(new_loc,new_loc,total_size,total_size) = state->Cov().block(old_loc,old_loc,total_size,total_size); - state->Cov().block(0,new_loc,old_size,total_size) = state->Cov().block(0,old_loc,old_size,total_size); - state->Cov().block(new_loc,0,total_size,old_size) = state->Cov().block(old_loc,0,total_size,old_size); - - //Create clone from the type being cloned - new_clone = type_check->clone(); - 
new_clone->set_local_id(new_loc); - - //Add to variable list - state->insert_variable(new_clone); - break; - - } - - // Check if the current state has the GPS enabled - if(new_clone == nullptr) { - std::cerr << "CovManager::clone() - Called on variable is not in the state" << std::endl; - std::cerr << "CovManager::clone() - Ensure that the variable specified is a variable, or sub-variable.." << std::endl; - std::exit(EXIT_FAILURE); - } - - return new_clone; - - } - - - /** @brief Performs EKF Update - * @param state Pointer to state - * @param H_order Variable ordering used in the compressed Jacobian - * @param H Condensed Jacobian of updating measurement - * @param res residual of updating measurement - * @param R updating measurement covariance - * - */ - static void EKFUpdate(State* state, const std::vector &H_order, const Eigen::MatrixXd &H, - const Eigen::VectorXd &res, const Eigen::MatrixXd &R){ - - //========================================================== - //========================================================== - // Part of the Kalman Gain K = M*S^{-1} - - assert(res.rows() == R.rows()); - assert(H.rows() == res.rows()); - - - Eigen::MatrixXd M_a = Eigen::MatrixXd::Zero(state->nVars(), res.rows()); - - std::vector H_id; - std::vector H_is_active; - int current_it=0; - - // Get the location in small jacobian for each measuring variable - for (Type* meas_var: H_order){ - H_id.push_back(current_it); - current_it+=meas_var->size(); - } - - auto Cov = state->Cov(); - - //========================================================== - //========================================================== - // For each active variable find its M = P*H^T - for (Type* var: state->variables()) { - // Sum up effect of each subjacobian= K_i= \sum_m (P_im Hm^T) - Eigen::MatrixXd M_i = Eigen::MatrixXd::Zero(var->size(), res.rows()); - for (size_t i = 0; i < H_order.size(); i++) { - Type *meas_var = H_order[i]; - M_i.noalias() += Cov.block(var->id(), meas_var->id(), 
var->size(), meas_var->size()) * - H.block(0, H_id[i], H.rows(), meas_var->size()).transpose(); - } - M_a.block(var->id(), 0, var->size(), res.rows()) = M_i; - } - - //========================================================== - //========================================================== - //Get S - Eigen::MatrixXd P_small = state->get_marginal_covariance(H_order); - - // S = H*Cov*H' + R - Eigen::MatrixXd S(R.rows(), R.rows()); - S.triangularView() = H*P_small*H.transpose(); - S.triangularView() += R; - //S = 0.5*(S+S.transpose()); - - // Inverse our S (should we use a more stable method here??) - Eigen::MatrixXd Sinv = Eigen::MatrixXd::Identity(R.rows(), R.rows()); - S.selfadjointView().llt().solveInPlace(Sinv); - Eigen::MatrixXd K = M_a*Sinv.selfadjointView(); - - // Update Covariance - Cov.triangularView() -= K*M_a.transpose(); - Cov = Cov.selfadjointView(); - - // Calculate our delta and pass it to update all our state variables - state->update(K*res); - - } - - - /** - * @brief Initializes new variable into covariance - * @param state Pointer to state - * @param new_variable Pointer to variable to be initialized - * @param H_order Vector of pointers in order they are contained in the condensed state Jacobian - * @param H_R Jacobian of initializing measurements wrt variables in H_order - * @param H_L Jacobian of initializing measurements wrt new variable - * @param R Covariance of initializing measurements - * @param res Residual of initializing measurements - */ - static void invertible_initialize(State* state, Type* new_variable, const std::vector &H_order, const Eigen::MatrixXd &H_R, - const Eigen::MatrixXd &H_L, const Eigen::MatrixXd &R, const Eigen::VectorXd &res){ - - //========================================================== - //========================================================== - // Part of the Kalman Gain K = M*S^{-1} - - assert(H_L.rows() == H_L.cols()); - assert(new_variable->size() == H_L.rows()); - - Eigen::MatrixXd H_Linv = 
H_L.inverse(); - - Eigen::MatrixXd M_a = Eigen::MatrixXd::Zero(state->nVars(), res.rows()); - - std::vector H_id; - std::vector H_is_active; - int current_it=0; - - // Get the location in small jacobian for each measuring variable - for (Type* meas_var: H_order){ - H_id.push_back(current_it); - current_it+=meas_var->size(); - } - - //========================================================== - //========================================================== - // For each active variable find its M = P*H^T - for (Type* var: state->variables()) { - // Sum up effect of each subjacobian= K_i= \sum_m (P_im Hm^T) - Eigen::MatrixXd M_i = Eigen::MatrixXd::Zero(var->size(), res.rows()); - for (size_t i = 0; i < H_order.size(); i++) { - Type *meas_var = H_order[i]; - M_i += state->Cov().block(var->id(), meas_var->id(), var->size(), meas_var->size()) * - H_R.block(0, H_id[i], H_R.rows(), meas_var->size()).transpose(); - } - M_a.block(var->id(), 0, var->size(), res.rows()) = M_i; - } - - - //========================================================== - //========================================================== - //Get covariance of this small jacobian - Eigen::MatrixXd P_small= state->get_marginal_covariance(H_order); - - // M = H_R*Cov*H_R' + R - Eigen::MatrixXd M(H_R.rows(), H_R.rows()); - M.triangularView() = H_R*P_small*H_R.transpose(); - M.triangularView() += R; - - // Covariance of the variable/landmark that will be initialized - Eigen::MatrixXd P_LL = H_Linv*M.selfadjointView()*H_Linv.transpose(); - - size_t oldSize = state->nVars(); - - state->Cov().conservativeResizeLike(Eigen::MatrixXd::Zero(state->nVars()+new_variable->size(), - state->nVars()+new_variable->size())); - - state->Cov().block(0, oldSize, oldSize, new_variable->size()).noalias() = -M_a*H_Linv.transpose(); - state->Cov().block(oldSize, 0, new_variable->size(), oldSize) = state->Cov().block(0, oldSize, oldSize, new_variable->size()).transpose(); - state->Cov().block(oldSize, oldSize, new_variable->size(), 
new_variable->size()) = P_LL; - - - // Update the variable that will be initialized (invertible systems can only update the new variable. However this - // Update should be almost zero if we already used a conditional Gauss-Newton to solve for the initial estimate - new_variable->update(H_Linv*res); - - // Now collect results, and add it to the state variables - new_variable->set_local_id((int)(state->nVars()-new_variable->size())); - state->insert_variable(new_variable); - - - } - - /** - * @brief Initializes new variable into covariance. Uses Givens to separate into updating and - * initializing systems (therefore system must be fed as isotropic) - * @param state Pointer to state - * @param new_variable Pointer to variable to be initialized - * @param H_order Vector of pointers in order they are contained in the condensed state Jacobian - * @param H_R Jacobian of initializing measurements wrt variables in H_order - * @param H_L Jacobian of initializing measurements wrt new variable - * @param R Covariance of initializing measurements - * @param res Residual of initializing measurements - */ - static void initialize(State* state, Type* new_variable, const std::vector &H_order, Eigen::MatrixXd &H_R, - Eigen::MatrixXd &H_L, Eigen::MatrixXd &R, Eigen::VectorXd &res){ - - //========================================================== - //========================================================== - // Part of the Kalman Gain K = M*S^{-1} - - size_t new_var_size = new_variable->size(); - assert (new_var_size == H_L.cols()); - - Eigen::JacobiRotation tempHo_GR; - for (int n = 0; n < H_L.cols(); ++n) { - for (int m = (int) H_L.rows() - 1; m > n; m--) { - // Givens matrix G - tempHo_GR.makeGivens(H_L(m - 1, n), H_L(m, n)); - // Multiply G to the corresponding lines (m-1,m) in each matrix - // Note: we only apply G to the nonzero cols [n:Ho.cols()-n-1], while - // it is equivalent to applying G to the entire cols [0:Ho.cols()-1]. 
- (H_L.block(m - 1, n, 2, H_L.cols() - n)).applyOnTheLeft(0, 1, tempHo_GR.adjoint()); - (res.block(m - 1, 0, 2, 1)).applyOnTheLeft(0, 1, tempHo_GR.adjoint()); - (H_R.block(m - 1, 0, 2, H_R.cols())).applyOnTheLeft(0, 1, tempHo_GR.adjoint()); - } - } - - - //Separate into initializing and updating portions - Eigen::MatrixXd Hxinit = H_R.block(0, 0, new_var_size, H_R.cols()); - Eigen::MatrixXd Hup = H_R.block(new_var_size, 0, H_R.rows() - new_var_size, H_R.cols()); - - Eigen::MatrixXd H_finit = H_L.block(0, 0, new_var_size, new_var_size); - - Eigen::VectorXd resinit = res.block(0, 0, new_var_size, 1); - Eigen::VectorXd resup = res.block(new_var_size, 0, res.rows() - new_var_size, 1); - - Eigen::MatrixXd Rinit = R.block(0,0,new_var_size, new_var_size); - Eigen::MatrixXd Rup = R.block(new_var_size,new_var_size, R.rows()- new_var_size, R.rows() - new_var_size); - - //=========================================== - // Finally, initialize it in our state - StateHelper::invertible_initialize(state, new_variable, H_order, Hxinit, H_finit, - Rinit, resinit); - - //Update with updating portion - if (Hup.rows() > 0) { - StateHelper::EKFUpdate(state, H_order, Hup, resup, Rup); - } - - - } - - /** - * - * @param state Pointer to state - * @param last_w The estimated angular velocity at cloning time (used to estimate imu-cam time offset) - */ - - static void augment_clone(State* state, Eigen::Matrix last_w) { - - auto imu = state->imu(); - auto Cov = state->Cov(); - - // Call on our marginalizer to clone, it will add it to our vector of types - // NOTE: this will clone the clone pose to the END of the covariance... 
- Type* posetemp = StateHelper::clone(state, imu->pose()); - - // Cast to a JPL pose type - PoseJPL* pose = dynamic_cast(posetemp); - - // Check that it was a valid cast - if(pose == nullptr) { - //ROS_ERROR("INVALID OBJECT RETURNED FROM MARGINALIZER, EXITING!#!@#!@#"); - exit(EXIT_FAILURE); - } - - // Append the new clone to our clone vector - state->insert_clone(state->timestamp(), pose); - - // If we are doing time calibration, then our clones are a function of the time offset - // Logic is based on Mingyang Li and Anastasios I. Mourikis paper: - // http://journals.sagepub.com/doi/pdf/10.1177/0278364913515286 - if (state->options().do_calib_camera_timeoffset){ - // Jacobian to augment by - Eigen::Matrix dnc_dt = Eigen::MatrixXd::Zero(6,1); - dnc_dt.block(0,0,3,1)= last_w; - dnc_dt.block(3,0,3,1)= imu->vel(); - // Augment covariance with time offset Jacobian - Cov.block(0,pose->id(),Cov.rows(),6) += Cov.block(0,state->calib_dt_CAMtoIMU()->id(), - Cov.rows(),1)*dnc_dt.transpose(); - Cov.block(pose->id(),0,6,Cov.rows()) += dnc_dt*Cov.block(state->calib_dt_CAMtoIMU()->id(),0,1,Cov.rows()); - Cov.block(pose->id(),pose->id(),6,6) += dnc_dt*Cov(state->calib_dt_CAMtoIMU()->id(), - state->calib_dt_CAMtoIMU()->id())*dnc_dt.transpose(); - } - - } - - - - -}; - - - -#endif //OPEN_VINS_STATEHELPER_H diff --git a/ov_core/src/test_tracking.cpp b/ov_core/src/test_tracking.cpp index ca90a5a34..680ff9dc5 100644 --- a/ov_core/src/test_tracking.cpp +++ b/ov_core/src/test_tracking.cpp @@ -23,6 +23,8 @@ #include "track/TrackDescriptor.h" #include "track/TrackAruco.h" +using namespace ov_core; + // Our feature extractor TrackBase* extractor; diff --git a/ov_core/src/track/Feature.h b/ov_core/src/track/Feature.h index 384ca236d..73f5d50cc 100644 --- a/ov_core/src/track/Feature.h +++ b/ov_core/src/track/Feature.h @@ -6,49 +6,53 @@ #include #include - /** - * @brief Sparse feature class used to collect measurements - * - * This feature class allows for holding of all tracking information 
for a given feature. - * Each feature has a unique ID assigned to it, and should have a set of feature tracks alongside it. - * See the FeatureDatabase class for details on how we load information into this, and how we delete features. + * @namespace ov_core + * @brief Core algorithms for Open VINS */ -class Feature -{ +namespace ov_core { -public: + /** + * @brief Sparse feature class used to collect measurements + * + * This feature class allows for holding of all tracking information for a given feature. + * Each feature has a unique ID assigned to it, and should have a set of feature tracks alongside it. + * See the FeatureDatabase class for details on how we load information into this, and how we delete features. + */ + class Feature { - /// Unique ID of this feature - size_t featid; + public: - /// If this feature should be deleted - bool to_delete; + /// Unique ID of this feature + size_t featid; - /// UV coordinates that this feature has been seen from (mapped by camera ID) - std::map> uvs; + /// If this feature should be deleted + bool to_delete; - /// UV normalized coordinates that this feature has been seen from (mapped by camera ID) - std::map> uvs_norm; + /// UV coordinates that this feature has been seen from (mapped by camera ID) + std::map> uvs; - /// Timestamps of each UV measurement (mapped by camera ID) - std::map> timestamps; + /// UV normalized coordinates that this feature has been seen from (mapped by camera ID) + std::map> uvs_norm; - /// Triangulated inverse position of this feature, in the anchor frame - Eigen::Vector3d p_invFinA; + /// Timestamps of each UV measurement (mapped by camera ID) + std::map> timestamps; - /// Triangulated position of this feature, in the anchor frame - Eigen::Vector3d p_FinA; + /// Triangulated inverse position of this feature, in the anchor frame + Eigen::Vector3d p_invFinA; - /// What camera ID our pose is anchored in!! By default the first measurement is the anchor. 
- size_t anchor_cam_id; + /// Triangulated position of this feature, in the anchor frame + Eigen::Vector3d p_FinA; - /// Triangulated position of this feature, in the global frame - Eigen::Vector3d p_FinG; + /// What camera ID our pose is anchored in!! By default the first measurement is the anchor. + size_t anchor_cam_id; -}; + /// Triangulated position of this feature, in the global frame + Eigen::Vector3d p_FinG; + }; +} #endif /* OV_CORE_FEATURE_H */ \ No newline at end of file diff --git a/ov_core/src/track/FeatureDatabase.h b/ov_core/src/track/FeatureDatabase.h index 75b5d3aab..ad960bec1 100644 --- a/ov_core/src/track/FeatureDatabase.h +++ b/ov_core/src/track/FeatureDatabase.h @@ -9,273 +9,276 @@ /** - * @brief Database containing features we are currently tracking. - * - * Each visual tracker has this database in it and it contains all features that we are tracking. - * The trackers will insert information into this database when they get new measurements from doing tracking. - * A user would then query this database for features that can be used for update and remove them after they have been processed. + * @namespace ov_core + * @brief Core algorithms for Open VINS */ -class FeatureDatabase -{ - -public: +namespace ov_core { /** - * @brief Default constructor + * @brief Database containing features we are currently tracking. + * + * Each visual tracker has this database in it and it contains all features that we are tracking. + * The trackers will insert information into this database when they get new measurements from doing tracking. + * A user would then query this database for features that can be used for update and remove them after they have been processed. */ - FeatureDatabase() { - features_idlookup = std::map(); - } + class FeatureDatabase { + public: - /** - * @brief Get a specified feature - * @param id What feature we want to get - * @return Either a feature object, or null if it is not in the database. 
- */ - Feature* get_feature(size_t id) { - if(features_idlookup.find(id) != features_idlookup.end()) { - return features_idlookup[id]; - } else { - return nullptr; + /** + * @brief Default constructor + */ + FeatureDatabase() { + features_idlookup = std::map(); } - } - /** - * @brief Update a feature object - * @param id ID of the feature we will update - * @param timestamp time that this measurement occured at - * @param cam_id which camera this measurement was from - * @param u raw u coordinate - * @param v raw v coordinate - * @param u_n undistorted/normalized u coordinate - * @param v_n undistorted/normalized v coordinate\ - * - * This will update a given feature based on the passed ID it has. - * It will create a new feature, if it is an ID that we have not seen before. - */ - void update_feature(size_t id, double timestamp, size_t cam_id, - float u, float v, float u_n, float v_n) { - - // Find this feature using the ID lookup - if(features_idlookup.find(id) != features_idlookup.end()) { - // Get our feature - Feature* feat = features_idlookup[id]; - // Append this new information to it! - feat->uvs[cam_id].emplace_back(Eigen::Vector2f(u,v)); - feat->uvs_norm[cam_id].emplace_back(Eigen::Vector2f(u_n,v_n)); - feat->timestamps[cam_id].emplace_back(timestamp); - return; + /** + * @brief Get a specified feature + * @param id What feature we want to get + * @return Either a feature object, or null if it is not in the database. + */ + Feature *get_feature(size_t id) { + if (features_idlookup.find(id) != features_idlookup.end()) { + return features_idlookup[id]; + } else { + return nullptr; + } } - // Debug info - //ROS_INFO("featdb - adding new feature %d",(int)id); - // Else we have not found the feature, so lets make it be a new one! 
- Feature* feat = new Feature(); - feat->featid = id; - feat->uvs[cam_id].emplace_back(Eigen::Vector2f(u,v)); - feat->uvs_norm[cam_id].emplace_back(Eigen::Vector2f(u_n,v_n)); - feat->timestamps[cam_id].emplace_back(timestamp); + /** + * @brief Update a feature object + * @param id ID of the feature we will update + * @param timestamp time that this measurement occured at + * @param cam_id which camera this measurement was from + * @param u raw u coordinate + * @param v raw v coordinate + * @param u_n undistorted/normalized u coordinate + * @param v_n undistorted/normalized v coordinate\ + * + * This will update a given feature based on the passed ID it has. + * It will create a new feature, if it is an ID that we have not seen before. + */ + void update_feature(size_t id, double timestamp, size_t cam_id, + float u, float v, float u_n, float v_n) { + + // Find this feature using the ID lookup + if (features_idlookup.find(id) != features_idlookup.end()) { + // Get our feature + Feature *feat = features_idlookup[id]; + // Append this new information to it! + feat->uvs[cam_id].emplace_back(Eigen::Vector2f(u, v)); + feat->uvs_norm[cam_id].emplace_back(Eigen::Vector2f(u_n, v_n)); + feat->timestamps[cam_id].emplace_back(timestamp); + return; + } - // Append this new feature into our database - features_idlookup.insert({id,feat}); - } + // Debug info + //ROS_INFO("featdb - adding new feature %d",(int)id); + // Else we have not found the feature, so lets make it be a new one! + Feature *feat = new Feature(); + feat->featid = id; + feat->uvs[cam_id].emplace_back(Eigen::Vector2f(u, v)); + feat->uvs_norm[cam_id].emplace_back(Eigen::Vector2f(u_n, v_n)); + feat->timestamps[cam_id].emplace_back(timestamp); - /** - * @brief Get features that do not have newer measurement then the specified time. - * - * This function will return all features that do not a measurement at a time greater than the specified time. 
- * For example this could be used to get features that have not been successfully tracked into the newest frame. - * All features returned will not have any measurements occurring at a time greater then the specified. - */ - std::vector features_not_containing_newer(double timestamp) { - - // Our vector of features that do not have measurements after the specified time - std::vector feats_old; - - // Now lets loop through all features, and just make sure they are not old - for (std::map::iterator it = features_idlookup.begin(); it != features_idlookup.end(); ) { - // Loop through each camera - bool has_newer_measurement = false; - for(auto const& pair : (*it).second->timestamps) { - // If we have a measurement greater-than or equal to the specified, this measurement is find - if(!pair.second.empty() && pair.second.at(pair.second.size()-1) >= timestamp) { - has_newer_measurement = true; - break; + // Append this new feature into our database + features_idlookup.insert({id, feat}); + } + + + /** + * @brief Get features that do not have newer measurement then the specified time. + * + * This function will return all features that do not a measurement at a time greater than the specified time. + * For example this could be used to get features that have not been successfully tracked into the newest frame. + * All features returned will not have any measurements occurring at a time greater then the specified. 
+ */ + std::vector features_not_containing_newer(double timestamp) { + + // Our vector of features that do not have measurements after the specified time + std::vector feats_old; + + // Now lets loop through all features, and just make sure they are not old + for (std::map::iterator it = features_idlookup.begin(); it != features_idlookup.end();) { + // Loop through each camera + bool has_newer_measurement = false; + for (auto const &pair : (*it).second->timestamps) { + // If we have a measurement greater-than or equal to the specified, this measurement is find + if (!pair.second.empty() && pair.second.at(pair.second.size() - 1) >= timestamp) { + has_newer_measurement = true; + break; + } } + // If it is not being actively tracked, then it is old + if (!has_newer_measurement) { + feats_old.push_back((*it).second); + } + it++; } - // If it is not being actively tracked, then it is old - if(!has_newer_measurement) { - feats_old.push_back((*it).second); - } - it++; - } - // Debugging - //std::cout << "feature db size = " << features_idlookup.size() << std::endl; + // Debugging + //std::cout << "feature db size = " << features_idlookup.size() << std::endl; - // Return the old features - return feats_old; + // Return the old features + return feats_old; - } + } - /** - * @brief Get features that has measurements older then the specified time. - * - * This will collect all features that have measurements occurring before the specified timestamp. - * For example, we would want to remove all features older then the last clone/state in our sliding window. - */ - std::vector features_containing_older(double timestamp) { + /** + * @brief Get features that has measurements older then the specified time. + * + * This will collect all features that have measurements occurring before the specified timestamp. + * For example, we would want to remove all features older then the last clone/state in our sliding window. 
+ */ + std::vector features_containing_older(double timestamp) { - // Our vector of old features - std::vector feats_old; + // Our vector of old features + std::vector feats_old; - // Now lets loop through all features, and just make sure they are not old - for (std::map::iterator it = features_idlookup.begin(); it != features_idlookup.end(); ) { - // Loop through each camera - for(auto const& pair : (*it).second->timestamps) { - if(!pair.second.empty() && pair.second.at(0) < timestamp) { - feats_old.push_back((*it).second); - break; + // Now lets loop through all features, and just make sure they are not old + for (std::map::iterator it = features_idlookup.begin(); it != features_idlookup.end();) { + // Loop through each camera + for (auto const &pair : (*it).second->timestamps) { + if (!pair.second.empty() && pair.second.at(0) < timestamp) { + feats_old.push_back((*it).second); + break; + } } + it++; } - it++; - } - - // Debugging - //std::cout << "feature db size = " << features_idlookup.size() << std::endl; - // Return the old features - return feats_old; + // Debugging + //std::cout << "feature db size = " << features_idlookup.size() << std::endl; - } + // Return the old features + return feats_old; - /** - * @brief Get features that has measurements at the specified time. - * - * This function will return all features that have the specified time in them. - * This would be used to get all features that occurred at a specific clone/state. 
- */ - std::vector features_containing(double timestamp) { - - // Our vector of old features - std::vector feats_has_timestamp; + } - // Now lets loop through all features, and just make sure they are not - for (std::map::iterator it = features_idlookup.begin(); it != features_idlookup.end(); ) { - // Boolean if it has the timestamp - bool has_timestamp = false; - for(auto const& pair : (*it).second->timestamps) { - // Loop through all timestamps, and see if it has it - for(auto& timefeat : pair.second) { - if(timefeat == timestamp) { - has_timestamp = true; + /** + * @brief Get features that has measurements at the specified time. + * + * This function will return all features that have the specified time in them. + * This would be used to get all features that occurred at a specific clone/state. + */ + std::vector features_containing(double timestamp) { + + // Our vector of old features + std::vector feats_has_timestamp; + + // Now lets loop through all features, and just make sure they are not + for (std::map::iterator it = features_idlookup.begin(); it != features_idlookup.end();) { + // Boolean if it has the timestamp + bool has_timestamp = false; + for (auto const &pair : (*it).second->timestamps) { + // Loop through all timestamps, and see if it has it + for (auto &timefeat : pair.second) { + if (timefeat == timestamp) { + has_timestamp = true; + break; + } + } + // Break out if we found a single timestamp that is equal to the specified time + if (has_timestamp) { break; } } - // Break out if we found a single timestamp that is equal to the specified time - if(has_timestamp) { - break; + // Remove this feature if it contains the specified timestamp + if (has_timestamp) { + feats_has_timestamp.push_back((*it).second); } + it++; } - // Remove this feature if it contains the specified timestamp - if(has_timestamp) { - feats_has_timestamp.push_back((*it).second); - } - it++; - } - // Debugging - //std::cout << "feature db size = " << features_idlookup.size() << 
std::endl; - //std::cout << "return vector = " << feats_has_timestamp.size() << std::endl; + // Debugging + //std::cout << "feature db size = " << features_idlookup.size() << std::endl; + //std::cout << "return vector = " << feats_has_timestamp.size() << std::endl; - // Return the features - return feats_has_timestamp; + // Return the features + return feats_has_timestamp; - } + } - /** - * @brief Returns measurements that occurred at a given timestep - * @todo Need to generalize this function so it works for mono+ncam - * - * Given a timestamp, this will return all the raw and normalized feature measurements for this frame. - * This is used to get track information to our structure from motion initializers! - */ - void get_frame_measurements(double timestamp, size_t cam_id_left, size_t cam_id_right, - std::vector& ids, - std::vector& uvs0_n, std::vector& uvs1_n) { - // Now lets loop through all features, and just make sure they are stereo tracks - auto it = features_idlookup.begin(); - while(it != features_idlookup.end()) { - // Our final ids - bool foundl = false; - bool foundr = false; - Eigen::Vector2f uvl, uvr; - // Loop through all timestamps, and see if it has it - for(size_t i=0; i<(*it).second->timestamps[cam_id_left].size(); i++) { - if((*it).second->timestamps[cam_id_left].at(i) == timestamp) { - uvl = (*it).second->uvs_norm[cam_id_left].at(i); - foundl = true; - break; + /** + * @brief Returns measurements that occurred at a given timestep + * @todo Need to generalize this function so it works for mono+ncam + * + * Given a timestamp, this will return all the raw and normalized feature measurements for this frame. + * This is used to get track information to our structure from motion initializers! 
+ */ + void get_frame_measurements(double timestamp, size_t cam_id_left, size_t cam_id_right, + std::vector &ids, + std::vector &uvs0_n, std::vector &uvs1_n) { + // Now lets loop through all features, and just make sure they are stereo tracks + auto it = features_idlookup.begin(); + while (it != features_idlookup.end()) { + // Our final ids + bool foundl = false; + bool foundr = false; + Eigen::Vector2f uvl, uvr; + // Loop through all timestamps, and see if it has it + for (size_t i = 0; i < (*it).second->timestamps[cam_id_left].size(); i++) { + if ((*it).second->timestamps[cam_id_left].at(i) == timestamp) { + uvl = (*it).second->uvs_norm[cam_id_left].at(i); + foundl = true; + break; + } } - } - for(size_t i=0; i<(*it).second->timestamps[cam_id_right].size(); i++) { - if((*it).second->timestamps[cam_id_right].at(i) == timestamp) { - uvr = (*it).second->uvs_norm[cam_id_right].at(i); - foundr = true; - break; + for (size_t i = 0; i < (*it).second->timestamps[cam_id_right].size(); i++) { + if ((*it).second->timestamps[cam_id_right].at(i) == timestamp) { + uvr = (*it).second->uvs_norm[cam_id_right].at(i); + foundr = true; + break; + } } + // If found in both left and right, then lets add it! + if (foundl && foundr) { + ids.push_back((*it).second->featid); + uvs0_n.push_back(uvl); + uvs1_n.push_back(uvr); + } + it++; } - // If found in both left and right, then lets add it! 
- if(foundl && foundr) { - ids.push_back((*it).second->featid); - uvs0_n.push_back(uvl); - uvs1_n.push_back(uvr); - } - it++; } - } - /** - * This function will delete all features that have been used up - * If a feature was unable to be used, it will still remain since it will not have a delete flag set - */ - void cleanup() { - // Debug - //int sizebefore = (int)features_idlookup.size(); - // Loop through all features - for (auto it = features_idlookup.begin(); it != features_idlookup.end(); ) { - // If delete flag is set, then delete it - if((*it).second->to_delete) { - delete (*it).second; - features_idlookup.erase(it++); - } else { - it++; + /** + * This function will delete all features that have been used up + * If a feature was unable to be used, it will still remain since it will not have a delete flag set + */ + void cleanup() { + // Debug + //int sizebefore = (int)features_idlookup.size(); + // Loop through all features + for (auto it = features_idlookup.begin(); it != features_idlookup.end();) { + // If delete flag is set, then delete it + if ((*it).second->to_delete) { + delete (*it).second; + features_idlookup.erase(it++); + } else { + it++; + } } + // Debug + //cout << "feat db = " << sizebefore << " -> " << (int)features_idlookup.size() << endl; } - // Debug - //cout << "feat db = " << sizebefore << " -> " << (int)features_idlookup.size() << endl; - } - - - -private: + private: - // Our lookup array that allow use to query based on ID - std::map features_idlookup; + // Our lookup array that allow use to query based on ID + std::map features_idlookup; -}; + }; +} #endif /* OV_CORE_FEATURE_DATABASE_H */ \ No newline at end of file diff --git a/ov_core/src/track/Grider_DOG.h b/ov_core/src/track/Grider_DOG.h index 136547040..a80ea87ce 100644 --- a/ov_core/src/track/Grider_DOG.h +++ b/ov_core/src/track/Grider_DOG.h @@ -13,143 +13,151 @@ /** - * @brief Does Difference of Gaussian (DoG) in a grid pattern. 
- * - * This does "Difference of Gaussian" detection in a grid pattern to try to get good features. - * We then pick the top features in each grid, and return the top features collected over the entire image. - * This class hasn't been tested that much, as we normally use the Grider_FAST class instead. + * @namespace ov_core + * @brief Core algorithms for Open VINS */ -class Grider_DOG { - -public: - +namespace ov_core { /** - * @brief Compare keypoints based on their response value. - * @param first First keypoint - * @param second Second keypoint + * @brief Does Difference of Gaussian (DoG) in a grid pattern. * - * We want to have the keypoints with the highest values! - * See: https://stackoverflow.com/a/10910921 + * This does "Difference of Gaussian" detection in a grid pattern to try to get good features. + * We then pick the top features in each grid, and return the top features collected over the entire image. + * This class hasn't been tested that much, as we normally use the Grider_FAST class instead. */ - static bool compare_response(cv::KeyPoint first, cv::KeyPoint second) { - return first.response > second.response; - } + class Grider_DOG { + public: - /** - * @brief For a given small image region this will do the Difference of Gaussian (DOG) detection - * - * Will return the vector of keypoints with the averaged response for that given UV. 
- * See: https://github.com/jrdi/opencv-examples/blob/master/dog/main.cpp - */ - static void detect(const cv::Mat &img, std::vector& pts, int ksize, - float sigma_small, float sigma_big, float threshold) { - - // Calculate the two kernels we will use - cv::Mat gauBig = cv::getGaussianKernel(ksize, sigma_big, CV_32F); - cv::Mat gauSmall = cv::getGaussianKernel(ksize, sigma_small, CV_32F); - cv::Mat DoG = gauSmall - gauBig; - - // Apply the kernel to our image - cv::Mat img_filtered; - cv::sepFilter2D(img, img_filtered, CV_32F, DoG.t(), DoG); - img_filtered = cv::abs(img_filtered); - - // Assert we are a grey scaled image - assert(img_filtered.channels()==1); - - // Loop through and record all points above our threshold - for( int j = 0; j < img_filtered.rows ; j++ ) { - for( int i = 0; i < img_filtered.cols; i++ ) { - // Calculate the response at this u,v coordinate - float response = img_filtered.at(j, i); - // Check to see if it is able our specified threshold - if(response >= threshold) { - cv::KeyPoint pt; - pt.pt.x = i; - pt.pt.y = j; - pt.response = response; - pts.push_back(pt); + + /** + * @brief Compare keypoints based on their response value. + * @param first First keypoint + * @param second Second keypoint + * + * We want to have the keypoints with the highest values! + * See: https://stackoverflow.com/a/10910921 + */ + static bool compare_response(cv::KeyPoint first, cv::KeyPoint second) { + return first.response > second.response; + } + + + /** + * @brief For a given small image region this will do the Difference of Gaussian (DOG) detection + * + * Will return the vector of keypoints with the averaged response for that given UV. 
+ * See: https://github.com/jrdi/opencv-examples/blob/master/dog/main.cpp + */ + static void detect(const cv::Mat &img, std::vector &pts, int ksize, + float sigma_small, float sigma_big, float threshold) { + + // Calculate the two kernels we will use + cv::Mat gauBig = cv::getGaussianKernel(ksize, sigma_big, CV_32F); + cv::Mat gauSmall = cv::getGaussianKernel(ksize, sigma_small, CV_32F); + cv::Mat DoG = gauSmall - gauBig; + + // Apply the kernel to our image + cv::Mat img_filtered; + cv::sepFilter2D(img, img_filtered, CV_32F, DoG.t(), DoG); + img_filtered = cv::abs(img_filtered); + + // Assert we are a grey scaled image + assert(img_filtered.channels() == 1); + + // Loop through and record all points above our threshold + for (int j = 0; j < img_filtered.rows; j++) { + for (int i = 0; i < img_filtered.cols; i++) { + // Calculate the response at this u,v coordinate + float response = img_filtered.at(j, i); + // Check to see if it is able our specified threshold + if (response >= threshold) { + cv::KeyPoint pt; + pt.pt.x = i; + pt.pt.y = j; + pt.response = response; + pts.push_back(pt); + } } } + } - } - - /** - * @brief This function will perform grid extraction using Difference of Gaussian (DOG) - * @param img Image we will do FAST extraction on - * @param pts vector of extracted points we will return - * @param num_features max number of features we want to extract - * @param grid_x size of grid in the x-direction / u-direction - * @param grid_y size of grid in the y-direction / v-direction - * @param ksize kernel size - * @param sigma_small small gaussian sigma - * @param sigma_big big gaussian sigma - * @param threshold response threshold - * - * Given a specified grid size, this will try to extract fast features from each grid. - * It will then return the best from each grid in the return vector. 
- */ - static void perform_griding(const cv::Mat &img, std::vector &pts, int num_features, - int grid_x, int grid_y, int ksize, float sigma_small, float sigma_big, float threshold) { - - // Calculate the size our extraction boxes should be - int size_x = img.cols / grid_x; - int size_y = img.rows / grid_y; - - // Make sure our sizes are not zero - assert(size_x > 0); - assert(size_y > 0); - - // We want to have equally distributed features - auto num_features_grid = (int) (num_features / (grid_x * grid_y)) + 1; - - // Lets loop through each grid and extract features - for (int x = 0; x < img.cols; x += size_x) { - for (int y = 0; y < img.rows; y += size_y) { - - // Skip if we are out of bounds - if (x + size_x > img.cols || y + size_y > img.rows) - continue; - - // Calculate where we should be extracting from - cv::Rect img_roi = cv::Rect(x, y, size_x, size_y); - - // Extract DOG features for this part of the image - std::vector pts_new; - Grider_DOG::detect(img(img_roi), pts_new, ksize, sigma_small, sigma_big, threshold); - - // Now lets get the top number from this - std::sort(pts_new.begin(), pts_new.end(), Grider_DOG::compare_response); - - // Debug print out the response values - //for (auto pt : pts_new) { - // std::cout << pt.response << std::endl; - //} - //std::cout << "======================" << std::endl; - - // Append the "best" ones to our vector - // Note that we need to "correct" the point u,v since we extracted it in a ROI - // So we should append the location of that ROI in the image - for(size_t i=0; i<(size_t)num_features_grid && i &pts, int num_features, + int grid_x, int grid_y, int ksize, float sigma_small, float sigma_big, + float threshold) { + + // Calculate the size our extraction boxes should be + int size_x = img.cols / grid_x; + int size_y = img.rows / grid_y; + + // Make sure our sizes are not zero + assert(size_x > 0); + assert(size_y > 0); + + // We want to have equally distributed features + auto num_features_grid = (int) 
(num_features / (grid_x * grid_y)) + 1; + + // Lets loop through each grid and extract features + for (int x = 0; x < img.cols; x += size_x) { + for (int y = 0; y < img.rows; y += size_y) { + + // Skip if we are out of bounds + if (x + size_x > img.cols || y + size_y > img.rows) + continue; + + // Calculate where we should be extracting from + cv::Rect img_roi = cv::Rect(x, y, size_x, size_y); + + // Extract DOG features for this part of the image + std::vector pts_new; + Grider_DOG::detect(img(img_roi), pts_new, ksize, sigma_small, sigma_big, threshold); + + // Now lets get the top number from this + std::sort(pts_new.begin(), pts_new.end(), Grider_DOG::compare_response); + + // Debug print out the response values + //for (auto pt : pts_new) { + // std::cout << pt.response << std::endl; + //} + //std::cout << "======================" << std::endl; + + // Append the "best" ones to our vector + // Note that we need to "correct" the point u,v since we extracted it in a ROI + // So we should append the location of that ROI in the image + for (size_t i = 0; i < (size_t) num_features_grid && i < pts_new.size(); i++) { + cv::KeyPoint pt_cor = pts_new.at(i); + pt_cor.pt.x += x; + pt_cor.pt.y += y; + pts.push_back(pt_cor); + } + } } - } - } + } -}; + }; +} #endif /* OV_CORE_GRIDER_DOG_H */ \ No newline at end of file diff --git a/ov_core/src/track/Grider_FAST.h b/ov_core/src/track/Grider_FAST.h index 60185c4d9..57a8ba36b 100644 --- a/ov_core/src/track/Grider_FAST.h +++ b/ov_core/src/track/Grider_FAST.h @@ -13,100 +13,108 @@ /** - * @brief Extracts FAST features in a grid pattern. - * - * As compared to just extracting fast features over the entire image, - * we want to have as uniform of extractions as possible over the image plane. - * Thus we split the image into a bunch of small grids, and extract points in each. - * We then pick enough top points in each grid so that we have the total number of desired points. 
+ * @namespace ov_core + * @brief Core algorithms for Open VINS */ -class Grider_FAST { - -public: - +namespace ov_core { /** - * @brief Compare keypoints based on their response value. - * @param first First keypoint - * @param second Second keypoint + * @brief Extracts FAST features in a grid pattern. * - * We want to have the keypoints with the highest values! - * See: https://stackoverflow.com/a/10910921 + * As compared to just extracting fast features over the entire image, + * we want to have as uniform of extractions as possible over the image plane. + * Thus we split the image into a bunch of small grids, and extract points in each. + * We then pick enough top points in each grid so that we have the total number of desired points. */ - static bool compare_response(cv::KeyPoint first, cv::KeyPoint second) { - return first.response > second.response; - } + class Grider_FAST { + public: - /** - * @brief This function will perform grid extraction using FAST. - * @param img Image we will do FAST extraction on - * @param pts vector of extracted points we will return - * @param num_features max number of features we want to extract - * @param grid_x size of grid in the x-direction / u-direction - * @param grid_y size of grid in the y-direction / v-direction - * @param threshold FAST threshold paramter (10 is a good value normally) - * @param nonmaxSuppression if FAST should perform non-max suppression (true normally) - * - * Given a specified grid size, this will try to extract fast features from each grid. - * It will then return the best from each grid in the return vector. 
- */ - static void perform_griding(const cv::Mat &img, std::vector &pts, int num_features, - int grid_x, int grid_y, int threshold, bool nonmaxSuppression) { - - // Calculate the size our extraction boxes should be - int size_x = img.cols / grid_x; - int size_y = img.rows / grid_y; - - // Make sure our sizes are not zero - assert(size_x > 0); - assert(size_y > 0); - - // We want to have equally distributed features - auto num_features_grid = (int) (num_features / (grid_x * grid_y)) + 1; - - // Lets loop through each grid and extract features - for (int x = 0; x < img.cols; x += size_x) { - for (int y = 0; y < img.rows; y += size_y) { - - // Skip if we are out of bounds - if (x + size_x > img.cols || y + size_y > img.rows) - continue; - - // Calculate where we should be extracting from - cv::Rect img_roi = cv::Rect(x, y, size_x, size_y); - - // Extract FAST features for this part of the image - std::vector pts_new; - cv::FAST(img(img_roi), pts_new, threshold, nonmaxSuppression); - - // Now lets get the top number from this - std::sort(pts_new.begin(), pts_new.end(), Grider_FAST::compare_response); - - // Debug print out the response values - //for (auto pt : pts_new) { - // std::cout << pt.response << std::endl; - //} - //std::cout << "======================" << std::endl; - - // Append the "best" ones to our vector - // Note that we need to "correct" the point u,v since we extracted it in a ROI - // So we should append the location of that ROI in the image - for(size_t i=0; i<(size_t)num_features_grid && i second.response; + } + + + /** + * @brief This function will perform grid extraction using FAST. 
+ * @param img Image we will do FAST extraction on + * @param pts vector of extracted points we will return + * @param num_features max number of features we want to extract + * @param grid_x size of grid in the x-direction / u-direction + * @param grid_y size of grid in the y-direction / v-direction + * @param threshold FAST threshold paramter (10 is a good value normally) + * @param nonmaxSuppression if FAST should perform non-max suppression (true normally) + * + * Given a specified grid size, this will try to extract fast features from each grid. + * It will then return the best from each grid in the return vector. + */ + static void perform_griding(const cv::Mat &img, std::vector &pts, int num_features, + int grid_x, int grid_y, int threshold, bool nonmaxSuppression) { + + // Calculate the size our extraction boxes should be + int size_x = img.cols / grid_x; + int size_y = img.rows / grid_y; + + // Make sure our sizes are not zero + assert(size_x > 0); + assert(size_y > 0); + + // We want to have equally distributed features + auto num_features_grid = (int) (num_features / (grid_x * grid_y)) + 1; + + // Lets loop through each grid and extract features + for (int x = 0; x < img.cols; x += size_x) { + for (int y = 0; y < img.rows; y += size_y) { + + // Skip if we are out of bounds + if (x + size_x > img.cols || y + size_y > img.rows) + continue; + + // Calculate where we should be extracting from + cv::Rect img_roi = cv::Rect(x, y, size_x, size_y); + + // Extract FAST features for this part of the image + std::vector pts_new; + cv::FAST(img(img_roi), pts_new, threshold, nonmaxSuppression); + + // Now lets get the top number from this + std::sort(pts_new.begin(), pts_new.end(), Grider_FAST::compare_response); + + // Debug print out the response values + //for (auto pt : pts_new) { + // std::cout << pt.response << std::endl; + //} + //std::cout << "======================" << std::endl; + + // Append the "best" ones to our vector + // Note that we need to "correct" 
the point u,v since we extracted it in a ROI + // So we should append the location of that ROI in the image + for (size_t i = 0; i < (size_t) num_features_grid && i < pts_new.size(); i++) { + cv::KeyPoint pt_cor = pts_new.at(i); + pt_cor.pt.x += x; + pt_cor.pt.y += y; + pts.push_back(pt_cor); + } + + } } + } - } + }; -}; +} #endif /* OV_CORE_GRIDER_FAST_H */ \ No newline at end of file diff --git a/ov_core/src/track/TrackAruco.cpp b/ov_core/src/track/TrackAruco.cpp index 5eb392f03..ae2af63db 100644 --- a/ov_core/src/track/TrackAruco.cpp +++ b/ov_core/src/track/TrackAruco.cpp @@ -1,6 +1,9 @@ #include "TrackAruco.h" +using namespace ov_core; + + void TrackAruco::feed_monocular(double timestamp, cv::Mat &img, size_t cam_id) { // Start timing diff --git a/ov_core/src/track/TrackAruco.h b/ov_core/src/track/TrackAruco.h index 28de6ed28..d733230fe 100644 --- a/ov_core/src/track/TrackAruco.h +++ b/ov_core/src/track/TrackAruco.h @@ -6,101 +6,108 @@ #include "TrackBase.h" - /** - * @brief Tracking of OpenCV Aruoc tags. - * - * This class handles the tracking of [OpenCV Aruco tags](https://github.com/opencv/opencv_contrib/tree/master/modules/aruco). - * We track the top left corner of the tag as compared to the pose of the tag or any other corners. - * Right now we hardcode the dictionary to be `cv::aruco::DICT_6X6_25`, so please generate tags in this family of tags. 
+ * @namespace ov_core + * @brief Core algorithms for Open VINS */ -class TrackAruco : public TrackBase -{ - -public: - - /** - * @brief Public default constructor - * @param camera_k map of camera_id => 3x3 camera intrinic matrix - * @param camera_d map of camera_id => 4x1 camera distortion parameters - * @param camera_fisheye map of camera_id => bool if we should do radtan or fisheye distortion model - */ - TrackAruco(std::map camera_k, - std::map> camera_d, - std::map camera_fisheye): - TrackBase(camera_k,camera_d,camera_fisheye), max_tag_id(1024), do_downsizing(false) { - aruco_dict = cv::aruco::getPredefinedDictionary(cv::aruco::DICT_6X6_250); - aruco_params = cv::aruco::DetectorParameters::create(); - } - - /** - * @brief Public constructor with configuration variables - * @param camera_k map of camera_id => 3x3 camera intrinic matrix - * @param camera_d map of camera_id => 4x1 camera distortion parameters - * @param camera_fisheye map of camera_id => bool if we should do radtan or fisheye distortion model - * @param numaruco the max id of the arucotags, we don't use any tags greater than this value even if we extract them - * @param do_downsizing we can scale the image by 1/2 to increase Aruco tag extraction speed - */ - explicit TrackAruco(std::map camera_k, - std::map> camera_d, - std::map camera_fisheye, int numaruco, bool do_downsizing): - TrackBase(camera_k,camera_d,camera_fisheye,0,numaruco),max_tag_id(numaruco), do_downsizing(do_downsizing) { - aruco_dict = cv::aruco::getPredefinedDictionary(cv::aruco::DICT_6X6_250); - aruco_params = cv::aruco::DetectorParameters::create(); - } - - /** - * @brief Process a new monocular image - * @param timestamp timestamp the new image occurred at - * @param img new cv:Mat grayscale image - * @param cam_id the camera id that this new image corresponds too - */ - void feed_monocular(double timestamp, cv::Mat &img, size_t cam_id) override; - - /** - * @brief Process new stereo pair of images - * @param timestamp 
timestamp this pair occured at (stereo is synchronised) - * @param img_left first grayscaled image - * @param img_right second grayscaled image - * @param cam_id_left first image camera id - * @param cam_id_right second image camera id - */ - void feed_stereo(double timestamp, cv::Mat &img_left, cv::Mat &img_right, size_t cam_id_left, size_t cam_id_right) override; - +namespace ov_core { /** - * @brief We override the display equation so we can show the tags we extract. - * @param img_out image to which we will overlayed features on - * @param r1,g1,b1 first color to draw in - * @param r2,g2,b2 second color to draw in + * @brief Tracking of OpenCV Aruoc tags. + * + * This class handles the tracking of [OpenCV Aruco tags](https://github.com/opencv/opencv_contrib/tree/master/modules/aruco). + * We track the top left corner of the tag as compared to the pose of the tag or any other corners. + * Right now we hardcode the dictionary to be `cv::aruco::DICT_6X6_25`, so please generate tags in this family of tags. 
*/ - void display_active(cv::Mat &img_out, int r1, int g1, int b1, int r2, int g2, int b2) override; - - -protected: - - // Timing variables - boost::posix_time::ptime rT1, rT2, rT3, rT4, rT5, rT6, rT7; - - // Max tag ID we should extract from (i.e., number of aruco tags starting from zero) - int max_tag_id; - - // If we should downsize the image - bool do_downsizing; - - // Our dictionary that we will extract aruco tags with - cv::Ptr aruco_dict; - - // Parameters the opencv extractor uses - cv::Ptr aruco_params; - - // Our tag IDs and corner we will get from the extractor - std::map> ids_aruco; - std::map>> corners, rejects; - - -}; - + class TrackAruco : public TrackBase { + + public: + + /** + * @brief Public default constructor + * @param camera_k map of camera_id => 3x3 camera intrinic matrix + * @param camera_d map of camera_id => 4x1 camera distortion parameters + * @param camera_fisheye map of camera_id => bool if we should do radtan or fisheye distortion model + */ + TrackAruco(std::map camera_k, + std::map> camera_d, + std::map camera_fisheye) : + TrackBase(camera_k, camera_d, camera_fisheye), max_tag_id(1024), do_downsizing(false) { + aruco_dict = cv::aruco::getPredefinedDictionary(cv::aruco::DICT_6X6_250); + aruco_params = cv::aruco::DetectorParameters::create(); + } + + /** + * @brief Public constructor with configuration variables + * @param camera_k map of camera_id => 3x3 camera intrinic matrix + * @param camera_d map of camera_id => 4x1 camera distortion parameters + * @param camera_fisheye map of camera_id => bool if we should do radtan or fisheye distortion model + * @param numaruco the max id of the arucotags, we don't use any tags greater than this value even if we extract them + * @param do_downsizing we can scale the image by 1/2 to increase Aruco tag extraction speed + */ + explicit TrackAruco(std::map camera_k, + std::map> camera_d, + std::map camera_fisheye, int numaruco, bool do_downsizing) : + TrackBase(camera_k, camera_d, 
camera_fisheye, 0, numaruco), max_tag_id(numaruco), + do_downsizing(do_downsizing) { + aruco_dict = cv::aruco::getPredefinedDictionary(cv::aruco::DICT_6X6_250); + aruco_params = cv::aruco::DetectorParameters::create(); + } + + /** + * @brief Process a new monocular image + * @param timestamp timestamp the new image occurred at + * @param img new cv:Mat grayscale image + * @param cam_id the camera id that this new image corresponds too + */ + void feed_monocular(double timestamp, cv::Mat &img, size_t cam_id) override; + + /** + * @brief Process new stereo pair of images + * @param timestamp timestamp this pair occured at (stereo is synchronised) + * @param img_left first grayscaled image + * @param img_right second grayscaled image + * @param cam_id_left first image camera id + * @param cam_id_right second image camera id + */ + void feed_stereo(double timestamp, cv::Mat &img_left, cv::Mat &img_right, size_t cam_id_left, + size_t cam_id_right) override; + + + /** + * @brief We override the display equation so we can show the tags we extract. 
+ * @param img_out image to which we will overlayed features on + * @param r1,g1,b1 first color to draw in + * @param r2,g2,b2 second color to draw in + */ + void display_active(cv::Mat &img_out, int r1, int g1, int b1, int r2, int g2, int b2) override; + + + protected: + + // Timing variables + boost::posix_time::ptime rT1, rT2, rT3, rT4, rT5, rT6, rT7; + + // Max tag ID we should extract from (i.e., number of aruco tags starting from zero) + int max_tag_id; + + // If we should downsize the image + bool do_downsizing; + + // Our dictionary that we will extract aruco tags with + cv::Ptr aruco_dict; + + // Parameters the opencv extractor uses + cv::Ptr aruco_params; + + // Our tag IDs and corner we will get from the extractor + std::map> ids_aruco; + std::map>> corners, rejects; + + + }; + +} #endif /* OV_CORE_TRACK_ARUCO_H */ \ No newline at end of file diff --git a/ov_core/src/track/TrackBase.cpp b/ov_core/src/track/TrackBase.cpp index b321c058d..452fe11bf 100644 --- a/ov_core/src/track/TrackBase.cpp +++ b/ov_core/src/track/TrackBase.cpp @@ -1,5 +1,7 @@ #include "TrackBase.h" -#include "state/Propagator.h" + + +using namespace ov_core; void TrackBase::display_active(cv::Mat &img_out, int r1, int g1, int b1, int r2, int g2, int b2) { diff --git a/ov_core/src/track/TrackBase.h b/ov_core/src/track/TrackBase.h index 48ee1551c..138a9c5a3 100644 --- a/ov_core/src/track/TrackBase.h +++ b/ov_core/src/track/TrackBase.h @@ -16,248 +16,254 @@ #include "Grider_DOG.h" #include "FeatureDatabase.h" - /** - * @brief Visual feature tracking base class - * - * This is the base class for all our visual trackers. - * The goal here is to provide a common interface so all underlying trackers can simply hide away all the complexities. - * We have something called the "feature database" which has all the tracking information inside of it. - * The user can ask this database for features which can then be used in an MSCKF or batch-based setting. 
- * The feature tracks store both the raw (distorted) and undistorted/normalized values. - * Right now we just support two camera models, see: undistort_point_brown() and undistort_point_fisheye(). - * - * This base class also handles most of the heavy lifting with the visualalization, but the sub-classes can override - * this and do their own logic if they want (i.e. the auroctag tracker has its own logic for visualization). + * @namespace ov_core + * @brief Core algorithms for Open VINS */ -class TrackBase -{ +namespace ov_core { -public: /** - * @brief Public default constructor - * @param camera_k map of camera_id => 3x3 camera intrinic matrix - * @param camera_d map of camera_id => 4x1 camera distortion parameters - * @param camera_fisheye map of camera_id => bool if we should do radtan or fisheye distortion model + * @brief Visual feature tracking base class + * + * This is the base class for all our visual trackers. + * The goal here is to provide a common interface so all underlying trackers can simply hide away all the complexities. + * We have something called the "feature database" which has all the tracking information inside of it. + * The user can ask this database for features which can then be used in an MSCKF or batch-based setting. + * The feature tracks store both the raw (distorted) and undistorted/normalized values. + * Right now we just support two camera models, see: undistort_point_brown() and undistort_point_fisheye(). + * + * This base class also handles most of the heavy lifting with the visualalization, but the sub-classes can override + * this and do their own logic if they want (i.e. the auroctag tracker has its own logic for visualization). 
*/ - TrackBase(std::map camera_k, - std::map> camera_d, - std::map camera_fisheye): - database(new FeatureDatabase()), num_features(200) { - // Save the camera parameters - this->camera_k = camera_k; - this->camera_d = camera_d; - this->camera_fisheye = camera_fisheye; - // Convert values to the OpenCV format - for(auto const& camKp : camera_k) { - cv::Matx33d tempK; - tempK(0,0) = camKp.second(0,0); - tempK(0,1) = camKp.second(0,1); - tempK(0,2) = camKp.second(0,2); - tempK(1,0) = camKp.second(1,0); - tempK(1,1) = camKp.second(1,1); - tempK(1,2) = camKp.second(1,2); - tempK(2,0) = camKp.second(2,0); - tempK(2,1) = camKp.second(2,1); - tempK(2,2) = camKp.second(2,2); - camera_k_OPENCV.insert({camKp.first,tempK}); + class TrackBase { + + public: + + /** + * @brief Public default constructor + * @param camera_k map of camera_id => 3x3 camera intrinic matrix + * @param camera_d map of camera_id => 4x1 camera distortion parameters + * @param camera_fisheye map of camera_id => bool if we should do radtan or fisheye distortion model + */ + TrackBase(std::map camera_k, + std::map> camera_d, + std::map camera_fisheye) : + database(new FeatureDatabase()), num_features(200) { + // Save the camera parameters + this->camera_k = camera_k; + this->camera_d = camera_d; + this->camera_fisheye = camera_fisheye; + // Convert values to the OpenCV format + for (auto const &camKp : camera_k) { + cv::Matx33d tempK; + tempK(0, 0) = camKp.second(0, 0); + tempK(0, 1) = camKp.second(0, 1); + tempK(0, 2) = camKp.second(0, 2); + tempK(1, 0) = camKp.second(1, 0); + tempK(1, 1) = camKp.second(1, 1); + tempK(1, 2) = camKp.second(1, 2); + tempK(2, 0) = camKp.second(2, 0); + tempK(2, 1) = camKp.second(2, 1); + tempK(2, 2) = camKp.second(2, 2); + camera_k_OPENCV.insert({camKp.first, tempK}); + } + for (auto const &camDp : camera_d) { + cv::Vec4d tempD; + tempD(0) = camDp.second(0, 0); + tempD(1) = camDp.second(1, 0); + tempD(2) = camDp.second(2, 0); + tempD(3) = camDp.second(3, 0); + 
camera_d_OPENCV.insert({camDp.first, tempD}); + } } - for(auto const& camDp : camera_d) { - cv::Vec4d tempD; - tempD(0) = camDp.second(0,0); - tempD(1) = camDp.second(1,0); - tempD(2) = camDp.second(2,0); - tempD(3) = camDp.second(3,0); - camera_d_OPENCV.insert({camDp.first,tempD}); - } - } - /** - * @brief Public constructor with configuration variables - * @param camera_k map of camera_id => 3x3 camera intrinic matrix - * @param camera_d map of camera_id => 4x1 camera distortion parameters - * @param camera_fisheye map of camera_id => bool if we should do radtan or fisheye distortion model - * @param numfeats number of features we want want to track (i.e. track 200 points from frame to frame) - * @param numaruco the max id of the arucotags, so we ensure that we start our non-auroc features above this value - */ - TrackBase(std::map camera_k, - std::map> camera_d, - std::map camera_fisheye, - int numfeats, int numaruco): - database(new FeatureDatabase()), num_features(numfeats) { - // Our current feature ID should be larger then the number of aruco tags we have - currid = (size_t)numaruco+1; - // Save the camera parameters - this->camera_k = camera_k; - this->camera_d = camera_d; - this->camera_fisheye = camera_fisheye; - // Convert values to the OpenCV format - for(auto const& camKp : camera_k) { - cv::Matx33d tempK; - tempK(0,0) = camKp.second(0,0); - tempK(0,1) = camKp.second(0,1); - tempK(0,2) = camKp.second(0,2); - tempK(1,0) = camKp.second(1,0); - tempK(1,1) = camKp.second(1,1); - tempK(1,2) = camKp.second(1,2); - tempK(2,0) = camKp.second(2,0); - tempK(2,1) = camKp.second(2,1); - tempK(2,2) = camKp.second(2,2); - camera_k_OPENCV.insert({camKp.first,tempK}); - } - for(auto const& camDp : camera_d) { - cv::Vec4d tempD; - tempD(0) = camDp.second(0,0); - tempD(1) = camDp.second(1,0); - tempD(2) = camDp.second(2,0); - tempD(3) = camDp.second(3,0); - camera_d_OPENCV.insert({camDp.first,tempD}); + /** + * @brief Public constructor with configuration variables + * 
@param camera_k map of camera_id => 3x3 camera intrinic matrix + * @param camera_d map of camera_id => 4x1 camera distortion parameters + * @param camera_fisheye map of camera_id => bool if we should do radtan or fisheye distortion model + * @param numfeats number of features we want want to track (i.e. track 200 points from frame to frame) + * @param numaruco the max id of the arucotags, so we ensure that we start our non-auroc features above this value + */ + TrackBase(std::map camera_k, + std::map> camera_d, + std::map camera_fisheye, + int numfeats, int numaruco) : + database(new FeatureDatabase()), num_features(numfeats) { + // Our current feature ID should be larger then the number of aruco tags we have + currid = (size_t) numaruco + 1; + // Save the camera parameters + this->camera_k = camera_k; + this->camera_d = camera_d; + this->camera_fisheye = camera_fisheye; + // Convert values to the OpenCV format + for (auto const &camKp : camera_k) { + cv::Matx33d tempK; + tempK(0, 0) = camKp.second(0, 0); + tempK(0, 1) = camKp.second(0, 1); + tempK(0, 2) = camKp.second(0, 2); + tempK(1, 0) = camKp.second(1, 0); + tempK(1, 1) = camKp.second(1, 1); + tempK(1, 2) = camKp.second(1, 2); + tempK(2, 0) = camKp.second(2, 0); + tempK(2, 1) = camKp.second(2, 1); + tempK(2, 2) = camKp.second(2, 2); + camera_k_OPENCV.insert({camKp.first, tempK}); + } + for (auto const &camDp : camera_d) { + cv::Vec4d tempD; + tempD(0) = camDp.second(0, 0); + tempD(1) = camDp.second(1, 0); + tempD(2) = camDp.second(2, 0); + tempD(3) = camDp.second(3, 0); + camera_d_OPENCV.insert({camDp.first, tempD}); + } } - } - - - /** - * @brief Process a new monocular image - * @param timestamp timestamp the new image occurred at - * @param img new cv:Mat grayscale image - * @param cam_id the camera id that this new image corresponds too - */ - virtual void feed_monocular(double timestamp, cv::Mat &img, size_t cam_id) = 0; - /** - * @brief Process new stereo pair of images - * @param timestamp timestamp 
this pair occured at (stereo is synchronised) - * @param img_left first grayscaled image - * @param img_right second grayscaled image - * @param cam_id_left first image camera id - * @param cam_id_right second image camera id - */ - virtual void feed_stereo(double timestamp, cv::Mat &img_left, cv::Mat &img_right, size_t cam_id_left, size_t cam_id_right) = 0; - /** - * @brief Shows features extracted in the last image - * @param img_out image to which we will overlayed features on - * @param r1,g1,b1 first color to draw in - * @param r2,g2,b2 second color to draw in - */ - virtual void display_active(cv::Mat &img_out, int r1, int g1, int b1, int r2, int g2, int b2); - - /** - * @brief Shows a "trail" for each feature (i.e. its history) - * @param img_out image to which we will overlayed features on - * @param r1,g1,b1 first color to draw in - * @param r2,g2,b2 second color to draw in - */ - virtual void display_history(cv::Mat &img_out, int r1, int g1, int b1, int r2, int g2, int b2); - - /** - * @brief Get the feature database with all the track information - * @return FeatureDatabase pointer that one can query for features - */ - FeatureDatabase* get_feature_database() { - return database; - } - -protected: - - /** - * @brief Main function that will undistort/normalize a point. - * @param pt_in uv 2x1 point that we will undistort - * @param cam_id id of which camera this point is in - * @return undistorted 2x1 point - * - * Given a uv point, this will undistort it based on the camera matrices. - * This will call on the model needed, depending on what type of camera it is! - * So if we have fisheye for camera_1 is true, we will undistort with the fisheye model. - * In Kalibr's terms, the non-fisheye is `pinhole-radtan` while the fisheye is the `pinhole-equi` model. 
- */ - cv::Point2f undistort_point(cv::Point2f pt_in, size_t cam_id) { - // Determine what camera parameters we should use - cv::Matx33d camK = this->camera_k_OPENCV[cam_id]; - cv::Vec4d camD = this->camera_d_OPENCV[cam_id]; - // Call on the fisheye if we should! - if(this->camera_fisheye[cam_id]) { - return undistort_point_fisheye(pt_in,camK,camD); + /** + * @brief Process a new monocular image + * @param timestamp timestamp the new image occurred at + * @param img new cv:Mat grayscale image + * @param cam_id the camera id that this new image corresponds too + */ + virtual void feed_monocular(double timestamp, cv::Mat &img, size_t cam_id) = 0; + + /** + * @brief Process new stereo pair of images + * @param timestamp timestamp this pair occured at (stereo is synchronised) + * @param img_left first grayscaled image + * @param img_right second grayscaled image + * @param cam_id_left first image camera id + * @param cam_id_right second image camera id + */ + virtual void feed_stereo(double timestamp, cv::Mat &img_left, cv::Mat &img_right, size_t cam_id_left, + size_t cam_id_right) = 0; + + /** + * @brief Shows features extracted in the last image + * @param img_out image to which we will overlayed features on + * @param r1,g1,b1 first color to draw in + * @param r2,g2,b2 second color to draw in + */ + virtual void display_active(cv::Mat &img_out, int r1, int g1, int b1, int r2, int g2, int b2); + + /** + * @brief Shows a "trail" for each feature (i.e. 
its history) + * @param img_out image to which we will overlayed features on + * @param r1,g1,b1 first color to draw in + * @param r2,g2,b2 second color to draw in + */ + virtual void display_history(cv::Mat &img_out, int r1, int g1, int b1, int r2, int g2, int b2); + + /** + * @brief Get the feature database with all the track information + * @return FeatureDatabase pointer that one can query for features + */ + FeatureDatabase *get_feature_database() { + return database; } - return undistort_point_brown(pt_in,camK,camD); - } - /** - * @brief Undistort function RADTAN/BROWN. - * - * Given a uv point, this will undistort it based on the camera matrices. - * To equate this to Kalibr's models, this is what you would use for `pinhole-radtan`. - */ - cv::Point2f undistort_point_brown(cv::Point2f pt_in, cv::Matx33d& camK, cv::Vec4d& camD) { - // Convert to opencv format - cv::Mat mat(1, 2, CV_32F); - mat.at(0, 0) = pt_in.x; - mat.at(0, 1) = pt_in.y; - mat = mat.reshape(2); // Nx1, 2-channel - // Undistort it! - cv::undistortPoints(mat, mat, camK, camD); - // Construct our return vector - cv::Point2f pt_out; - mat = mat.reshape(1); // Nx2, 1-channel - pt_out.x = mat.at(0, 0); - pt_out.y = mat.at(0, 1); - return pt_out; - } + protected: + + /** + * @brief Main function that will undistort/normalize a point. + * @param pt_in uv 2x1 point that we will undistort + * @param cam_id id of which camera this point is in + * @return undistorted 2x1 point + * + * Given a uv point, this will undistort it based on the camera matrices. + * This will call on the model needed, depending on what type of camera it is! + * So if we have fisheye for camera_1 is true, we will undistort with the fisheye model. + * In Kalibr's terms, the non-fisheye is `pinhole-radtan` while the fisheye is the `pinhole-equi` model. 
+ */ + cv::Point2f undistort_point(cv::Point2f pt_in, size_t cam_id) { + // Determine what camera parameters we should use + cv::Matx33d camK = this->camera_k_OPENCV[cam_id]; + cv::Vec4d camD = this->camera_d_OPENCV[cam_id]; + // Call on the fisheye if we should! + if (this->camera_fisheye[cam_id]) { + return undistort_point_fisheye(pt_in, camK, camD); + } + return undistort_point_brown(pt_in, camK, camD); + } - /** - * @brief Undistort function FISHEYE/EQUIDISTANT. - * - * Given a uv point, this will undistort it based on the camera matrices. - * To equate this to Kalibr's models, this is what you would use for `pinhole-equi`. - */ - cv::Point2f undistort_point_fisheye(cv::Point2f pt_in, cv::Matx33d& camK, cv::Vec4d& camD) { - // Convert point to opencv format - cv::Mat mat(1, 2, CV_32F); - mat.at(0, 0) = pt_in.x; - mat.at(0, 1) = pt_in.y; - mat = mat.reshape(2); // Nx1, 2-channel - // Undistort it! - cv::fisheye::undistortPoints(mat, mat, camK, camD); - // Construct our return vector - cv::Point2f pt_out; - mat = mat.reshape(1); // Nx2, 1-channel - pt_out.x = mat.at(0, 0); - pt_out.y = mat.at(0, 1); - return pt_out; - } + /** + * @brief Undistort function RADTAN/BROWN. + * + * Given a uv point, this will undistort it based on the camera matrices. + * To equate this to Kalibr's models, this is what you would use for `pinhole-radtan`. + */ + cv::Point2f undistort_point_brown(cv::Point2f pt_in, cv::Matx33d &camK, cv::Vec4d &camD) { + // Convert to opencv format + cv::Mat mat(1, 2, CV_32F); + mat.at(0, 0) = pt_in.x; + mat.at(0, 1) = pt_in.y; + mat = mat.reshape(2); // Nx1, 2-channel + // Undistort it! + cv::undistortPoints(mat, mat, camK, camD); + // Construct our return vector + cv::Point2f pt_out; + mat = mat.reshape(1); // Nx2, 1-channel + pt_out.x = mat.at(0, 0); + pt_out.y = mat.at(0, 1); + return pt_out; + } - // Database with all our current features - FeatureDatabase* database; + /** + * @brief Undistort function FISHEYE/EQUIDISTANT. 
+ * + * Given a uv point, this will undistort it based on the camera matrices. + * To equate this to Kalibr's models, this is what you would use for `pinhole-equi`. + */ + cv::Point2f undistort_point_fisheye(cv::Point2f pt_in, cv::Matx33d &camK, cv::Vec4d &camD) { + // Convert point to opencv format + cv::Mat mat(1, 2, CV_32F); + mat.at(0, 0) = pt_in.x; + mat.at(0, 1) = pt_in.y; + mat = mat.reshape(2); // Nx1, 2-channel + // Undistort it! + cv::fisheye::undistortPoints(mat, mat, camK, camD); + // Construct our return vector + cv::Point2f pt_out; + mat = mat.reshape(1); // Nx2, 1-channel + pt_out.x = mat.at(0, 0); + pt_out.y = mat.at(0, 1); + return pt_out; + } - // Our camera information (used to undistort our added UV coordinates) - // NOTE: we do NOT undistort and rectify the image as this takes a lot of time - std::map camera_k; - std::map> camera_d; + // Database with all our current features + FeatureDatabase *database; - // If we are a fisheye model or not - std::map camera_fisheye; + // Our camera information (used to undistort our added UV coordinates) + // NOTE: we do NOT undistort and rectify the image as this takes a lot of time + std::map camera_k; + std::map> camera_d; - // Camera intrinsics in OpenCV format - std::map camera_k_OPENCV; - std::map camera_d_OPENCV; + // If we are a fisheye model or not + std::map camera_fisheye; - // number of features we should try to track frame to frame - int num_features; + // Camera intrinsics in OpenCV format + std::map camera_k_OPENCV; + std::map camera_d_OPENCV; - // Last set of images - std::map img_last; + // number of features we should try to track frame to frame + int num_features; - // Last set of tracked points - std::map> pts_last; + // Last set of images + std::map img_last; - // Set of IDs of each current feature in the database - size_t currid = 0; - std::map> ids_last; + // Last set of tracked points + std::map> pts_last; + // Set of IDs of each current feature in the database + size_t currid = 0; + 
std::map> ids_last; -}; + }; +} #endif /* OV_CORE_TRACK_BASE_H */ \ No newline at end of file diff --git a/ov_core/src/track/TrackDescriptor.cpp b/ov_core/src/track/TrackDescriptor.cpp index d4b835a52..9d2bffa47 100644 --- a/ov_core/src/track/TrackDescriptor.cpp +++ b/ov_core/src/track/TrackDescriptor.cpp @@ -1,6 +1,8 @@ #include "TrackDescriptor.h" +using namespace ov_core; + void TrackDescriptor::feed_monocular(double timestamp, cv::Mat &img, size_t cam_id) { diff --git a/ov_core/src/track/TrackDescriptor.h b/ov_core/src/track/TrackDescriptor.h index 1ec722ef4..4630c0a8a 100644 --- a/ov_core/src/track/TrackDescriptor.h +++ b/ov_core/src/track/TrackDescriptor.h @@ -6,151 +6,167 @@ #include "TrackBase.h" - /** - * @brief Descriptor-based visual tracking - * - * Here we use descriptor matching to track features from one frame to the next. - * We track both temporally, and across stereo pairs to get stereo contraints. - * Right now we use ORB descriptors as we have found it is the fastest when computing descriptors. 
+ * @namespace ov_core + * @brief Core algorithms for Open VINS */ -class TrackDescriptor : public TrackBase -{ +namespace ov_core { -public: /** - * @brief Public default constructor - * @param camera_k map of camera_id => 3x3 camera intrinic matrix - * @param camera_d map of camera_id => 4x1 camera distortion parameters - * @param camera_fisheye map of camera_id => bool if we should do radtan or fisheye distortion model - */ - TrackDescriptor(std::map camera_k, - std::map> camera_d, - std::map camera_fisheye): - TrackBase(camera_k,camera_d,camera_fisheye),threshold(10),grid_x(8),grid_y(5),knn_ratio(0.75) {} - - /** - * @brief Public constructor with configuration variables - * @param camera_k map of camera_id => 3x3 camera intrinic matrix - * @param camera_d map of camera_id => 4x1 camera distortion parameters - * @param camera_fisheye map of camera_id => bool if we should do radtan or fisheye distortion model - * @param numfeats number of features we want want to track (i.e. track 200 points from frame to frame) - * @param numaruco the max id of the arucotags, so we ensure that we start our non-auroc features above this value - * @param fast_threshold FAST detection threshold - * @param gridx size of grid in the x-direction / u-direction - * @param gridy size of grid in the y-direction / v-direction - * @param knnratio matching ratio needed (smaller value forces top two descriptors during match to be more different) - */ - explicit TrackDescriptor(std::map camera_k, - std::map> camera_d, - std::map camera_fisheye, - int numfeats, int numaruco, int fast_threshold, int gridx, int gridy, double knnratio): - TrackBase(camera_k,camera_d,camera_fisheye,numfeats,numaruco),threshold(fast_threshold),grid_x(gridx),grid_y(gridy),knn_ratio(knnratio) {} - - /** - * @brief Process a new monocular image - * @param timestamp timestamp the new image occurred at - * @param img new cv:Mat grayscale image - * @param cam_id the camera id that this new image corresponds too - */ - 
void feed_monocular(double timestamp, cv::Mat &img, size_t cam_id) override; - - /** - * @brief Process new stereo pair of images - * @param timestamp timestamp this pair occured at (stereo is synchronised) - * @param img_left first grayscaled image - * @param img_right second grayscaled image - * @param cam_id_left first image camera id - * @param cam_id_right second image camera id - */ - void feed_stereo(double timestamp, cv::Mat &img_left, cv::Mat &img_right, size_t cam_id_left, size_t cam_id_right) override; - - -protected: - - /** - * @brief Detects new features in the current image - * @param img0 image we will detect features on - * @param pts0 vector of extracted keypoints - * @param desc0 vector of the extracted descriptors - * @param ids0 vector of all new IDs + * @brief Descriptor-based visual tracking * - * Given a set of images, and their currently extracted features, this will try to add new features. - * We return all extracted descriptors here since we DO NOT need to do stereo tracking left to right. - * Our vector of IDs will be later overwritten when we match features temporally to the previous frame's features. - * See robust_match() for the matching. + * Here we use descriptor matching to track features from one frame to the next. + * We track both temporally, and across stereo pairs to get stereo contraints. + * Right now we use ORB descriptors as we have found it is the fastest when computing descriptors. 
*/ - void perform_detection_monocular(const cv::Mat& img0, std::vector& pts0, cv::Mat& desc0, std::vector& ids0); - - /** - * @brief Detects new features in the current stereo pair - * @param img0 left image we will detect features on - * @param img1 right image we will detect features on - * @param pts0 left vector of new keypoints - * @param pts1 right vector of new keypoints - * @param desc0 left vector of extracted descriptors - * @param desc1 left vector of extracted descriptors - * @param ids0 left vector of all new IDs - * @param ids1 right vector of all new IDs - * - * This does the same logic as the perform_detection_monocular() function, but we also enforce stereo contraints. - * We also do STEREO matching from the left to right, and only return good matches that are found in both the left and right. - * Our vector of IDs will be later overwritten when we match features temporally to the previous frame's features. - * See robust_match() for the matching. - */ - void perform_detection_stereo(const cv::Mat &img0, const cv::Mat &img1, std::vector &pts0, std::vector &pts1, - cv::Mat &desc0, cv::Mat &desc1, std::vector &ids0, std::vector &ids1); - - /** - * @brief Find matches between two keypoint+descriptor sets. - * @param pts0 first vector of keypoints - * @param pts1 second vector of keypoints - * @param desc0 first vector of descriptors - * @param desc1 second vector of decriptors - * @param matches vector of matches that we have found - * - * This will perform a "robust match" between the two sets of points (slow but has great results). - * First we do a simple KNN match from 1to2 and 2to1, which is followed by a ratio check and symmetry check. - * Original code is from the "RobustMatcher" in the opencv examples, and seems to give very good results in the matches. 
- * https://github.com/opencv/opencv/blob/master/samples/cpp/tutorial_code/calib3d/real_time_pose_estimation/src/RobustMatcher.cpp - */ - void robust_match(std::vector& pts0, std::vector pts1, - cv::Mat& desc0, cv::Mat& desc1, std::vector& matches); - - // Helper functions for the robust_match function - // Original code is from the "RobustMatcher" in the opencv examples - // https://github.com/opencv/opencv/blob/master/samples/cpp/tutorial_code/calib3d/real_time_pose_estimation/src/RobustMatcher.cpp - void robust_ratio_test(std::vector >& matches); - void robust_symmetry_test(std::vector >& matches1, std::vector >& matches2, std::vector& good_matches); - - // Timing variables - boost::posix_time::ptime rT1, rT2, rT3, rT4, rT5, rT6, rT7; - - // Our orb extractor - cv::Ptr orb0 = cv::ORB::create(); - cv::Ptr orb1 = cv::ORB::create(); - cv::Ptr freak0 = cv::xfeatures2d::FREAK::create(); - cv::Ptr freak1 = cv::xfeatures2d::FREAK::create(); - - // Our descriptor matcher - cv::Ptr matcher = cv::DescriptorMatcher::create("BruteForce-Hamming"); - - // Parameters for our FAST grid detector - int threshold; - int grid_x; - int grid_y; - - // The ratio between two kNN matches, if that ratio is larger then this threshold - // then the two features are too close, so should be considered ambiguous/bad match - double knn_ratio; - - // Descriptor matrices - std::map desc_last; - - -}; - + class TrackDescriptor : public TrackBase { + + public: + + /** + * @brief Public default constructor + * @param camera_k map of camera_id => 3x3 camera intrinic matrix + * @param camera_d map of camera_id => 4x1 camera distortion parameters + * @param camera_fisheye map of camera_id => bool if we should do radtan or fisheye distortion model + */ + TrackDescriptor(std::map camera_k, + std::map> camera_d, + std::map camera_fisheye) : + TrackBase(camera_k, camera_d, camera_fisheye), threshold(10), grid_x(8), grid_y(5), knn_ratio(0.75) {} + + /** + * @brief Public 
constructor with configuration variables + * @param camera_k map of camera_id => 3x3 camera intrinic matrix + * @param camera_d map of camera_id => 4x1 camera distortion parameters + * @param camera_fisheye map of camera_id => bool if we should do radtan or fisheye distortion model + * @param numfeats number of features we want want to track (i.e. track 200 points from frame to frame) + * @param numaruco the max id of the arucotags, so we ensure that we start our non-auroc features above this value + * @param fast_threshold FAST detection threshold + * @param gridx size of grid in the x-direction / u-direction + * @param gridy size of grid in the y-direction / v-direction + * @param knnratio matching ratio needed (smaller value forces top two descriptors during match to be more different) + */ + explicit TrackDescriptor(std::map camera_k, + std::map> camera_d, + std::map camera_fisheye, + int numfeats, int numaruco, int fast_threshold, int gridx, int gridy, double knnratio) + : + TrackBase(camera_k, camera_d, camera_fisheye, numfeats, numaruco), threshold(fast_threshold), + grid_x(gridx), grid_y(gridy), knn_ratio(knnratio) {} + + /** + * @brief Process a new monocular image + * @param timestamp timestamp the new image occurred at + * @param img new cv:Mat grayscale image + * @param cam_id the camera id that this new image corresponds too + */ + void feed_monocular(double timestamp, cv::Mat &img, size_t cam_id) override; + + /** + * @brief Process new stereo pair of images + * @param timestamp timestamp this pair occured at (stereo is synchronised) + * @param img_left first grayscaled image + * @param img_right second grayscaled image + * @param cam_id_left first image camera id + * @param cam_id_right second image camera id + */ + void feed_stereo(double timestamp, cv::Mat &img_left, cv::Mat &img_right, size_t cam_id_left, + size_t cam_id_right) override; + + + protected: + + /** + * @brief Detects new features in the current image + * @param img0 image we will 
detect features on + * @param pts0 vector of extracted keypoints + * @param desc0 vector of the extracted descriptors + * @param ids0 vector of all new IDs + * + * Given a set of images, and their currently extracted features, this will try to add new features. + * We return all extracted descriptors here since we DO NOT need to do stereo tracking left to right. + * Our vector of IDs will be later overwritten when we match features temporally to the previous frame's features. + * See robust_match() for the matching. + */ + void perform_detection_monocular(const cv::Mat &img0, std::vector &pts0, cv::Mat &desc0, + std::vector &ids0); + + /** + * @brief Detects new features in the current stereo pair + * @param img0 left image we will detect features on + * @param img1 right image we will detect features on + * @param pts0 left vector of new keypoints + * @param pts1 right vector of new keypoints + * @param desc0 left vector of extracted descriptors + * @param desc1 left vector of extracted descriptors + * @param ids0 left vector of all new IDs + * @param ids1 right vector of all new IDs + * + * This does the same logic as the perform_detection_monocular() function, but we also enforce stereo contraints. + * We also do STEREO matching from the left to right, and only return good matches that are found in both the left and right. + * Our vector of IDs will be later overwritten when we match features temporally to the previous frame's features. + * See robust_match() for the matching. + */ + void perform_detection_stereo(const cv::Mat &img0, const cv::Mat &img1, std::vector &pts0, + std::vector &pts1, + cv::Mat &desc0, cv::Mat &desc1, std::vector &ids0, + std::vector &ids1); + + /** + * @brief Find matches between two keypoint+descriptor sets. 
+ * @param pts0 first vector of keypoints + * @param pts1 second vector of keypoints + * @param desc0 first vector of descriptors + * @param desc1 second vector of decriptors + * @param matches vector of matches that we have found + * + * This will perform a "robust match" between the two sets of points (slow but has great results). + * First we do a simple KNN match from 1to2 and 2to1, which is followed by a ratio check and symmetry check. + * Original code is from the "RobustMatcher" in the opencv examples, and seems to give very good results in the matches. + * https://github.com/opencv/opencv/blob/master/samples/cpp/tutorial_code/calib3d/real_time_pose_estimation/src/RobustMatcher.cpp + */ + void robust_match(std::vector &pts0, std::vector pts1, + cv::Mat &desc0, cv::Mat &desc1, std::vector &matches); + + // Helper functions for the robust_match function + // Original code is from the "RobustMatcher" in the opencv examples + // https://github.com/opencv/opencv/blob/master/samples/cpp/tutorial_code/calib3d/real_time_pose_estimation/src/RobustMatcher.cpp + void robust_ratio_test(std::vector > &matches); + + void robust_symmetry_test(std::vector > &matches1, + std::vector > &matches2, + std::vector &good_matches); + + // Timing variables + boost::posix_time::ptime rT1, rT2, rT3, rT4, rT5, rT6, rT7; + + // Our orb extractor + cv::Ptr orb0 = cv::ORB::create(); + cv::Ptr orb1 = cv::ORB::create(); + cv::Ptr freak0 = cv::xfeatures2d::FREAK::create(); + cv::Ptr freak1 = cv::xfeatures2d::FREAK::create(); + + // Our descriptor matcher + cv::Ptr matcher = cv::DescriptorMatcher::create("BruteForce-Hamming"); + + // Parameters for our FAST grid detector + int threshold; + int grid_x; + int grid_y; + + // The ratio between two kNN matches, if that ratio is larger then this threshold + // then the two features are too close, so should be considered ambiguous/bad match + double knn_ratio; + + // Descriptor matrices + std::map desc_last; + + + 
}; + + +} #endif /* OV_CORE_TRACK_DESC_H */ \ No newline at end of file diff --git a/ov_core/src/track/TrackKLT.cpp b/ov_core/src/track/TrackKLT.cpp index 9fd5c3ef7..149311854 100644 --- a/ov_core/src/track/TrackKLT.cpp +++ b/ov_core/src/track/TrackKLT.cpp @@ -1,6 +1,9 @@ #include "TrackKLT.h" +using namespace ov_core; + + void TrackKLT::feed_monocular(double timestamp, cv::Mat &img, size_t cam_id) { // Start timing diff --git a/ov_core/src/track/TrackKLT.h b/ov_core/src/track/TrackKLT.h index 6b2a2da71..6e3e036d3 100644 --- a/ov_core/src/track/TrackKLT.h +++ b/ov_core/src/track/TrackKLT.h @@ -6,127 +6,137 @@ /** - * @brief KLT tracking of features. - * - * This is the implementation of a KLT visual frontend for tracking sparse features. - * We can track either monocular cameras across time (temporally) along with - * stereo cameras which we also track across time (temporally) but track from left to right - * to find the stereo correspondence information also. - * This uses the [calcOpticalFlowPyrLK](https://github.com/opencv/opencv/blob/master/modules/video/src/lkpyramid.cpp) OpenCV function to do the KLT tracking. 
+ * @namespace ov_core + * @brief Core algorithms for Open VINS */ -class TrackKLT : public TrackBase -{ +namespace ov_core { -public: /** - * @brief Public default constructor - * @param camera_k map of camera_id => 3x3 camera intrinic matrix - * @param camera_d map of camera_id => 4x1 camera distortion parameters - * @param camera_fisheye map of camera_id => bool if we should do radtan or fisheye distortion model - */ - TrackKLT(std::map camera_k, - std::map> camera_d, - std::map camera_fisheye): - TrackBase(camera_k,camera_d,camera_fisheye),threshold(10),grid_x(8),grid_y(5),min_px_dist(30) {} - - /** - * @brief Public constructor with configuration variables - * @param camera_k map of camera_id => 3x3 camera intrinic matrix - * @param camera_d map of camera_id => 4x1 camera distortion parameters - * @param camera_fisheye map of camera_id => bool if we should do radtan or fisheye distortion model - * @param numfeats number of features we want want to track (i.e. track 200 points from frame to frame) - * @param numaruco the max id of the arucotags, so we ensure that we start our non-auroc features above this value - * @param fast_threshold FAST detection threshold - * @param gridx size of grid in the x-direction / u-direction - * @param gridy size of grid in the y-direction / v-direction - * @param minpxdist features need to be at least this number pixels away from each other - */ - explicit TrackKLT(std::map camera_k, - std::map> camera_d, - std::map camera_fisheye, - int numfeats, int numaruco, int fast_threshold, int gridx, int gridy, int minpxdist): - TrackBase(camera_k,camera_d,camera_fisheye,numfeats,numaruco),threshold(fast_threshold),grid_x(gridx),grid_y(gridy),min_px_dist(minpxdist) {} - - - /** - * @brief Process a new monocular image - * @param timestamp timestamp the new image occurred at - * @param img new cv:Mat grayscale image - * @param cam_id the camera id that this new image corresponds too - */ - void feed_monocular(double timestamp, cv::Mat 
&img, size_t cam_id) override; - - /** - * @brief Process new stereo pair of images - * @param timestamp timestamp this pair occured at (stereo is synchronised) - * @param img_left first grayscaled image - * @param img_right second grayscaled image - * @param cam_id_left first image camera id - * @param cam_id_right second image camera id - */ - void feed_stereo(double timestamp, cv::Mat &img_left, cv::Mat &img_right, size_t cam_id_left, size_t cam_id_right) override; - - -protected: - - /** - * @brief Detects new features in the current image - * @param img0 image we will detect features on - * @param pts0 vector of currently extracted keypoints in this image - * @param ids0 vector of feature ids for each currently extracted keypoint + * @brief KLT tracking of features. * - * Given an image and its currently extracted features, this will try to add new features if needed. - * Will try to always have the "max_features" being tracked through KLT at each timestep. - * Passed images should already be grayscaled. + * This is the implementation of a KLT visual frontend for tracking sparse features. + * We can track either monocular cameras across time (temporally) along with + * stereo cameras which we also track across time (temporally) but track from left to right + * to find the stereo correspondence information also. + * This uses the [calcOpticalFlowPyrLK](https://github.com/opencv/opencv/blob/master/modules/video/src/lkpyramid.cpp) OpenCV function to do the KLT tracking. 
*/ - void perform_detection_monocular(const cv::Mat &img0, std::vector &pts0, std::vector &ids0); - - /** - * @brief Detects new features in the current stereo pair - * @param img0 left image we will detect features on - * @param img1 right image we will detect features on - * @param pts0 left vector of currently extracted keypoints - * @param pts1 right vector of currently extracted keypoints - * @param ids0 left vector of feature ids for each currently extracted keypoint - * @param ids1 right vector of feature ids for each currently extracted keypoint - * - * This does the same logic as the perform_detection_monocular() function, but we also enforce stereo contraints. - * So we detect features in the left image, and then KLT track them onto the right image. - * If we have valid tracks, then we have both the keypoint on the left and its matching point in the right image. - * Will try to always have the "max_features" being tracked through KLT at each timestep. - */ - void perform_detection_stereo(const cv::Mat &img0, const cv::Mat &img1, std::vector &pts0, - std::vector &pts1, std::vector &ids0, std::vector &ids1); - - /** - * @brief KLT track between two images, and do RANSAC afterwards - * @param img0 starting image - * @param img1 image we want to track too - * @param pts0 starting points - * @param pts1 points we have tracked - * @param mask_out what points had valid tracks - * - * This will track features from the first image into the second image. - * The two point vectors will be of equal size, but the mask_out variable will specify which points are good or bad. - * If the second vector is non-empty, it will be used as an initial guess of where the keypoints are in the second image. 
- */ - void perform_matching(const cv::Mat& img0, const cv::Mat& img1, std::vector& pts0, std::vector& pts1, std::vector& mask_out); - - // Timing variables - boost::posix_time::ptime rT1, rT2, rT3, rT4, rT5, rT6, rT7; - - // Parameters for our FAST grid detector - int threshold; - int grid_x; - int grid_y; - - // Minimum pixel distance to be "far away enough" to be a different extracted feature - int min_px_dist; - -}; - - + class TrackKLT : public TrackBase { + + public: + + /** + * @brief Public default constructor + * @param camera_k map of camera_id => 3x3 camera intrinic matrix + * @param camera_d map of camera_id => 4x1 camera distortion parameters + * @param camera_fisheye map of camera_id => bool if we should do radtan or fisheye distortion model + */ + TrackKLT(std::map camera_k, + std::map> camera_d, + std::map camera_fisheye) : + TrackBase(camera_k, camera_d, camera_fisheye), threshold(10), grid_x(8), grid_y(5), min_px_dist(30) {} + + /** + * @brief Public constructor with configuration variables + * @param camera_k map of camera_id => 3x3 camera intrinic matrix + * @param camera_d map of camera_id => 4x1 camera distortion parameters + * @param camera_fisheye map of camera_id => bool if we should do radtan or fisheye distortion model + * @param numfeats number of features we want want to track (i.e. 
track 200 points from frame to frame) + * @param numaruco the max id of the arucotags, so we ensure that we start our non-auroc features above this value + * @param fast_threshold FAST detection threshold + * @param gridx size of grid in the x-direction / u-direction + * @param gridy size of grid in the y-direction / v-direction + * @param minpxdist features need to be at least this number pixels away from each other + */ + explicit TrackKLT(std::map camera_k, + std::map> camera_d, + std::map camera_fisheye, + int numfeats, int numaruco, int fast_threshold, int gridx, int gridy, int minpxdist) : + TrackBase(camera_k, camera_d, camera_fisheye, numfeats, numaruco), threshold(fast_threshold), + grid_x(gridx), grid_y(gridy), min_px_dist(minpxdist) {} + + + /** + * @brief Process a new monocular image + * @param timestamp timestamp the new image occurred at + * @param img new cv:Mat grayscale image + * @param cam_id the camera id that this new image corresponds too + */ + void feed_monocular(double timestamp, cv::Mat &img, size_t cam_id) override; + + /** + * @brief Process new stereo pair of images + * @param timestamp timestamp this pair occured at (stereo is synchronised) + * @param img_left first grayscaled image + * @param img_right second grayscaled image + * @param cam_id_left first image camera id + * @param cam_id_right second image camera id + */ + void feed_stereo(double timestamp, cv::Mat &img_left, cv::Mat &img_right, size_t cam_id_left, size_t cam_id_right) override; + + + protected: + + /** + * @brief Detects new features in the current image + * @param img0 image we will detect features on + * @param pts0 vector of currently extracted keypoints in this image + * @param ids0 vector of feature ids for each currently extracted keypoint + * + * Given an image and its currently extracted features, this will try to add new features if needed. + * Will try to always have the "max_features" being tracked through KLT at each timestep. 
+ * Passed images should already be grayscaled. + */ + void + perform_detection_monocular(const cv::Mat &img0, std::vector &pts0, std::vector &ids0); + + /** + * @brief Detects new features in the current stereo pair + * @param img0 left image we will detect features on + * @param img1 right image we will detect features on + * @param pts0 left vector of currently extracted keypoints + * @param pts1 right vector of currently extracted keypoints + * @param ids0 left vector of feature ids for each currently extracted keypoint + * @param ids1 right vector of feature ids for each currently extracted keypoint + * + * This does the same logic as the perform_detection_monocular() function, but we also enforce stereo contraints. + * So we detect features in the left image, and then KLT track them onto the right image. + * If we have valid tracks, then we have both the keypoint on the left and its matching point in the right image. + * Will try to always have the "max_features" being tracked through KLT at each timestep. + */ + void perform_detection_stereo(const cv::Mat &img0, const cv::Mat &img1, std::vector &pts0, + std::vector &pts1, std::vector &ids0, std::vector &ids1); + + /** + * @brief KLT track between two images, and do RANSAC afterwards + * @param img0 starting image + * @param img1 image we want to track too + * @param pts0 starting points + * @param pts1 points we have tracked + * @param mask_out what points had valid tracks + * + * This will track features from the first image into the second image. + * The two point vectors will be of equal size, but the mask_out variable will specify which points are good or bad. + * If the second vector is non-empty, it will be used as an initial guess of where the keypoints are in the second image. 
+ */ + void perform_matching(const cv::Mat &img0, const cv::Mat &img1, std::vector &pts0, + std::vector &pts1, std::vector &mask_out); + + // Timing variables + boost::posix_time::ptime rT1, rT2, rT3, rT4, rT5, rT6, rT7; + + // Parameters for our FAST grid detector + int threshold; + int grid_x; + int grid_y; + + // Minimum pixel distance to be "far away enough" to be a different extracted feature + int min_px_dist; + + }; + + +} #endif /* OV_CORE_TRACK_KLT_H */ \ No newline at end of file diff --git a/ov_core/src/types/IMU.h b/ov_core/src/types/IMU.h deleted file mode 100644 index c9fa972d1..000000000 --- a/ov_core/src/types/IMU.h +++ /dev/null @@ -1,223 +0,0 @@ -// -// Created by keck on 5/23/19. -// - -#ifndef PROJECT_JPLQUAT_H -#define PROJECT_JPLQUAT_H - -#endif //PROJECT_JPLQUAT_H - -#include "utils/quat_ops.h" -#include "PoseJPL.h" - -/** - * @brief Derived Type class that implements an IMU state using a JPLPose, a vector velocity, vector gyro bias, - * and vector accel bias - * -*/ - -class IMU : public Type{ - -public: - - IMU() : Type(15){ - _pose = new PoseJPL(); - _v = new Vec(3); - _bg = new Vec(3); - _ba = new Vec(3); - - Eigen::Matrix imu0; - imu0.setZero(); - imu0(3) = 1.0; - - set_value(imu0); - set_fej(imu0); - } - - ~IMU() { } - - /** - * @brief Performs update operation using JPLQuat update for orientation, then vector updates for - * position, velocity, gyro bias, and accel bias (in that order) - * @param dx 15 DOF vector encoding update using the following order (q, p, v, bg, ba) - */ - - void update(const Eigen::VectorXd dx){ - - assert(dx.rows()== _size); - - Eigen::Matrix newX = _value; - - Eigen::Matrix dq; - dq << .5*dx.block(0,0,3,1) , 1.0; - dq= dq/dq.norm(); - - newX.block(0,0,4,1) = quat_multiply(dq, _value); - newX.block(4,0,3,1) += dx.block(3,0,3,1); - - newX.block(7,0,3,1) += dx.block(6,0,3,1); - newX.block(10,0,3,1) += dx.block(9,0,3,1); - newX.block(13,0,3,1) += dx.block(12,0,3,1); - - set_value(newX); - - } - - /**@brief Sets 
the value of the estimate - * @param new_value - */ - void set_value(const Eigen::VectorXd new_value){ - - _pose->set_value(new_value.block(0,0,7,1)); - _v->set_value(new_value.block(7,0,3,1)); - _bg->set_value(new_value.block(10,0,3,1)); - _ba->set_value(new_value.block(13,0,3,1)); - - _value = new_value; - } - - /**@brief Sets the value of the first estimate - * @param new_value - */ - void set_fej(const Eigen::VectorXd new_value){ - - _pose->set_fej_value(new_value.block(0,0,7,1)); - _v->set_fej(new_value.block(7,0,3,1)); - _bg->set_fej(new_value.block(10,0,3,1)); - _ba->set_fej(new_value.block(13,0,3,1)); - - _fej = new_value; - } - - Type* clone(){ - Type* Clone= new IMU(); - Clone->set_value(value()); - Clone->set_fej(fej()); - return Clone; - } - - /// Rotation access - Eigen::Matrix Rot() const{ - return _pose->Rot(); - } - - /// FEJ Rotation access - Eigen::Matrix Rot_fej() const{ - return _pose->Rot_fej(); - } - - /// Rotation access quat - Eigen::Matrix quat() const{ - return _pose->quat(); - } - - /// FEJ Rotation access quat - Eigen::Matrix quat_fej() const{ - return _pose->quat_fej(); - } - - - /// Position access - Eigen::Matrix pos() const{ - return _pose->pos(); - } - - /// FEJ position access - Eigen::Matrix pos_fej() const{ - return _pose->pos_fej(); - } - - /// Velocity access - Eigen::Matrix vel() const{ - return _v->value(); - } - - // FEJ velocity access - Eigen::Matrix vel_fej() const{ - return _v->fej(); - } - - /// Gyro bias access - Eigen::Matrix bias_g() const{ - return _bg->value(); - } - - /// FEJ gyro bias access - Eigen::Matrix bias_g_fej() const{ - return _bg->fej(); - } - - /// Accel bias access - Eigen::Matrix bias_a() const{ - return _ba->value(); - } - - // FEJ accel bias access - Eigen::Matrix bias_a_fej() const{ - return _ba->fej(); - } - - /** @brief Used to find the components inside the IMU if needed - * @param check variable to find - */ - Type* check_if_same_variable(Type* check){ - if (check== this){ - return this; - } - 
else if (check == _pose){ - return _pose; - } - else if (check == q()){ - return q(); - } - else if (check == p()){ - return p(); - } - else if (check == _v){ - return _v; - } - else if (check == _bg){ - return bg(); - } - else if (check == _ba){ - return ba(); - } - else{ - return nullptr; - } - } - - PoseJPL *pose(){ - return _pose; - } - JPLQuat *q(){ - return _pose->q(); - } - Vec *p(){ - return _pose->p(); - } - Vec *v(){ - return _v; - } - Vec *bg(){ - return _bg; - } - Vec *ba(){ - return _ba; - } - -protected: - - /// Pose subvariable - PoseJPL *_pose; - - /// Velocity subvariable - Vec *_v; - - /// Gyro bias subvariable - Vec *_bg; - - /// Accel bias subvariable - Vec *_ba; - -}; \ No newline at end of file diff --git a/ov_core/src/types/JPLQuat.h b/ov_core/src/types/JPLQuat.h deleted file mode 100644 index 3eadcd8de..000000000 --- a/ov_core/src/types/JPLQuat.h +++ /dev/null @@ -1,110 +0,0 @@ -// -// Created by keck on 5/23/19. -// - -#ifndef PROJECT_JPLQUAT_H -#define PROJECT_JPLQUAT_H - -#endif //PROJECT_JPLQUAT_H - -#include "utils/quat_ops.h" -#include "Type.h" - -/** - * @brief Derived Type class that implements the JPL convention quaternion with left-multiplicative error state - * -*/ - -class JPLQuat : public Type{ - -public: - - /** - * @brief Quaternion constructor - * - */ - - JPLQuat() : Type(3){ - Eigen::Matrix q0; - q0.setZero(); - q0(3) = 1.0; - set_value(q0); - set_fej(q0); - } - - ~JPLQuat() { } - - /** - * @brief Implements update operation by left-multiplying the current quaternion with a quaternion built - * from a small axis-angle perturbation - * @param dx Axis-angle representation of the perturbing quaternion - */ - - void update(const Eigen::VectorXd dx){ - - assert(dx.rows()== _size); - - //Build perturbing quaternion - Eigen::Matrix dq; - dq << .5*dx , 1.0; - dq= dq/dq.norm(); - - //Update estimate and recompute R - set_value(quat_multiply(dq, _value)); - - } - - /** - * @brief Sets the value of the estimate and recompute the 
rotation matrix - * @param new_value New value for the quaternion estimate - */ - void set_value(const Eigen::VectorXd new_value){ - - assert(new_value.rows() == 7); - - _value = new_value; - - //compute associated rotation - _R = quat_2_Rot(new_value); - } - - /** - * @brief Sets the fej value and recomputes the fej rotation matrix - * @param new_value New value for the quaternion estimate - */ - void set_fej_value(const Eigen::VectorXd new_value){ - - assert(new_value.rows() == 7); - - _fej = new_value; - - //compute associated rotation - _Rfej = quat_2_Rot(new_value); - } - - Type* clone(){ - Type* Clone= new JPLQuat(); - Clone->set_value(value()); - return Clone; - } - - ///Rotation access - Eigen::Matrix Rot() const{ - return _R; - } - - ///FEJ Rotation access - Eigen::Matrix Rot_fej() const{ - return _Rfej; - } - -protected: - - - //Stores the rotation - Eigen::Matrix _R; - - //Stores the first-estimate rotation - Eigen::Matrix _Rfej; - -}; \ No newline at end of file diff --git a/ov_core/src/types/PoseJPL.h b/ov_core/src/types/PoseJPL.h deleted file mode 100644 index 20dfdc726..000000000 --- a/ov_core/src/types/PoseJPL.h +++ /dev/null @@ -1,163 +0,0 @@ -// -// Created by keck on 5/23/19. 
-// - -#ifndef PROJECT_JPLPOSE_H -#define PROJECT_JPLPOSE_H - -#include "utils/quat_ops.h" -#include "JPLQuat.h" -#include "Vec.h" - - -/** - * @brief Derived Type class that implements a pose (orientation plus position) with a JPL quaternion representation for - * the orientation - * -*/ - -class PoseJPL : public Type{ - -public: - - PoseJPL() : Type(6){ - - //Initialize subvariables - _q = new JPLQuat(); - _p = new Vec(3); - - Eigen::Matrix pose0; - pose0.setZero(); - pose0(3) = 1.0; - set_value(pose0); - set_fej(pose0); - } - - ~PoseJPL() { } - - /** - *@brief Update q and p using a the JPLQuat update for orientation and vector update for position - * @param dx Correction vector (orientation then position) - */ - void update(const Eigen::VectorXd dx){ - - assert(dx.rows()== _size); - - Eigen::Matrix newX = _value; - - Eigen::Matrix dq; - dq << .5*dx.block(0,0,3,1) , 1.0; - dq= dq/dq.norm(); - - //Update orientation - newX.block(0,0,4,1) = quat_multiply(dq, quat()); - - //Update position - newX.block(4,0,3,1) += dx.block(3,0,3,1); - - set_value(newX); - - } - - //Sets the value of the estimate - void set_value(const Eigen::VectorXd new_value){ - - assert(new_value.rows() == 7); - - //Set orientation value - _q->set_value(new_value.block(0,0,4,1)); - - //Set position value - _p->set_value(new_value.block(4,0,3,1)); - - _value = new_value; - } - - //Sets the value of the first estimate - void set_fej_value(const Eigen::VectorXd new_value){ - - assert(new_value.rows() == 7); - //Set orientation fej value - _q->set_fej(new_value.block(0,0,4,1)); - - //Set position fej value - _p->set_fej(new_value.block(4,0,3,1)); - - _fej = new_value; - } - - Type* clone(){ - Type* Clone= new PoseJPL(); - Clone->set_value(value()); - Clone->set_fej(fej()); - return Clone; - } - - /// Rotation access - Eigen::Matrix Rot() const{ - return _q->Rot(); - } - - /// FEJ Rotation access - Eigen::Matrix Rot_fej() const{ - return _q->Rot_fej();; - } - - /// Rotation access as quaternion - 
Eigen::Matrix quat() const{ - return _q->value(); - } - - /// FEJ Rotation access as quaternion - Eigen::Matrix quat_fej() const{ - return _q->fej(); - } - - - /// Position access - Eigen::Matrix pos() const{ - return _p->value(); - } - - // FEJ position access - Eigen::Matrix pos_fej() const{ - return _p->fej(); - } - - /** @brief Used to find the components inside the Pose if needed - * @param check variable to find - */ - Type* check_if_same_variable(Type* check){ - if (check== this){ - return this; - } - else if (check == _q){ - return q(); - } - else if (check == _p){ - return p(); - } - else{ - return nullptr; - } - } - - JPLQuat * q(){ - return _q; - } - - Vec * p(){ - return _p; - } - -protected: - - ///Subvariable containing orientation - JPLQuat *_q; - - ///Subvariable containg position - Vec * _p; - -}; - -#endif //PROJECT_JPLPOSE_H \ No newline at end of file diff --git a/ov_core/src/types/Type.h b/ov_core/src/types/Type.h deleted file mode 100644 index a5ccf4dfb..000000000 --- a/ov_core/src/types/Type.h +++ /dev/null @@ -1,123 +0,0 @@ -// -// Created by keck on 5/23/19. -// - -#ifndef PROJECT_TYPE_H -#define PROJECT_TYPE_H - -#include - -/** - * @brief Base class for estimated variables. 
- * - * This class is used how variables are represented or updated (e.g., vectors or quaternions) - * -*/ -class Type { - -public: - - /** - * @brief Default constructor for our Type - * @param size_ degrees of freedom of variable (i.e., the size of the error state) - */ - - - Type(int size_){_size= size_;} - - virtual ~Type() { }; - - /** - * @brief Sets id used to track location of variable in the filter covariance - * @param new_id entry in filter covariance corresponding to this variable - */ - virtual void set_local_id(int new_id){ - _id= new_id; - } - - /** - * @brief Access to variable id - */ - int id(){ - return _id; - } - - /** - * @brief Access to variable size - */ - int size(){ - return _size; - } - - /** - * @brief Update variable due to perturbation of error state - * @param dx Perturbation used to update the variable through a defined "boxplus" operation - */ - virtual void update(const Eigen::VectorXd dx) = 0; - - /** - * @brief Access variable's estimate - */ - virtual Eigen::VectorXd value() const{ - return _value; - } - - /** - * @brief Access variable's first-estimate - */ - virtual Eigen::VectorXd fej() const{ - return _fej; - } - - /** - * @brief Overwrite value of state's estimate - * @param new_value New value that will overwrite state's value - */ - virtual void set_value(const Eigen::VectorXd new_value){ - _value = new_value; - } - - /** - * @brief Overwrite value of first-estimate - * @param new_value New value that will overwrite state's fej - */ - virtual void set_fej(const Eigen::VectorXd new_value){ - _fej = new_value; - } - - /** - * @brief Create a clone of this variable - */ - virtual Type* clone()=0; - - /** - * @brief Determine if "check" is the same variable - * @param check Type pointer to compare to - */ - virtual Type* check_if_same_variable(const Type* check){ - if (check == this){ - return this; - } - else{ - return nullptr; - } - } - -protected: - - /// First-estimate - Eigen::VectorXd _fej; - - /// Current best estimate 
- Eigen::VectorXd _value; - - /// Location of error state in covariance - int _id = -1; - - /// Dimension of error state - int _size = -1; - - -}; - -#endif //PROJECT_TYPE_H \ No newline at end of file diff --git a/ov_core/src/types/Vec.h b/ov_core/src/types/Vec.h deleted file mode 100644 index 643d0d964..000000000 --- a/ov_core/src/types/Vec.h +++ /dev/null @@ -1,57 +0,0 @@ -// -// Created by keck on 5/23/19. -// - -#ifndef PROJECT_VEC_H -#define PROJECT_VEC_H - -#include "Type.h" - -/** - * @brief Derived Type class that implements vector variables - * -*/ - -class Vec: public Type { - -public: - - - /** - * @brief Default constructor for Vec - * @param dim Size of the vector (will be same as error state) - */ - Vec(int dim) : Type(dim){ - _value = Eigen::VectorXd::Zero(dim); - _fej = Eigen::VectorXd::Zero(dim); - } - - ~Vec() { } - - - /** - * @brief Implements the update operation through standard vector addition - * @param dx Additive error state correction - */ - void update(const Eigen::VectorXd dx){ - - assert(dx.rows() == _size); - set_value(_value + dx); - } - - - - /** - * @brief Performs all the cloning - */ - Type* clone(){ - Type* Clone= new Vec(_size); - Clone->set_value(value()); - Clone->set_fej(fej()); - return Clone; - } - - -}; - -#endif //PROJECT_VEC_H \ No newline at end of file diff --git a/ov_core/src/utils/dataset_reader.h b/ov_core/src/utils/dataset_reader.h index 1d0f83c24..d14d0469b 100644 --- a/ov_core/src/utils/dataset_reader.h +++ b/ov_core/src/utils/dataset_reader.h @@ -29,109 +29,113 @@ using namespace std; - -/// Our groundtruth states loaded, see load_gt_file() -std::map> gt_states; - - /** - * @brief Load a ASL format groundtruth file - * @param path Path to the CSV file of groundtruth data - * - * Here we will try to load a groundtruth file that is in the ASL/EUROCMAV format. - * If we can't open the file, or it is in the wrong format we will error and exit the program. 
- * See get_gt_state() for a way to get the groundtruth state at a given timestep + * @namespace ov_core + * @brief Core algorithms for Open VINS */ -void load_gt_file(std::string path) { - - // Open the file - std::ifstream file; - std::string line; - file.open(path); - - // Check that it was successfull - if (!file) { - ROS_ERROR("ERROR: Unable to open groundtruth file..."); - ROS_ERROR("ERROR: %s",path.c_str()); - std::exit(EXIT_FAILURE); - } +namespace ov_core { + + /// Our groundtruth states loaded, see load_gt_file() + std::map> gt_states; + + /** + * @brief Load a ASL format groundtruth file + * @param path Path to the CSV file of groundtruth data + * + * Here we will try to load a groundtruth file that is in the ASL/EUROCMAV format. + * If we can't open the file, or it is in the wrong format we will error and exit the program. + * See get_gt_state() for a way to get the groundtruth state at a given timestep + */ + void load_gt_file(std::string path) { + + // Open the file + std::ifstream file; + std::string line; + file.open(path); + + // Check that it was successfull + if (!file) { + ROS_ERROR("ERROR: Unable to open groundtruth file..."); + ROS_ERROR("ERROR: %s", path.c_str()); + std::exit(EXIT_FAILURE); + } - // Skip the first line as it is just the header - std::getline(file, line); - - // Loop through each line in the file - while(std::getline(file, line) && ros::ok()) { - // Loop variables - int i = 0; - std::istringstream s(line); - std::string field; - Eigen::Matrix temp; - // Loop through this line - while (getline(s,field,',')) { - // Ensure we are in the range - if(i > 16) { - ROS_ERROR("ERROR: Invalid groudtruth line, too long!"); - ROS_ERROR("ERROR: %s",line.c_str()); - std::exit(EXIT_FAILURE); + // Skip the first line as it is just the header + std::getline(file, line); + + // Loop through each line in the file + while (std::getline(file, line) && ros::ok()) { + // Loop variables + int i = 0; + std::istringstream s(line); + std::string field; + 
Eigen::Matrix temp; + // Loop through this line + while (getline(s, field, ',')) { + // Ensure we are in the range + if (i > 16) { + ROS_ERROR("ERROR: Invalid groudtruth line, too long!"); + ROS_ERROR("ERROR: %s", line.c_str()); + std::exit(EXIT_FAILURE); + } + // Save our groundtruth state value + temp(i, 0) = std::atof(field.c_str()); + i++; } - // Save our groundtruth state value - temp(i,0) = std::atof(field.c_str()); - i++; + // Append to our groundtruth map + gt_states.insert({1e-9 * temp(0, 0), temp}); } - // Append to our groundtruth map - gt_states.insert({1e-9*temp(0,0),temp}); + file.close(); } - file.close(); -} + /** + * @brief Gets the 16x1 groundtruth state at a given timestep + * @param timestep timestep we want to get the groundtruth for + * @param imustate groundtruth state [time(sec),q_GtoI,p_IinG,v_IinG,b_gyro,b_accel] + * @return true if we found the state, false otherwise + */ + bool get_gt_state(double timestep, Eigen::Matrix &imustate) { -/** - * @brief Gets the 16x1 groundtruth state at a given timestep - * @param timestep timestep we want to get the groundtruth for - * @param imustate groundtruth state [time(sec),q_GtoI,p_IinG,v_IinG,b_gyro,b_accel] - * @return true if we found the state, false otherwise - */ -bool get_gt_state(double timestep, Eigen::Matrix& imustate) { + // Check that we even have groundtruth loaded + if (gt_states.empty()) { + ROS_ERROR_THROTTLE(5, + "Groundtruth data loaded is empty, make sure you call load before asking for a state."); + return false; + } - // Check that we even have groundtruth loaded - if(gt_states.empty()) { - ROS_ERROR_THROTTLE(5,"Groundtruth data loaded is empty, make sure you call load before asking for a state."); - return false; - } + // Check that we have the timestamp in our GT file + if (gt_states.find(timestep) == gt_states.end()) { + ROS_WARN_THROTTLE(5, "Unable to find %.6f timestamp in GT file, wrong GT file loaded???", timestep); + return false; + } - // Check that we have the 
timestamp in our GT file - if(gt_states.find(timestep) == gt_states.end()) { - ROS_WARN_THROTTLE(5,"Unable to find %.6f timestamp in GT file, wrong GT file loaded???",timestep); - return false; + // Get the GT state vector + Eigen::Matrix state = gt_states[timestep]; + + // Our "fixed" state vector from the ETH GT format [q,p,v,bg,ba] + imustate(0, 0) = state(5, 0); //quat + imustate(1, 0) = state(6, 0); + imustate(2, 0) = state(7, 0); + imustate(3, 0) = state(4, 0); + imustate(4, 0) = state(1, 0); //pos + imustate(5, 0) = state(2, 0); + imustate(6, 0) = state(3, 0); + imustate(7, 0) = state(8, 0); //vel + imustate(8, 0) = state(9, 0); + imustate(9, 0) = state(10, 0); + imustate(10, 0) = state(11, 0); //bg + imustate(11, 0) = state(12, 0); + imustate(12, 0) = state(13, 0); + imustate(13, 0) = state(14, 0); //ba + imustate(14, 0) = state(15, 0); + imustate(15, 0) = state(16, 0); + + // Success! + return true; } - // Get the GT state vector - Eigen::Matrix state = gt_states[timestep]; - - // Our "fixed" state vector from the ETH GT format [q,p,v,bg,ba] - imustate(0,0) = state(5,0); //quat - imustate(1,0) = state(6,0); - imustate(2,0) = state(7,0); - imustate(3,0) = state(4,0); - imustate(4,0) = state(1,0); //pos - imustate(5,0) = state(2,0); - imustate(6,0) = state(3,0); - imustate(7,0) = state(8,0); //vel - imustate(8,0) = state(9,0); - imustate(9,0) = state(10,0); - imustate(10,0) = state(11,0); //bg - imustate(11,0) = state(12,0); - imustate(12,0) = state(13,0); - imustate(13,0) = state(14,0); //ba - imustate(14,0) = state(15,0); - imustate(15,0) = state(16,0); - - // Success! 
- return true; } - - #endif /* OV_CORE_DATASET_READER_H */ \ No newline at end of file diff --git a/ov_core/src/utils/quat_ops.h b/ov_core/src/utils/quat_ops.h index 0c04abc63..6d249ed30 100644 --- a/ov_core/src/utils/quat_ops.h +++ b/ov_core/src/utils/quat_ops.h @@ -2,7 +2,7 @@ #define OV_CORE_QUAT_OPS_H /** - * @file JPL quaternion math utilities + * @file quat_ops.h * @brief JPL quaternion math utilities * * @section Summary @@ -54,309 +54,318 @@ using namespace std; /** - * \brief Returns a JPL quaternion from a rotation matrix - * \param[in] rot 3x3 rotation matrix - * \return 4x1 quaternion - * - * This is based on the equation 74 in [Trawny2005]. - * In the implementation, we have 4 statements so that we avoid a division by zero and - * instead always divide by the largest diagonal element. This all comes from the - * definition of a rotation matrix, using the diagonal elements and an off-diagonal. - * \f[ - * \mathbf{R}(\bar{q})= - * \begin{bmatrix} - * q_1^2-q_2^2-q_3^2+q_4^2 & 2(q_1q_2+q_3q_4) & 2(q_1q_3-q_2q_4) \\ - * 2(q_1q_2-q_3q_4) & -q_2^2+q_2^2-q_3^2+q_4^2 & 2(q_2q_3+q_1q_4) \\ - * 2(q_1q_3+q_2q_4) & 2(q_2q_3-q_1q_4) & -q_1^2-q_2^2+q_3^2+q_4^2 - * \end{bmatrix} - * \f] + * @namespace ov_core + * @brief Core algorithms for Open VINS */ -inline Eigen::Matrix rot_2_quat(const Eigen::Matrix& rot) { - Eigen::Matrix q; - double T = rot.trace(); - if ((rot(0, 0) >= T) && (rot(0, 0) >= rot(1, 1)) && (rot(0, 0) >= rot(2, 2))) { - //cout << "case 1- " << endl; - q(0) = sqrt((1 + (2 * rot(0, 0)) - T) / 4); - q(1) = (1 / (4 * q(0))) * (rot(0, 1) + rot(1, 0)); - q(2) = (1 / (4 * q(0))) * (rot(0, 2) + rot(2, 0)); - q(3) = (1 / (4 * q(0))) * (rot(1, 2) - rot(2, 1)); - - } - else if ((rot(1, 1) >= T) && (rot(1, 1) >= rot(0, 0)) && (rot(1, 1) >= rot(2, 2))) { - //cout << "case 2- " << endl; - q(1) = sqrt((1 + (2 * rot(1, 1)) - T) / 4); - q(0) = (1 / (4 * q(1))) * (rot(0, 1) + rot(1, 0)); - q(2) = (1 / (4 * q(1))) * (rot(1, 2) + rot(2, 1)); - q(3) = (1 / (4 * q(1))) 
* (rot(2, 0) - rot(0, 2)); - } - else if ((rot(2, 2) >= T) && (rot(2, 2) >= rot(0, 0)) && (rot(2, 2) >= rot(1, 1))) { - //cout << "case 3- " << endl; - q(2) = sqrt((1 + (2 * rot(2, 2)) - T) / 4); - q(0) = (1 / (4 * q(2))) * (rot(0, 2) + rot(2, 0)); - q(1) = (1 / (4 * q(2))) * (rot(1, 2) + rot(2, 1)); - q(3) = (1 / (4 * q(2))) * (rot(0, 1) - rot(1, 0)); - } - else { - //cout << "case 4- " << endl; - q(3) = sqrt((1 + T) / 4); - q(0) = (1 / (4 * q(3))) * (rot(1, 2) - rot(2, 1)); - q(1) = (1 / (4 * q(3))) * (rot(2, 0) - rot(0, 2)); - q(2) = (1 / (4 * q(3))) * (rot(0, 1) - rot(1, 0)); - } - if (q(3) < 0) { - q = -q; +namespace ov_core { + + + /** + * \brief Returns a JPL quaternion from a rotation matrix + * \param[in] rot 3x3 rotation matrix + * \return 4x1 quaternion + * + * This is based on the equation 74 in [Trawny2005]. + * In the implementation, we have 4 statements so that we avoid a division by zero and + * instead always divide by the largest diagonal element. This all comes from the + * definition of a rotation matrix, using the diagonal elements and an off-diagonal. 
+ * \f[ + * \mathbf{R}(\bar{q})= + * \begin{bmatrix} + * q_1^2-q_2^2-q_3^2+q_4^2 & 2(q_1q_2+q_3q_4) & 2(q_1q_3-q_2q_4) \\ + * 2(q_1q_2-q_3q_4) & -q_2^2+q_2^2-q_3^2+q_4^2 & 2(q_2q_3+q_1q_4) \\ + * 2(q_1q_3+q_2q_4) & 2(q_2q_3-q_1q_4) & -q_1^2-q_2^2+q_3^2+q_4^2 + * \end{bmatrix} + * \f] + */ + inline Eigen::Matrix rot_2_quat(const Eigen::Matrix &rot) { + Eigen::Matrix q; + double T = rot.trace(); + if ((rot(0, 0) >= T) && (rot(0, 0) >= rot(1, 1)) && (rot(0, 0) >= rot(2, 2))) { + //cout << "case 1- " << endl; + q(0) = sqrt((1 + (2 * rot(0, 0)) - T) / 4); + q(1) = (1 / (4 * q(0))) * (rot(0, 1) + rot(1, 0)); + q(2) = (1 / (4 * q(0))) * (rot(0, 2) + rot(2, 0)); + q(3) = (1 / (4 * q(0))) * (rot(1, 2) - rot(2, 1)); + + } else if ((rot(1, 1) >= T) && (rot(1, 1) >= rot(0, 0)) && (rot(1, 1) >= rot(2, 2))) { + //cout << "case 2- " << endl; + q(1) = sqrt((1 + (2 * rot(1, 1)) - T) / 4); + q(0) = (1 / (4 * q(1))) * (rot(0, 1) + rot(1, 0)); + q(2) = (1 / (4 * q(1))) * (rot(1, 2) + rot(2, 1)); + q(3) = (1 / (4 * q(1))) * (rot(2, 0) - rot(0, 2)); + } else if ((rot(2, 2) >= T) && (rot(2, 2) >= rot(0, 0)) && (rot(2, 2) >= rot(1, 1))) { + //cout << "case 3- " << endl; + q(2) = sqrt((1 + (2 * rot(2, 2)) - T) / 4); + q(0) = (1 / (4 * q(2))) * (rot(0, 2) + rot(2, 0)); + q(1) = (1 / (4 * q(2))) * (rot(1, 2) + rot(2, 1)); + q(3) = (1 / (4 * q(2))) * (rot(0, 1) - rot(1, 0)); + } else { + //cout << "case 4- " << endl; + q(3) = sqrt((1 + T) / 4); + q(0) = (1 / (4 * q(3))) * (rot(1, 2) - rot(2, 1)); + q(1) = (1 / (4 * q(3))) * (rot(2, 0) - rot(0, 2)); + q(2) = (1 / (4 * q(3))) * (rot(0, 1) - rot(1, 0)); + } + if (q(3) < 0) { + q = -q; + } + // normalize and return + q = q / (q.norm()); + return q; } - // normalize and return - q = q / (q.norm()); - return q; -} - -/** - * @brief Skew-symmetric matrix from a given 3x1 vector - * @param[in] w 3x1 vector to be made a skew-symmetric - * @return 3x3 skew-symmetric matrix - * - * This is based on equation 6 in [Trawny2005]: - * @f[ - * 
\lfloor\mathbf{v}\times\rfloor = - * \begin{bmatrix} - * 0 & -v_3 & v_1 \\ v_3 & 0 & -v_1 \\ -v_2 & v_1 & 0 - * \end{bmatrix} - * @f] - */ -inline Eigen::Matrix skew_x(const Eigen::Matrix& w) { - Eigen::Matrix w_x; - w_x << 0, -w(2), w(1), - w(2), 0, -w(0), - -w(1), w(0), 0; - return w_x; -} - - -/** - * @brief Converts JPL quaterion to SO(3) rotation matrix - * @param[in] q JPL quaternion - * @return 3x3 SO(3) rotation matrix - * - * This is based on equation 62 in [Trawny2005]: - * @f[ - * \mathbf{R} = (2q_4^2-1)\mathbf{I}_3-2q_4\lfloor\mathbf{q}\times\rfloor+2\mathbf{q}^\top\mathbf{q} - * @f] - */ -inline Eigen::Matrix quat_2_Rot(const Eigen::Matrix& q) { - Eigen::Matrix q_x = skew_x(q.block(0, 0, 3, 1)); - Eigen::MatrixXd Rot = (2 * std::pow(q(3, 0),2) - 1) * Eigen::MatrixXd::Identity(3, 3) - - 2 * q(3, 0) * q_x + - 2 * q.block(0, 0, 3, 1) * (q.block(0, 0, 3, 1).transpose()); - return Rot; -} - -/** - * @brief Multiply two JPL quaternions - * @param[in] q First JPL quaternion - * @param[in] p Second JPL quaternion - * @return 4x1 resulting p*q quaternion - * - * This is based on equation 9 in [Trawny2005]. - * We also enforce that the quaternion is unique by having q_4 be greater than zero. 
- * @f[ - * \bar{q}\otimes\bar{p}= - * \mathcal{L}(\bar{q})\bar{p}= - * \begin{bmatrix} - * q_4\mathbf{I}_3+\lfloor\mathbf{q}\times\rfloor & \mathbf{q} \\ - * -\mathbf{q}^\top & q_4 - * \end{bmatrix} - * \begin{bmatrix} - * \mathbf{p} \\ p_4 - * \end{bmatrix} - * @f] - */ -inline Eigen::Matrix quat_multiply(const Eigen::Matrix& q, const Eigen::Matrix& p) { - Eigen::Matrix q_t; - Eigen::Matrix Qm; - // create big L matrix - Qm.block(0, 0, 3, 3) = q(3, 0) * Eigen::MatrixXd::Identity(3, 3) - skew_x(q.block(0, 0, 3, 1)); - Qm.block(0, 3, 3, 1) = q.block(0, 0, 3, 1); - Qm.block(3, 0, 1, 3) = -q.block(0, 0, 3, 1).transpose(); - Qm(3, 3) = q(3, 0); - q_t = Qm * p; - // ensure unique by forcing q_4 to be >0 - if (q_t(3,0) < 0){ - q_t *= -1; + /** + * @brief Skew-symmetric matrix from a given 3x1 vector + * @param[in] w 3x1 vector to be made a skew-symmetric + * @return 3x3 skew-symmetric matrix + * + * This is based on equation 6 in [Trawny2005]: + * @f[ + * \lfloor\mathbf{v}\times\rfloor = + * \begin{bmatrix} + * 0 & -v_3 & v_2 \\ v_3 & 0 & -v_1 \\ -v_2 & v_1 & 0 + * \end{bmatrix} + * @f] + */ + inline Eigen::Matrix skew_x(const Eigen::Matrix &w) { + Eigen::Matrix w_x; + w_x << 0, -w(2), w(1), + w(2), 0, -w(0), + -w(1), w(0), 0; + return w_x; } - // normalize and return - return q_t/q_t.norm(); -} -/** - * @brief Returns vector portion of skew-symmetric - * @param[in] w_x skew-symmetric matrix - * @return 3x1 vector portion of skew - * - * See skew_x() for details. - */ -inline Eigen::Matrix vee(const Eigen::Matrix& w_x) { - Eigen::Matrix w; - w << w_x(2, 1), w_x(0, 2), w_x(1, 0); - return w; -} - - - -/** - * @brief SO(3) matrix exponential - * @param[in] w 3x1 vector we will take the exponential of - * @return SO(3) rotation matrix - * - * SO(3) matrix exponential mapping from the vector to SO(3) lie group. - * This formula ends up being the [Rodrigues formula](https://en.wikipedia.org/wiki/Rodrigues%27_rotation_formula). 
- * This definition was taken from "Lie Groups for 2D and 3D Transformations" by Ethan Eade equation 15. - * http://ethaneade.com/lie.pdf - * - * @f[ - * \exp\colon\mathfrak{so}(3)\to SO(3) - * @f] - * @f[ - * \exp(\mathbf{v}) = - * \mathbf{I} - * +\frac{\sin{\theta}}{\theta}\lfloor\mathbf{v}\times\rfloor - * +\frac{1-\cos{\theta}}{\theta^2}\lfloor\mathbf{v}\times\rfloor^2 - * @f] - * @f[ - * \mathrm{where}\quad \theta^2 = \mathbf{v}^\top\mathbf{v} - * @f] - * - * @todo Ethan Eade notes that one should use the Taylor series expansion of the coefficients - * of the second and third terms when theta is small. Should we use that instead?? - * Right now we just handle if theta is zero, we set the exponential to be identity. - */ -inline Eigen::Matrix Exp(const Eigen::Matrix& w) { - - Eigen::Matrix w_x = skew_x(w); - - double theta = w.norm(); - - Eigen::Matrix R; - - if (theta ==0){ - R = Eigen::MatrixXd::Identity(3,3); + /** + * @brief Converts JPL quaterion to SO(3) rotation matrix + * @param[in] q JPL quaternion + * @return 3x3 SO(3) rotation matrix + * + * This is based on equation 62 in [Trawny2005]: + * @f[ + * \mathbf{R} = (2q_4^2-1)\mathbf{I}_3-2q_4\lfloor\mathbf{q}\times\rfloor+2\mathbf{q}^\top\mathbf{q} + * @f] + */ + inline Eigen::Matrix quat_2_Rot(const Eigen::Matrix &q) { + Eigen::Matrix q_x = skew_x(q.block(0, 0, 3, 1)); + Eigen::MatrixXd Rot = (2 * std::pow(q(3, 0), 2) - 1) * Eigen::MatrixXd::Identity(3, 3) + - 2 * q(3, 0) * q_x + + 2 * q.block(0, 0, 3, 1) * (q.block(0, 0, 3, 1).transpose()); + return Rot; } - else{ - R = Eigen::MatrixXd::Identity(3, 3) + (sin(theta) / theta) * (w_x) + ((1 - cos(theta)) / pow(theta,2)) * w_x * w_x; - } - return R; -} - -/** - * @brief SO(3) matrix logarithm - * @param[in] R 3x3 SO(3) rotation matrix - * @return 3x3 skew-symmetric matrix - * - * This definition was taken from "Lie Groups for 2D and 3D Transformations" by Ethan Eade equation 17 & 18. 
- * http://ethaneade.com/lie.pdf - * - * @f[ - * \theta = \textrm{arccos}(0.5(\textrm{trace}(\mathbf{R})-1)) - * @f] - * @f[ - * \lfloor\mathbf{v}\times\rfloor = \frac{\theta}{2\sin{\theta}}(\mathbf{R}-\mathbf{R}^\top) - * @f] - * - * @todo Ethan Eade notes that one should use the Taylor series expansion of the coefficients - * of the second and third terms when theta is small. Should we use that instead?? - * Right now we just handle if near the identity, we set the log to be all zeros. - */ -inline Eigen::Matrix Log(const Eigen::Matrix& R) { - Eigen::Matrix w_x; - // magnitude of the skew elements - double theta = std::acos(0.5 * (R.trace() - 1)); - // calculate the skew symetric matrix - w_x = (theta / (2 * sin(theta))) * (R - R.transpose()); - // check if we are near the identity - if (R != Eigen::MatrixXd::Identity(3,3)) { - return w_x; - } else { - return Eigen::MatrixXd::Zero(3,3); + /** + * @brief Multiply two JPL quaternions + * @param[in] q First JPL quaternion + * @param[in] p Second JPL quaternion + * @return 4x1 resulting p*q quaternion + * + * This is based on equation 9 in [Trawny2005]. + * We also enforce that the quaternion is unique by having q_4 be greater than zero. 
+ * @f[ + * \bar{q}\otimes\bar{p}= + * \mathcal{L}(\bar{q})\bar{p}= + * \begin{bmatrix} + * q_4\mathbf{I}_3+\lfloor\mathbf{q}\times\rfloor & \mathbf{q} \\ + * -\mathbf{q}^\top & q_4 + * \end{bmatrix} + * \begin{bmatrix} + * \mathbf{p} \\ p_4 + * \end{bmatrix} + * @f] + */ + inline Eigen::Matrix + quat_multiply(const Eigen::Matrix &q, const Eigen::Matrix &p) { + Eigen::Matrix q_t; + Eigen::Matrix Qm; + // create big L matrix + Qm.block(0, 0, 3, 3) = q(3, 0) * Eigen::MatrixXd::Identity(3, 3) - skew_x(q.block(0, 0, 3, 1)); + Qm.block(0, 3, 3, 1) = q.block(0, 0, 3, 1); + Qm.block(3, 0, 1, 3) = -q.block(0, 0, 3, 1).transpose(); + Qm(3, 3) = q(3, 0); + q_t = Qm * p; + // ensure unique by forcing q_4 to be >0 + if (q_t(3, 0) < 0) { + q_t *= -1; + } + // normalize and return + return q_t / q_t.norm(); } -} - -/** - * \brief JPL Quaternion inverse - * \param[in] q quaternion we want to change - * \return inversed quaternion - * - * See equation 21 in [Trawny2005]. - * \f[ - * \bar{q}^{-1} = \begin{bmatrix} -\mathbf{q} \\ q_4 \end{bmatrix} - * \f] - */ -inline Eigen::Matrix Inv(Eigen::Matrix q){ - Eigen::Matrix qinv; - qinv.block(0,0,3,1)= -q.block(0,0,3,1); - qinv(3,0)= q(3,0); - return qinv; - -} - -/** - * \brief equation (48) of trawny tech report - */ -inline Eigen::Matrix Omega(Eigen::Matrix w ){ - Eigen::Matrix mat; - mat.block(0,0,3,3) = -skew_x(w); - mat.block(3,0,1,3) = -w.transpose(); - mat.block(0,3,3,1) = w; - mat(3,3) = 0; - return mat; -}; -/** - * \brief Normalizes a quaternion to make sure it is unit norm - */ -inline Eigen::Matrix quatnorm(Eigen::Matrix q_t) { - if (q_t(3,0) <0){ - q_t*=-1; + /** + * @brief Returns vector portion of skew-symmetric + * @param[in] w_x skew-symmetric matrix + * @return 3x1 vector portion of skew + * + * See skew_x() for details. 
+ */ + inline Eigen::Matrix vee(const Eigen::Matrix &w_x) { + Eigen::Matrix w; + w << w_x(2, 1), w_x(0, 2), w_x(1, 0); + return w; } - return q_t/q_t.norm(); -}; - -/** - * Computes left Jacobian of SO(3) - * @param w axis-angle - */ -inline Eigen::Matrix Jl_so3(Eigen::Matrix w ){ - double theta= w.norm(); + /** + * @brief SO(3) matrix exponential + * @param[in] w 3x1 vector we will take the exponential of + * @return SO(3) rotation matrix + * + * SO(3) matrix exponential mapping from the vector to SO(3) lie group. + * This formula ends up being the [Rodrigues formula](https://en.wikipedia.org/wiki/Rodrigues%27_rotation_formula). + * This definition was taken from "Lie Groups for 2D and 3D Transformations" by Ethan Eade equation 15. + * http://ethaneade.com/lie.pdf + * + * @f[ + * \exp\colon\mathfrak{so}(3)\to SO(3) + * @f] + * @f[ + * \exp(\mathbf{v}) = + * \mathbf{I} + * +\frac{\sin{\theta}}{\theta}\lfloor\mathbf{v}\times\rfloor + * +\frac{1-\cos{\theta}}{\theta^2}\lfloor\mathbf{v}\times\rfloor^2 + * @f] + * @f[ + * \mathrm{where}\quad \theta^2 = \mathbf{v}^\top\mathbf{v} + * @f] + * + * @m_class{m-block m-warning} + * + * @par TODO + * Ethan Eade notes that one should use the Taylor series expansion of the coefficients + * of the second and third terms when theta is small. Should we use that instead?? + * Right now we just handle if theta is zero, we set the exponential to be identity. 
+ */ + inline Eigen::Matrix Exp(const Eigen::Matrix &w) { + + Eigen::Matrix w_x = skew_x(w); + + double theta = w.norm(); + + Eigen::Matrix R; + + if (theta == 0) { + R = Eigen::MatrixXd::Identity(3, 3); + } else { + R = Eigen::MatrixXd::Identity(3, 3) + (sin(theta) / theta) * (w_x) + + ((1 - cos(theta)) / pow(theta, 2)) * w_x * w_x; + } + return R; - if (theta < 1e-12){ - return Eigen::MatrixXd::Identity(3,3); } - else{ - Eigen::Matrix a= w/theta; - Eigen::Matrix J= sin(theta)/theta*Eigen::MatrixXd::Identity(3,3)+ (1-sin(theta)/theta)*a*a.transpose()+ - ((1-cos(theta))/theta)*skew_x(a); - return J; + + /** + * @brief SO(3) matrix logarithm + * @param[in] R 3x3 SO(3) rotation matrix + * @return 3x3 skew-symmetric matrix + * + * This definition was taken from "Lie Groups for 2D and 3D Transformations" by Ethan Eade equation 17 & 18. + * http://ethaneade.com/lie.pdf + * + * @f[ + * \theta = \textrm{arccos}(0.5(\textrm{trace}(\mathbf{R})-1)) + * @f] + * @f[ + * \lfloor\mathbf{v}\times\rfloor = \frac{\theta}{2\sin{\theta}}(\mathbf{R}-\mathbf{R}^\top) + * @f] + * + * @m_class{m-block m-warning} + * + * @par TODO + * Ethan Eade notes that one should use the Taylor series expansion of the coefficients + * of the second and third terms when theta is small. Should we use that instead?? + * Right now we just handle if near the identity, we set the log to be all zeros. 
+ */ + inline Eigen::Matrix Log(const Eigen::Matrix &R) { + Eigen::Matrix w_x; + // magnitude of the skew elements + double theta = std::acos(0.5 * (R.trace() - 1)); + // calculate the skew symetric matrix + w_x = (theta / (2 * sin(theta))) * (R - R.transpose()); + // check if we are near the identity + if (R != Eigen::MatrixXd::Identity(3, 3)) { + return w_x; + } else { + return Eigen::MatrixXd::Zero(3, 3); + } } -}; -/** - * Computes right Jacobian of SO(3) - * @param w axis-angle - */ -inline Eigen::Matrix Jr_so3(Eigen::Matrix w ){ + /** + * @brief JPL Quaternion inverse + * @param[in] q quaternion we want to change + * @return inversed quaternion + * + * See equation 21 in [Trawny2005]. + * \f[ + * \bar{q}^{-1} = \begin{bmatrix} -\mathbf{q} \\ q_4 \end{bmatrix} + * \f] + */ + inline Eigen::Matrix Inv(Eigen::Matrix q) { + Eigen::Matrix qinv; + + qinv.block(0, 0, 3, 1) = -q.block(0, 0, 3, 1); + qinv(3, 0) = q(3, 0); + return qinv; - return Jl_so3(-w); -}; + } + /** + * @brief equation (48) of trawny tech report [Trawny2005] + */ + inline Eigen::Matrix Omega(Eigen::Matrix w) { + Eigen::Matrix mat; + mat.block(0, 0, 3, 3) = -skew_x(w); + mat.block(3, 0, 1, 3) = -w.transpose(); + mat.block(0, 3, 3, 1) = w; + mat(3, 3) = 0; + return mat; + }; + + /** + * @brief Normalizes a quaternion to make sure it is unit norm + */ + inline Eigen::Matrix quatnorm(Eigen::Matrix q_t) { + if (q_t(3, 0) < 0) { + q_t *= -1; + } + return q_t / q_t.norm(); + }; + + /** + * @brief Computes left Jacobian of SO(3) + * @param w axis-angle + */ + inline Eigen::Matrix Jl_so3(Eigen::Matrix w) { + + double theta = w.norm(); + + if (theta < 1e-12) { + return Eigen::MatrixXd::Identity(3, 3); + } else { + Eigen::Matrix a = w / theta; + Eigen::Matrix J = sin(theta) / theta * Eigen::MatrixXd::Identity(3, 3) + + (1 - sin(theta) / theta) * a * a.transpose() + + ((1 - cos(theta)) / theta) * skew_x(a); + return J; + + } + }; + + /** + * @brief Computes right Jacobian of SO(3) + * @param w axis-angle + */ 
+ inline Eigen::Matrix Jr_so3(Eigen::Matrix w) { + + return Jl_so3(-w); + }; +} #endif /* OV_CORE_QUAT_OPS_H */ \ No newline at end of file diff --git a/ov_msckf/CMakeLists.txt b/ov_msckf/CMakeLists.txt new file mode 100644 index 000000000..5ebe6e22b --- /dev/null +++ b/ov_msckf/CMakeLists.txt @@ -0,0 +1,79 @@ +cmake_minimum_required(VERSION 2.8.8) + +# Project name +project(ov_msckf) + +# Include our cmake files +set(CMAKE_MODULE_PATH ${PROJECT_SOURCE_DIR}/cmake/) + +# Find catkin (the ROS build system) +find_package(catkin REQUIRED COMPONENTS roscpp rosbag tf std_msgs geometry_msgs sensor_msgs nav_msgs visualization_msgs cv_bridge ov_core) + +# Include libraries +find_package(Eigen3 REQUIRED) +find_package(OpenCV 3 REQUIRED) +find_package(Boost REQUIRED COMPONENTS system filesystem thread date_time) + +# display message to user +message(STATUS "EIGEN VERSION: " ${EIGEN3_VERSION}) +message(STATUS "OPENCV VERSION: " ${OpenCV_VERSION}) +message(STATUS "BOOST VERSION: " ${Boost_VERSION}) + +# Describe catkin project +catkin_package( + CATKIN_DEPENDS roscpp rosbag tf std_msgs geometry_msgs sensor_msgs nav_msgs visualization_msgs cv_bridge ov_core + INCLUDE_DIRS src +) + +# Try to compile with c++11 +# http://stackoverflow.com/a/25836953 +include(CheckCXXCompilerFlag) +CHECK_CXX_COMPILER_FLAG("-std=c++11" COMPILER_SUPPORTS_CXX11) +CHECK_CXX_COMPILER_FLAG("-std=c++0x" COMPILER_SUPPORTS_CXX0X) +if(COMPILER_SUPPORTS_CXX11) + set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -std=c++11") +elseif(COMPILER_SUPPORTS_CXX0X) + set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -std=c++0x") +else() + message(STATUS "The compiler ${CMAKE_CXX_COMPILER} has no C++11 support. 
Please use a different C++ compiler.") +endif() + +# Enable compile optimizations +set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -O3 -fsee -fomit-frame-pointer -fno-signed-zeros -fno-math-errno -funroll-loops") + +# Enable debug flags (use if you want to debug in gdb) +set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -g3 -Wall") + +# Include our header files +include_directories( + src + ${EIGEN3_INCLUDE_DIR} + ${Boost_INCLUDE_DIRS} + ${catkin_INCLUDE_DIRS} +) + +# Set link libraries used by all binaries +list(APPEND thirdparty_libraries + ${Boost_LIBRARIES} + ${OpenCV_LIBRARIES} + ${catkin_LIBRARIES} +) + +################################################## +# Make the core library +################################################## +add_library(ov_msckf + src/state/State.cpp + src/state/StateHelper.cpp + src/state/Propagator.cpp +) +target_link_libraries(ov_msckf ${thirdparty_libraries}) + + +################################################## +# Make binary files! +################################################## +add_executable(test_msckf src/test_msckf.cpp) +target_link_libraries(test_msckf ov_msckf ${thirdparty_libraries}) + + diff --git a/ov_msckf/package.xml b/ov_msckf/package.xml new file mode 100644 index 000000000..1234d15a4 --- /dev/null +++ b/ov_msckf/package.xml @@ -0,0 +1,48 @@ + + + + ov_msckf + 1.0.0 + + Implementation of the MSCKF. 
+ + + + Patrick Geneva + Kevin Eckenhoff + Patrick Geneva + Kevin Eckenhoff + + + All Rights Reserved + + + catkin + + + cmake_modules + roscpp + rosbag + tf + std_msgs + geometry_msgs + sensor_msgs + nav_msgs + visualization_msgs + cv_bridge + ov_core + + + roscpp + rosbag + tf + std_msgs + geometry_msgs + sensor_msgs + nav_msgs + visualization_msgs + cv_bridge + ov_core + + + \ No newline at end of file diff --git a/ov_core/src/state/Propagator.cpp b/ov_msckf/src/state/Propagator.cpp similarity index 57% rename from ov_core/src/state/Propagator.cpp rename to ov_msckf/src/state/Propagator.cpp index 8d94675b9..8af0152cf 100644 --- a/ov_core/src/state/Propagator.cpp +++ b/ov_msckf/src/state/Propagator.cpp @@ -1,11 +1,14 @@ -// -// Created by keck on 6/4/19. -// - #include "Propagator.h" -bool Propagator::propagateAndClone(State* state, double timestamp){ + +using namespace ov_core; +using namespace ov_msckf; + + + + +void Propagator::propagate_and_clone(State* state, double timestamp){ // First lets construct an IMU vector of measurements we need vector prop_data; @@ -19,21 +22,20 @@ bool Propagator::propagateAndClone(State* state, double timestamp){ // Ensure we have some measurements in the first place! if(imu_data.empty()) { - //ROS_ERROR("[TIME-SYNC]: There are no IMU measurements!!!!!"); - //ROS_ERROR("[TIME-SYNC]: IMU-CAMERA are likely messed up, check time offset value!!!"); - //ROS_ERROR("%s on line %d",__FILE__,__LINE__); - //std::exit(EXIT_FAILURE); + std::cerr << "Propagator::propagate_and_clone(): There are no IMU measurements!!!!!" << std::endl; + std::cerr << "Propagator::propagate_and_clone(): IMU-CAMERA are likely messed up, check time offset value!!!" 
<< std::endl; + std::cerr << __FILE__ << " on line " << __LINE__ << std::endl; state->set_timestamp(timestamp); last_prop_time_offset = t_off_new; - return false; + return; } // If the difference between the current update time and state is zero // We should crash, as this means we would have two clones at the same time!!!! if(state->timestamp() == timestamp) { - //ROS_ERROR("[TIME-SYNC]: Propagation called again at same timestep at last update timestep!!!!"); - //ROS_ERROR("[TIME-SYNC]: %.15f vs %.15f timestamps",state->timestamp,timestamp); - //ROS_ERROR("%s on line %d",__FILE__,__LINE__); + std::cerr << "Propagator::propagate_and_clone(): Propagation called again at same timestep at last update timestep!!!!" << std::endl; + std::cerr << "Propagator::propagate_and_clone(): " << state->timestamp() << " vs " << timestamp << " timestamps" << std::endl; + std::cerr << __FILE__ << " on line " << __LINE__ << std::endl; std::exit(EXIT_FAILURE); } @@ -41,44 +43,40 @@ bool Propagator::propagateAndClone(State* state, double timestamp){ // Note we split measurements based on the given state time, and the update timestamp for(size_t i=0; i state->timestamp()+last_prop_time_offset && imu_data.at(i).timestamp <= - state->timestamp()+last_prop_time_offset) { + // START OF THE INTEGRATION PERIOD + // If the next timestamp is greater then our current state time + // And the current is not greater then it yet... 
+ // Then we should "split" our current IMU measurement + if(imu_data.at(i+1).timestamp > state->timestamp()+last_prop_time_offset && imu_data.at(i).timestamp <= state->timestamp()+last_prop_time_offset) { IMUDATA data = interpolate_data(imu_data.at(i),imu_data.at(i+1),state->timestamp()+last_prop_time_offset); prop_data.push_back(data); - //ROS_INFO("propagation #%d = CASE 1 = %.3f => %.3f", - // (int)i,data.timestamp-prop_data.at(0).timestamp,state->timestamp-prop_data.at(0).timestamp); + //ROS_INFO("propagation #%d = CASE 1 = %.3f => %.3f", (int)i,data.timestamp-prop_data.at(0).timestamp,state->timestamp-prop_data.at(0).timestamp); continue; } - // MIDDLE OF INTEGRATION PERIOD - // If our imu measurement is right in the middle of our propagation period - // Then we should just append the whole measurement time to our propagation vector - if(imu_data.at(i).timestamp >= state->timestamp()+last_prop_time_offset && imu_data.at(i+1).timestamp <= - timestamp+t_off_new) { + // MIDDLE OF INTEGRATION PERIOD + // If our imu measurement is right in the middle of our propagation period + // Then we should just append the whole measurement time to our propagation vector + if(imu_data.at(i).timestamp >= state->timestamp()+last_prop_time_offset && imu_data.at(i+1).timestamp <= timestamp+t_off_new) { prop_data.push_back(imu_data.at(i)); - //ROS_INFO("propagation #%d = CASE 2 = %.3f",(int)i,imu_data.at(i).timestamp-prop_data.at(0).timestamp); + //ROS_INFO("propagation #%d = CASE 2 = %.3f",(int)i,imu_data.at(i).timestamp-prop_data.at(0).timestamp); continue; } - // END OF THE INTEGRATION PERIOD - // If the current timestamp is greater then our update time - // We should just "split" the NEXT IMU measurement to the update time, - // NOTE: we add the current time, and then the time at the end of the inverval (so we can get a dt) - // NOTE: we also break out of this loop, as this is the last IMU measurement we need! 
+ // END OF THE INTEGRATION PERIOD + // If the current timestamp is greater then our update time + // We should just "split" the NEXT IMU measurement to the update time, + // NOTE: we add the current time, and then the time at the end of the inverval (so we can get a dt) + // NOTE: we also break out of this loop, as this is the last IMU measurement we need! if(imu_data.at(i+1).timestamp > timestamp+t_off_new) { prop_data.push_back(imu_data.at(i)); - // If the added IMU message doesn't end exactly at the camera time - // Then we need to add another one that is right at the ending time + // If the added IMU message doesn't end exactly at the camera time + // Then we need to add another one that is right at the ending time if(prop_data.at(prop_data.size()-1).timestamp != timestamp+t_off_new) { IMUDATA data = interpolate_data(imu_data.at(i), imu_data.at(i + 1), timestamp + t_off_new); prop_data.push_back(data); } - //ROS_INFO("propagation #%d = CASE 3 = %.3f => %.3f", - // (int)i,data.timestamp-prop_data.at(0).timestamp,data.timestamp-prop_data.at(0).timestamp); + //ROS_INFO("propagation #%d = CASE 3 = %.3f => %.3f", (int)i,data.timestamp-prop_data.at(0).timestamp,data.timestamp-prop_data.at(0).timestamp); break; } @@ -86,13 +84,12 @@ bool Propagator::propagateAndClone(State* state, double timestamp){ // Check that we have at least one measurement to propagate with if(prop_data.empty()) { - //ROS_ERROR("[TIME-SYNC]: There are not enough measurements to propagate with %d of 2",(int)prop_data.size()); - //ROS_ERROR("[TIME-SYNC]: IMU-CAMERA are likely messed up, check time offset value!!!"); - //ROS_ERROR("%s on line %d",__FILE__,__LINE__); - //std::exit(EXIT_FAILURE); + std::cerr << "Propagator::propagate_and_clone(): There are not enough measurements to propagate with " << (int)prop_data.size() << " of 2" << std::endl; + std::cerr << "Propagator::propagate_and_clone(): IMU-CAMERA are likely messed up, check time offset value!!!" 
<< std::endl; + std::cerr << __FILE__ << " on line " << __LINE__ << std::endl; state->set_timestamp(timestamp); last_prop_time_offset = t_off_new; - return false; + return; } @@ -101,53 +98,68 @@ bool Propagator::propagateAndClone(State* state, double timestamp){ if(imu_data.at(imu_data.size()-1).timestamp < timestamp+t_off_new) { IMUDATA data = interpolate_data(imu_data.at(imu_data.size()-2),imu_data.at(imu_data.size()-1),timestamp+t_off_new); prop_data.push_back(data); - //ROS_INFO("propagation #%d = CASE 4 = %.3f",(int)(imu_data.size()-1),data.timestamp-prop_data.at(0).timestamp); + //ROS_INFO("propagation #%d = CASE 4 = %.3f",(int)(imu_data.size()-1),data.timestamp-prop_data.at(0).timestamp); } - //ROS_INFO("end_prop - start_prop %.3f",prop_data.at(prop_data.size()-1).timestamp-prop_data.at(0).timestamp); - //ROS_INFO("imu_data_last - state %.3f",imu_data.at(imu_data.size()-1).timestamp-state->timestamp()); - //ROS_INFO("update_time - state %.3f",timestamp-state->timestamp()); // Loop through and ensure we do not have an zero dt values // This would cause the noise covariance to be Infinity for (size_t i=0; i < prop_data.size()-1; i++){ - //cout << "dti- " << (prop_data.at(i+1).timestamp-prop_data.at(i).timestamp) << endl; if ((prop_data.at(i+1).timestamp-prop_data.at(i).timestamp) < 1e-8){ - //ROS_ERROR("[TIME-SYNC]: Zero DT between %d and %d measurements (dt = %.8f)",(int)i,(int)(i+1),(prop_data.at(i+1).timestamp-prop_data.at(i).timestamp)); - return false; + std::cerr << "Propagator::propagate_and_clone(): Zero DT between " << i << " and " << i+1 << " measurements (dt = " << (prop_data.at(i+1).timestamp-prop_data.at(i).timestamp) << ")" << std::endl; prop_data.erase(prop_data.begin()+i); i--; } } - // Check that we have at least one measurement to propagate with if(prop_data.empty()) { - //ROS_ERROR("[TIME-SYNC]: There are not enough measurements to propagate with %d of 2",(int)prop_data.size()); - //ROS_ERROR("[TIME-SYNC]: IMU-CAMERA are likely messed up, 
check time offset value!!!"); - //ROS_ERROR("%s on line %d",__FILE__,__LINE__); - //std::exit(EXIT_FAILURE); + std::cerr << "Propagator::propagate_and_clone(): There are not enough measurements to propagate with " << (int)prop_data.size() << " of 2" << std::endl; + std::cerr << "Propagator::propagate_and_clone(): IMU-CAMERA are likely messed up, check time offset value!!!" << std::endl; + std::cerr << __FILE__ << " on line " << __LINE__ << std::endl; state->set_timestamp(timestamp); last_prop_time_offset = t_off_new; - return false; + return; } + // We are going to sum up all the state transition matrices, so we can do a single large multiplication at the end + // Phi_summed = Phi_i*Phi_summed + // Q_summed = Phi_i*Q_summed*Phi_i^T + Q_i + // After summing we can multiple the total phi to get the updated covariance + // We will then add the noise to the IMU portion of the state Eigen::Matrix Phi_summed = Eigen::Matrix::Identity(); Eigen::Matrix Qd_summed = Eigen::Matrix::Zero(); + // Loop through all IMU messages, and use them to move the state forward in time + // This uses the zero'th order quat, and then constant acceleration discrete for(size_t i=0; i F = Eigen::Matrix::Zero(); + Eigen::Matrix Qdi = Eigen::Matrix::Zero(); + predict_and_compute(state, prop_data.at(i), prop_data.at(i+1), F, Qdi); + + // Next we should propagate our IMU covariance + // Pii' = F*Pii*F.transpose() + G*Q*G.transpose() + // Pci' = F*Pci and Pic' = Pic*F.transpose() + // NOTE: Here we are summing the state transition F so we can do a single mutiplication later + // NOTE: Phi_summed = Phi_i*Phi_summed + // NOTE: Q_summed = Phi_i*Q_summed*Phi_i^T + G*Q_i*G^T + Phi_summed = F * Phi_summed; + Qd_summed = F * Qd_summed * F.transpose() + Qdi; + } - //Last angular velocity (used for cloning when estimating time offset) + // Last angular velocity (used for cloning when estimating time offset) Eigen::Matrix last_w = prop_data.at(prop_data.size()-1).wm - state->imu()->bias_g(); - auto Cov = 
state->Cov(); + // For now assert that our IMU is at the top left of the covariance + assert(state->imu()->id()==0); - size_t imu_id = 0; // Do the update to the covariance with our "summed" state transition and IMU noise addition... + auto Cov = state->Cov(); + size_t imu_id = state->imu()->id(); Cov.block(imu_id,0,15,state->nVars()) = Phi_summed*Cov.block(imu_id,0,15,state->nVars()); Cov.block(0,imu_id,state->nVars(),15) = Cov.block(0,imu_id,state->nVars(),15)*Phi_summed.transpose(); Cov.block(imu_id,imu_id,15,15) += Qd_summed; @@ -155,18 +167,19 @@ bool Propagator::propagateAndClone(State* state, double timestamp){ // Ensure the covariance is symmetric Cov = 0.5*(Cov+Cov.transpose()); - //Set timestamp data + // Set timestamp data state->set_timestamp(timestamp); last_prop_time_offset = t_off_new; - - //Now perform stochastic cloning + // Now perform stochastic cloning StateHelper::augment_clone(state, last_w); } -void Propagator::predictAndcomputeJacobians(State *state, const IMUDATA data_minus, const IMUDATA data_plus, - Eigen::Matrix &F, Eigen::Matrix &Qd){ + + +void Propagator::predict_and_compute(State *state, const IMUDATA data_minus, const IMUDATA data_plus, + Eigen::Matrix &F, Eigen::Matrix &Qd){ auto imu = state->imu(); @@ -176,11 +189,10 @@ void Propagator::predictAndcomputeJacobians(State *state, const IMUDATA data_min Eigen::Matrix w_hat = data_minus.wm - imu->bias_g(); Eigen::Matrix a_hat = data_minus.am - imu->bias_a(); - + // If we are averaging the IMU, then do so if (state->options().imu_avg){ Eigen::Matrix w_hat2 = data_plus.wm - imu->bias_g(); Eigen::Matrix a_hat2 = data_plus.am - imu->bias_a(); - w_hat = .5*(w_hat+w_hat2); a_hat = .5*(a_hat+a_hat2); } @@ -188,6 +200,7 @@ void Propagator::predictAndcomputeJacobians(State *state, const IMUDATA data_min // Pre-compute things double w_norm = w_hat.norm(); Eigen::Matrix I_4x4 = Eigen::Matrix::Identity(); + Eigen::Matrix R_Gtoi = imu->Rot(); // Orientation: Equation (101) and (103) and of Trawny 
indirect TR Eigen::Matrix bigO; @@ -198,27 +211,30 @@ void Propagator::predictAndcomputeJacobians(State *state, const IMUDATA data_min } Eigen::Matrix new_q = quatnorm(bigO*imu->quat()); - Eigen::Matrix R_Gtoi = imu->Rot(); - // Velocity: just the acceleration in the local frame, minus global gravity Eigen::Matrix new_v = imu->vel() + R_Gtoi.transpose()*a_hat*dt - _gravity*dt; // Position: just velocity times dt, with the acceleration integrated twice - Eigen::Matrix new_p = imu->pos() + imu->vel()*dt + 0.5*R_Gtoi.transpose()*a_hat*dt*dt - - 0.5*_gravity*dt*dt; - - //Indexing within Jacobian - int th_id = 0; - int p_id = 3; - int v_id = 6; - int bg_id = 9; - int ba_id = 12; - - //Allocate noise Jacobian + Eigen::Matrix new_p = imu->pos() + imu->vel()*dt + 0.5*R_Gtoi.transpose()*a_hat*dt*dt - 0.5*_gravity*dt*dt; + + // Indexing within Jacobian + //int th_id = 0; + //int p_id = 3; + //int v_id = 6; + //int bg_id = 9; + //int ba_id = 12; + + // Get the locations of each entry of the imu state + int th_id = state->imu()->q()->id()-state->imu()->id(); + int p_id = state->imu()->p()->id()-state->imu()->id(); + int v_id = state->imu()->v()->id()-state->imu()->id(); + int bg_id = state->imu()->bg()->id()-state->imu()->id(); + int ba_id = state->imu()->ba()->id()-state->imu()->id(); + + // Allocate noise Jacobian Eigen::Matrix G = Eigen::Matrix::Zero(); - - //Now compute Jacobian of new state wrt old state and noise + // Now compute Jacobian of new state wrt old state and noise if (state->options().do_fej) { Eigen::Matrix Rfej = imu->Rot_fej(); @@ -227,7 +243,6 @@ void Propagator::predictAndcomputeJacobians(State *state, const IMUDATA data_min Eigen::Matrix v_fej = imu->vel_fej(); Eigen::Matrix p_fej = imu->pos_fej(); - F.block(th_id, th_id, 3, 3) = dR; F.block(th_id, bg_id, 3, 3).noalias() = -dR * Jr_so3(-w_hat * dt) * dt; F.block(bg_id, bg_id, 3, 3).setIdentity(); @@ -235,8 +250,7 @@ void Propagator::predictAndcomputeJacobians(State *state, const IMUDATA data_min 
F.block(v_id, v_id, 3, 3).setIdentity(); F.block(v_id, ba_id, 3, 3) = -Rfej.transpose() * dt; F.block(ba_id, ba_id, 3, 3).setIdentity(); - F.block(p_id, th_id, 3, 3).noalias() = - -skew_x(new_p-p_fej-v_fej*dt+.5*_gravity*dt*dt)*Rfej.transpose(); + F.block(p_id, th_id, 3, 3).noalias() = -skew_x(new_p-p_fej-v_fej*dt+.5*_gravity*dt*dt)*Rfej.transpose(); F.block(p_id, v_id, 3, 3) = Eigen::Matrix::Identity() * dt; F.block(p_id, ba_id, 3, 3) = -0.5 * Rfej.transpose() * dt * dt; F.block(p_id, p_id, 3, 3).setIdentity(); @@ -256,8 +270,7 @@ void Propagator::predictAndcomputeJacobians(State *state, const IMUDATA data_min F.block(v_id, v_id, 3, 3).setIdentity(); F.block(v_id, ba_id, 3, 3) = -R_Gtoi.transpose() * dt; F.block(ba_id, ba_id, 3, 3).setIdentity(); - F.block(p_id, th_id, 3, 3).noalias() = - -0.5 *R_Gtoi.transpose() * skew_x(a_hat * dt * dt); + F.block(p_id, th_id, 3, 3).noalias() = -0.5 *R_Gtoi.transpose() * skew_x(a_hat * dt * dt); F.block(p_id, v_id, 3, 3) = Eigen::Matrix::Identity() * dt; F.block(p_id, ba_id, 3, 3) = -0.5 * R_Gtoi.transpose() * dt * dt; F.block(p_id, p_id, 3, 3).setIdentity(); @@ -269,8 +282,7 @@ void Propagator::predictAndcomputeJacobians(State *state, const IMUDATA data_min G.block(p_id, 6, 3, 3) = -.5 * imu->Rot().transpose() * dt * dt; } - - // Get noise covariance + // Get noise covariance matrix Eigen::Matrix Qc = Eigen::Matrix::Zero(); Qc.block(0,0,3,3) = _noises.sigma_w_2/dt*Eigen::Matrix::Identity(); Qc.block(3,3,3,3) = _noises.sigma_wb_2*dt*Eigen::Matrix::Identity(); diff --git a/ov_msckf/src/state/Propagator.h b/ov_msckf/src/state/Propagator.h new file mode 100644 index 000000000..a23f3bba9 --- /dev/null +++ b/ov_msckf/src/state/Propagator.h @@ -0,0 +1,172 @@ +#ifndef OV_MSCKF_STATE_PROPAGATOR_H +#define OV_MSCKF_STATE_PROPAGATOR_H + + +#include "state/StateHelper.h" +#include "utils/quat_ops.h" + +using namespace ov_core; + + + +/** + * @namespace ov_msckf + * @brief The Open VINS MSCKF + */ +namespace ov_msckf { + + + /** + * @brief 
Performs the state covariance and mean propagation using imu measurements + */ + class Propagator { + + public: + + /** + * @brief Struct for a single imu measurement (time, wm, am) + */ + struct IMUDATA { + + /// Timestamp of the reading + double timestamp; + + /// Gyroscope reading, angular velocity (rad/s) + Eigen::Matrix wm; + + /// Accelerometer reading, linear acceleration (m/s^2) + Eigen::Matrix am; + + }; + + + /** + * @brief Struct of our imu noise parameters + */ + struct NoiseManager { + + /// Gyro white noise covariance + double sigma_w_2; + + /// Gyro random walk covariance + double sigma_wb_2; + + /// Accel white noise covariance + double sigma_a_2; + + /// Accel random walk covariance + double sigma_ab_2; + + }; + + + /** + * @brief Default constructor + * @param noises imu noise characteristics (continuous time) + * @param gravity Global gravity of the system (normally [0,0,9.81]) + */ + Propagator(NoiseManager noises, Eigen::Matrix gravity) : _noises(noises), + _gravity(gravity) {} + + + /** + * @brief Stores incoming inertial readings + * + * @param timestamp Timestamp of imu reading + * @param wm Gyro angular velocity reading + * @param am Accelerometer linear acceleration reading + */ + void feed_imu(double timestamp, Eigen::Matrix wm, Eigen::Matrix am) { + + // Create our imu data object + IMUDATA data; + data.timestamp = timestamp; + data.wm = wm; + data.am = am; + + // Append it to our vector + imu_data.emplace_back(data); + + } + + + /** + * @brief Propagate state up to given timestamp and then clone + * + * This will first collect all imu readings that occurred between the + * *current* state time and the new time we want the state to be at. + * If we don't have any imu readings we will try to extrapolate into the future. + * After propagating the mean and covariance using our dynamics, + * We clone the current imu pose as a new clone in our state. 
+ * + * @param state Pointer to state + * @param timestamp Time to propagate to and clone at + */ + void propagate_and_clone(State *state, double timestamp); + + + + protected: + + + /// Estimate for time offset at last propagation time + double last_prop_time_offset = 0; + + + /** + * @brief Propagates the state forward using the imu data and computes the noise covariance and state-transition + * matrix of this interval. + * + * This function can be replaced with analytical/numerical integration or when using a different state representation. + * This contains our state transition matrix along with how our noise evolves in time. + * If you have other state variables besides the IMU that evolve you would add them here. + * + * @param state Pointer to state + * @param data_minus imu readings at beginning of interval + * @param data_plus imu readings at end of interval + * @param F State-transition matrix over the interval + * @param Qd Discrete-time noise covariance over the interval + */ + void predict_and_compute(State *state, const IMUDATA data_minus, const IMUDATA data_plus, + Eigen::Matrix &F, Eigen::Matrix &Qd); + + + /// Container for the noise values + NoiseManager _noises; + + /// Our history of IMU messages (time, angular, linear) + std::vector imu_data; + + /// Gravity vector + Eigen::Matrix _gravity; + + + /** + * @brief Nice helper function that will linearly interpolate between two imu messages. + * + * This should be used instead of just "cutting" imu messages that bound the camera times + * Give better time offset if we use this function, could try other orders/splines if the imu is slow. 
+ * + * @param imu_1 imu at beginning of interpolation interval + * @param imu_2 imu at end of interpolation interval + * @param timestamp Timestamp being interpolated to + */ + IMUDATA interpolate_data(const IMUDATA imu_1, const IMUDATA imu_2, double timestamp) { + // time-distance lambda + double lambda = (timestamp - imu_1.timestamp) / (imu_2.timestamp - imu_1.timestamp); + //cout << "lambda - " << lambda << endl; + // interpolate between the two times + IMUDATA data; + data.timestamp = timestamp; + data.am = (1 - lambda) * imu_1.am + lambda * imu_2.am; + data.wm = (1 - lambda) * imu_1.wm + lambda * imu_2.wm; + return data; + } + + + }; + + +} + +#endif //OV_MSCKF_STATE_PROPAGATOR_H diff --git a/ov_core/src/state/State.cpp b/ov_msckf/src/state/State.cpp similarity index 73% rename from ov_core/src/state/State.cpp rename to ov_msckf/src/state/State.cpp index 0a02ec55f..22b655c91 100644 --- a/ov_core/src/state/State.cpp +++ b/ov_msckf/src/state/State.cpp @@ -1,27 +1,28 @@ -// -// Created by keck on 6/4/19. 
-// - #include "State.h" -Eigen::MatrixXd State::get_marginal_covariance(const std::vector &small_variables){ + +using namespace ov_core; +using namespace ov_msckf; + + +Eigen::MatrixXd State::get_marginal_covariance(const std::vector &small_variables) { // Calculate the marginal covariance size we need ot make our matrix - int cov_size=0; - for (size_t i=0; i < small_variables.size(); i++){ + int cov_size = 0; + for (size_t i = 0; i < small_variables.size(); i++) { cov_size += small_variables[i]->size(); } // Construct our return covariance - Eigen::MatrixXd Small_cov(cov_size,cov_size); + Eigen::MatrixXd Small_cov(cov_size, cov_size); // For each variable, lets copy over all other variable cross terms // Note: this copies over itself to when i_index=k_index - int i_index=0; - for (size_t i=0; i < small_variables.size(); i++){ - int k_index=0; - for (size_t k=0; k < small_variables.size(); k++){ - Small_cov.block(i_index, k_index, small_variables[i]->size(),small_variables[k]->size()) = + int i_index = 0; + for (size_t i = 0; i < small_variables.size(); i++) { + int k_index = 0; + for (size_t k = 0; k < small_variables.size(); k++) { + Small_cov.block(i_index, k_index, small_variables[i]->size(), small_variables[k]->size()) = _Cov.block(small_variables[i]->id(), small_variables[k]->id(), small_variables[i]->size(), small_variables[k]->size()); k_index += small_variables[k]->size(); @@ -33,7 +34,8 @@ Eigen::MatrixXd State::get_marginal_covariance(const std::vector &small_v return Small_cov; } -void State::initialize_variables(){ + +void State::initialize_variables() { double current_id = 0; _imu = new IMU(); @@ -44,13 +46,13 @@ void State::initialize_variables(){ //Camera to IMU time offset _calib_dt_CAMtoIMU = new Vec(1); - if (_options.do_calib_camera_timeoffset){ + if (_options.do_calib_camera_timeoffset) { _calib_dt_CAMtoIMU->set_local_id(current_id); insert_variable(_calib_dt_CAMtoIMU); current_id++; } - for (size_t i = 0; i < _options.num_cameras; i++){ + for 
(size_t i = 0; i < _options.num_cameras; i++) { //Allocate pose PoseJPL *pose = new PoseJPL(); //Allocate intrinsics @@ -61,13 +63,13 @@ void State::initialize_variables(){ _cam_intrinsics.insert({i, intrin}); //If calibrating camera-imu pose, add to variables - if (_options.do_calib_camera_pose){ + if (_options.do_calib_camera_pose) { pose->set_local_id(current_id); current_id += 6; insert_variable(pose); } //If calibrating camera intrinsics, add to variables - if (_options.do_calib_camera_intrinsics){ + if (_options.do_calib_camera_intrinsics) { intrin->set_local_id(current_id); current_id += 8; insert_variable(intrin); @@ -76,6 +78,6 @@ void State::initialize_variables(){ _Cov = Eigen::MatrixXd::Zero(current_id, current_id); +} -} \ No newline at end of file diff --git a/ov_msckf/src/state/State.h b/ov_msckf/src/state/State.h new file mode 100644 index 000000000..8d0de7d57 --- /dev/null +++ b/ov_msckf/src/state/State.h @@ -0,0 +1,217 @@ +// +// Created by keck on 6/3/19. +// + +#ifndef OV_MSCKF_STATE_H +#define OV_MSCKF_STATE_H + + +#include +#include + +#include "types/IMU.h" +#include "types/Vec.h" +#include "types/PoseJPL.h" +#include "StateOptions.h" + +using namespace ov_core; + + + +/** + * @namespace ov_msckf + * @brief The Open VINS MSCKF + */ +namespace ov_msckf { + + + /** + * @brief State of our filter + * + * This state has all the current estimates for the filter. + * This system is modeled after the MSCKF filter, thus we have a sliding window of clones. + * We additionally have more parameters for online estimation of calibration and SLAM features. + * We also have the covariance of the system, which should be managed using the StateHelper class. 
+ */ + class State { + + public: + + /** + * @brief Default Constructor (will initialize variables to defaults) + * @param options_ Options structure containing filter options + */ + State(StateOptions &options_) : _options(options_) { + initialize_variables(); + } + + ~State() {} + + /** + * @brief Initializes pointers and covariance + * + * TODO: Read initial values and covariance from options + */ + void initialize_variables(); + + + /** + * @brief For a given set of variables, this will calculate a smaller covariance. + * + * That only includes the ones specified with all crossterms. + * Thus the size of the return will be the summed dimension of all the passed variables. + * Normal use for this is a chi-squared check before update (where you don't need the full covariance). + * + * @param small_variables Vector of variables whose marginal covariance is desired + * @return marginal covariance of the passed variables + */ + Eigen::MatrixXd get_marginal_covariance(const std::vector &small_variables); + + /** + * @brief Given an update vector for the **entire covariance**, updates each variable + * @param dx Correction vector for the entire filter state + */ + void update(const Eigen::MatrixXd dx) { + for (size_t i = 0; i < _variables.size(); i++) { + _variables[i]->update(dx.block(_variables[i]->id(), 0, _variables[i]->size(), 1)); + } + } + + /** + * @brief Set the current timestamp of the filter + * @param timestamp New timestamp + */ + void set_timestamp(double timestamp) { + _timestamp = timestamp; + } + + /** + * @brief Will return the timestep that we will marginalize next. + * + * As of right now, since we are using a sliding window, this is the oldest clone. + * But if you wanted to do a keyframe system, you could selectively marginalize clones. 
+ * + * @return timestep of clone we will marginalize + */ + double margtimestep() { + double time = INFINITY; + for (std::pair &clone_imu : _clones_IMU) { + if (clone_imu.first < time) { + time = clone_imu.first; + } + } + return time; + } + + /// Access current timestamp + double timestamp() { + return _timestamp; + } + + /// Access options struct + StateOptions &options() { + return _options; + } + + /// Access to IMU type + IMU *imu() { + return _imu; + } + + /// Access covariance matrix + Eigen::MatrixXd &Cov() { + return _Cov; + } + + /// Access imu-camera time offset type + Vec *calib_dt_CAMtoIMU() { + return _calib_dt_CAMtoIMU; + } + + /// Access to a given clone + PoseJPL *get_clone(double timestamp) { + return _clones_IMU.at(timestamp); + } + + /// Get size of covariance + size_t nVars() { + return _Cov.rows(); + } + + /// Get current number of clones + size_t nClones() { + return _clones_IMU.size(); + } + + + protected: + + /// Current timestamp (should be the last update time!) 
+ double _timestamp; + + /// Struct containing filter options + StateOptions _options; + + /// Pointer to the "active" IMU state (q_GtoI, p_IinG, v_IinG, bg, ba) + IMU *_imu; + + /// Calibration poses for each camera (R_ItoC, p_IinC) + std::map _calib_IMUtoCAM; + + /// Our current set of SLAM features (3d positions) + //std::map _features_SLAM; + + /// Map between imaging times and clone poses (q_GtoIi, p_IiinG) + std::map _clones_IMU; + + /// Time offset base IMU to camera (t_imu = t_cam + t_off) + Vec *_calib_dt_CAMtoIMU; + + /// Camera intrinsics + std::map _cam_intrinsics; + + /// Covariance of all active variables + Eigen::MatrixXd _Cov; + + /// Vector of variables + std::vector _variables; + + + private: + + // Define that the state helper is a friend class of this class + // This will allow it to access the below functions which should normally not be called + // This prevents a developer from thinking that the "insert clone" will actually correctly add it to the covariance + friend class StateHelper; + + /// Insert a new clone specified by its timestep and type + void insert_clone(double timestamp, PoseJPL *pose) { + _clones_IMU.insert({timestamp, pose}); + } + + /// Removes a clone from a specific timestep + void erase_clone(double timestamp) { + _clones_IMU.erase(timestamp); + } + + /// Access all current variables in our covariance + std::vector &variables() { + return _variables; + } + + /// Access a specific variable by its id + Type *variables(size_t i) { + return _variables[i]; + } + + /// Insert a new variable into our vector ( + void insert_variable(Type *newType) { + _variables.push_back(newType); + } + + + }; + +} + +#endif //OV_MSCKF_STATE_H diff --git a/ov_msckf/src/state/StateHelper.cpp b/ov_msckf/src/state/StateHelper.cpp new file mode 100644 index 000000000..75f26e1fb --- /dev/null +++ b/ov_msckf/src/state/StateHelper.cpp @@ -0,0 +1,366 @@ +#include "StateHelper.h" +#include "State.h" + + +using namespace ov_core; +using namespace ov_msckf; 
+ + + + +void StateHelper::EKFUpdate(State *state, const std::vector &H_order, const Eigen::MatrixXd &H, + const Eigen::VectorXd &res, const Eigen::MatrixXd &R) { + + //========================================================== + //========================================================== + // Part of the Kalman Gain K = M*S^{-1} + + assert(res.rows() == R.rows()); + assert(H.rows() == res.rows()); + + + Eigen::MatrixXd M_a = Eigen::MatrixXd::Zero(state->nVars(), res.rows()); + + std::vector H_id; + std::vector H_is_active; + int current_it = 0; + + // Get the location in small jacobian for each measuring variable + for (Type *meas_var: H_order) { + H_id.push_back(current_it); + current_it += meas_var->size(); + } + + auto Cov = state->Cov(); + + //========================================================== + //========================================================== + // For each active variable find its M = P*H^T + for (Type *var: state->variables()) { + // Sum up effect of each subjacobian= K_i= \sum_m (P_im Hm^T) + Eigen::MatrixXd M_i = Eigen::MatrixXd::Zero(var->size(), res.rows()); + for (size_t i = 0; i < H_order.size(); i++) { + Type *meas_var = H_order[i]; + M_i.noalias() += Cov.block(var->id(), meas_var->id(), var->size(), meas_var->size()) * + H.block(0, H_id[i], H.rows(), meas_var->size()).transpose(); + } + M_a.block(var->id(), 0, var->size(), res.rows()) = M_i; + } + + //========================================================== + //========================================================== + // Get covariance of the involved terms + Eigen::MatrixXd P_small = state->get_marginal_covariance(H_order); + + // S = H*Cov*H' + R + Eigen::MatrixXd S(R.rows(), R.rows()); + S.triangularView() = H * P_small * H.transpose(); + S.triangularView() += R; + //S = 0.5*(S+S.transpose()); + + // Inverse our S (should we use a more stable method here??) 
+ Eigen::MatrixXd Sinv = Eigen::MatrixXd::Identity(R.rows(), R.rows()); + S.selfadjointView().llt().solveInPlace(Sinv); + Eigen::MatrixXd K = M_a * Sinv.selfadjointView(); + + // Update Covariance + Cov.triangularView() -= K * M_a.transpose(); + Cov = Cov.selfadjointView(); + + // Calculate our delta and pass it to update all our state variables + state->update(K * res); + +} + + +void StateHelper::marginalize(State *state, Type *marg) { + + // Check if the current state has the GPS enabled + if (std::find(state->variables().begin(), state->variables().end(), marg) == state->variables().end()) { + std::cerr << "CovManager::marginalize() - Called on variable that is not in the state" << std::endl; + std::cerr << "CovManager::marginalize() - Marginalization, does NOT work on sub-variables yet..." << std::endl; + std::exit(EXIT_FAILURE); + } + + //Generic covariance has this form for x_1, x_m, x_2. If we want to remove x_m: + // + // P_(x_1,x_1) P(x_1,x_m) P(x_1,x_2) + // P_(x_m,x_1) P(x_m,x_m) P(x_m,x_2) + // P_(x_2,x_1) P(x_2,x_m) P(x_2,x_2) + // + // to + // + // P_(x_1,x_1) P(x_1,x_2) + // P_(x_2,x_1) P(x_2,x_2) + // + // i.e. 
x_1 goes from 0 to marg_id, x_2 goes from marg_id+marg_size to Cov.rows() + //in the original covariance + + int marg_size = marg->size(); + int marg_id = marg->id(); + + Eigen::MatrixXd Cov_new(state->nVars() - marg_size, state->nVars() - marg_size); + + int x2_size = state->nVars() - marg_id - marg_size; + + //P_(x_1,x_1) + Cov_new.block(0, 0, marg_id, marg_id) = state->Cov().block(0, 0, marg_id, marg_id); + + //P_(x_1,x_2) + Cov_new.block(0, marg_id, marg_id, x2_size) = state->Cov().block(0, marg_id + marg_size, marg_id, x2_size); + + //P_(x_2,x_1) + Cov_new.block(marg_id, 0, x2_size, marg_id) = Cov_new.block(0, marg_id, marg_id, x2_size).transpose(); + + //P(x_2,x_2) + Cov_new.block(marg_id, marg_id, x2_size, x2_size) = state->Cov().block(marg_id + marg_size, marg_id + marg_size, x2_size, x2_size); + + + //Now set new covariance + state->Cov() = Cov_new; + + + //Now we keep the remaining variables and update their ordering + //Note: DOES NOT SUPPORT MARGINALIZING SUBVARIABLES YET!!!!!!! 
+ std::vector remaining_variables; + for (size_t i = 0; i < state->variables().size(); i++) { + //Only keep non-marginal states + if (state->variables(i) != marg) { + if (state->variables(i)->id() > marg_id) { + //If the variable is "beyond" the marginal one in ordering, need + //to "move it forward" + state->variables(i)->set_local_id(state->variables(i)->id() - marg_size); + } + remaining_variables.push_back(state->variables(i)); + } + } + + delete marg; + + + //Now set variables as the remaining ones + state->variables() = remaining_variables; + +} + + +Type* StateHelper::clone(State *state, Type *variable_to_clone) { + + //Get total size of new cloned variables, and the old covariance size + int total_size = variable_to_clone->size(); + int old_size = (int) state->nVars(); + int new_loc = (int) state->nVars(); + + // Resize both our covariance to the new size + state->Cov().conservativeResizeLike(Eigen::MatrixXd::Zero(state->nVars() + total_size, state->nVars() + total_size)); + + // What is the new state, and variable we inserted + const std::vector new_variables = state->variables(); + Type *new_clone = nullptr; + + // Loop through all variables, and find the variable that we are going to clone + for (size_t k = 0; k < state->variables().size(); k++) { + + // Skip this if it is not the same + Type *type_check = state->variables(k)->check_if_same_variable(variable_to_clone); + if (type_check == nullptr) + continue; + + //So we will clone this one + int old_loc = type_check->id(); + + // Copy the covariance elements + state->Cov().block(new_loc, new_loc, total_size, total_size) = state->Cov().block(old_loc, old_loc, total_size, total_size); + state->Cov().block(0, new_loc, old_size, total_size) = state->Cov().block(0, old_loc, old_size, total_size); + state->Cov().block(new_loc, 0, total_size, old_size) = state->Cov().block(old_loc, 0, total_size, old_size); + + //Create clone from the type being cloned + new_clone = type_check->clone(); + 
new_clone->set_local_id(new_loc); + + //Add to variable list + state->insert_variable(new_clone); + break; + + } + + // Check if the current state has the GPS enabled + if (new_clone == nullptr) { + std::cerr << "CovManager::clone() - Called on variable is not in the state" << std::endl; + std::cerr << "CovManager::clone() - Ensure that the variable specified is a variable, or sub-variable.." << std::endl; + std::exit(EXIT_FAILURE); + } + + return new_clone; + +} + + + + +void StateHelper::initialize(State *state, Type *new_variable, const std::vector &H_order, Eigen::MatrixXd &H_R, + Eigen::MatrixXd &H_L, Eigen::MatrixXd &R, Eigen::VectorXd &res) { + + //========================================================== + //========================================================== + // Part of the Kalman Gain K = M*S^{-1} + + size_t new_var_size = new_variable->size(); + assert (new_var_size == H_L.cols()); + + Eigen::JacobiRotation tempHo_GR; + for (int n = 0; n < H_L.cols(); ++n) { + for (int m = (int) H_L.rows() - 1; m > n; m--) { + // Givens matrix G + tempHo_GR.makeGivens(H_L(m - 1, n), H_L(m, n)); + // Multiply G to the corresponding lines (m-1,m) in each matrix + // Note: we only apply G to the nonzero cols [n:Ho.cols()-n-1], while + // it is equivalent to applying G to the entire cols [0:Ho.cols()-1]. 
+ (H_L.block(m - 1, n, 2, H_L.cols() - n)).applyOnTheLeft(0, 1, tempHo_GR.adjoint()); + (res.block(m - 1, 0, 2, 1)).applyOnTheLeft(0, 1, tempHo_GR.adjoint()); + (H_R.block(m - 1, 0, 2, H_R.cols())).applyOnTheLeft(0, 1, tempHo_GR.adjoint()); + } + } + + + //Separate into initializing and updating portions + Eigen::MatrixXd Hxinit = H_R.block(0, 0, new_var_size, H_R.cols()); + Eigen::MatrixXd Hup = H_R.block(new_var_size, 0, H_R.rows() - new_var_size, H_R.cols()); + + Eigen::MatrixXd H_finit = H_L.block(0, 0, new_var_size, new_var_size); + + Eigen::VectorXd resinit = res.block(0, 0, new_var_size, 1); + Eigen::VectorXd resup = res.block(new_var_size, 0, res.rows() - new_var_size, 1); + + Eigen::MatrixXd Rinit = R.block(0, 0, new_var_size, new_var_size); + Eigen::MatrixXd Rup = R.block(new_var_size, new_var_size, R.rows() - new_var_size, R.rows() - new_var_size); + + //=========================================== + // Finally, initialize it in our state + StateHelper::initialize_invertible(state, new_variable, H_order, Hxinit, H_finit, Rinit, resinit); + + //Update with updating portion + if (Hup.rows() > 0) { + StateHelper::EKFUpdate(state, H_order, Hup, resup, Rup); + } + +} + + +void StateHelper::initialize_invertible(State *state, Type *new_variable, const std::vector &H_order, const Eigen::MatrixXd &H_R, + const Eigen::MatrixXd &H_L, const Eigen::MatrixXd &R, const Eigen::VectorXd &res) { + + //========================================================== + //========================================================== + // Part of the Kalman Gain K = M*S^{-1} + + assert(H_L.rows() == H_L.cols()); + assert(new_variable->size() == H_L.rows()); + + Eigen::MatrixXd H_Linv = H_L.inverse(); + + Eigen::MatrixXd M_a = Eigen::MatrixXd::Zero(state->nVars(), res.rows()); + + std::vector H_id; + std::vector H_is_active; + int current_it = 0; + + // Get the location in small jacobian for each measuring variable + for (Type *meas_var: H_order) { + H_id.push_back(current_it); + 
current_it += meas_var->size(); + } + + //========================================================== + //========================================================== + // For each active variable find its M = P*H^T + for (Type *var: state->variables()) { + // Sum up effect of each subjacobian= K_i= \sum_m (P_im Hm^T) + Eigen::MatrixXd M_i = Eigen::MatrixXd::Zero(var->size(), res.rows()); + for (size_t i = 0; i < H_order.size(); i++) { + Type *meas_var = H_order[i]; + M_i += state->Cov().block(var->id(), meas_var->id(), var->size(), meas_var->size()) * + H_R.block(0, H_id[i], H_R.rows(), meas_var->size()).transpose(); + } + M_a.block(var->id(), 0, var->size(), res.rows()) = M_i; + } + + + //========================================================== + //========================================================== + //Get covariance of this small jacobian + Eigen::MatrixXd P_small = state->get_marginal_covariance(H_order); + + // M = H_R*Cov*H_R' + R + Eigen::MatrixXd M(H_R.rows(), H_R.rows()); + M.triangularView() = H_R * P_small * H_R.transpose(); + M.triangularView() += R; + + // Covariance of the variable/landmark that will be initialized + Eigen::MatrixXd P_LL = H_Linv * M.selfadjointView() * H_Linv.transpose(); + + size_t oldSize = state->nVars(); + + state->Cov().conservativeResizeLike(Eigen::MatrixXd::Zero(state->nVars() + new_variable->size(), + state->nVars() + new_variable->size())); + + state->Cov().block(0, oldSize, oldSize, new_variable->size()).noalias() = -M_a * H_Linv.transpose(); + state->Cov().block(oldSize, 0, new_variable->size(), oldSize) = state->Cov().block(0, oldSize, oldSize, new_variable->size()).transpose(); + state->Cov().block(oldSize, oldSize, new_variable->size(), new_variable->size()) = P_LL; + + + // Update the variable that will be initialized (invertible systems can only update the new variable. 
However this + // Update should be almost zero if we already used a conditional Gauss-Newton to solve for the initial estimate + new_variable->update(H_Linv * res); + + // Now collect results, and add it to the state variables + new_variable->set_local_id((int) (state->nVars() - new_variable->size())); + state->insert_variable(new_variable); + + +} + + + + +void StateHelper::augment_clone(State *state, Eigen::Matrix last_w) { + + auto imu = state->imu(); + auto Cov = state->Cov(); + + // Call on our marginalizer to clone, it will add it to our vector of types + // NOTE: this will clone the clone pose to the END of the covariance... + Type *posetemp = StateHelper::clone(state, imu->pose()); + + // Cast to a JPL pose type + PoseJPL *pose = dynamic_cast(posetemp); + + // Check that it was a valid cast + if (pose == nullptr) { + //ROS_ERROR("INVALID OBJECT RETURNED FROM MARGINALIZER, EXITING!#!@#!@#"); + exit(EXIT_FAILURE); + } + + // Append the new clone to our clone vector + state->insert_clone(state->timestamp(), pose); + + // If we are doing time calibration, then our clones are a function of the time offset + // Logic is based on Mingyang Li and Anastasios I. 
Mourikis paper: + // http://journals.sagepub.com/doi/pdf/10.1177/0278364913515286 + if (state->options().do_calib_camera_timeoffset) { + // Jacobian to augment by + Eigen::Matrix dnc_dt = Eigen::MatrixXd::Zero(6, 1); + dnc_dt.block(0, 0, 3, 1) = last_w; + dnc_dt.block(3, 0, 3, 1) = imu->vel(); + // Augment covariance with time offset Jacobian + Cov.block(0, pose->id(), Cov.rows(), 6) += Cov.block(0, state->calib_dt_CAMtoIMU()->id(), + Cov.rows(), 1) * dnc_dt.transpose(); + Cov.block(pose->id(), 0, 6, Cov.rows()) += dnc_dt * Cov.block(state->calib_dt_CAMtoIMU()->id(), 0, 1, Cov.rows()); + Cov.block(pose->id(), pose->id(), 6, 6) += dnc_dt * Cov(state->calib_dt_CAMtoIMU()->id(), + state->calib_dt_CAMtoIMU()->id()) * dnc_dt.transpose(); + } + +} + + + diff --git a/ov_msckf/src/state/StateHelper.h b/ov_msckf/src/state/StateHelper.h new file mode 100644 index 000000000..e0b5744ec --- /dev/null +++ b/ov_msckf/src/state/StateHelper.h @@ -0,0 +1,146 @@ +#ifndef OV_MSCKF_STATE_HELPER_H +#define OV_MSCKF_STATE_HELPER_H + + +#include "State.h" + + +/** + * @namespace ov_msckf + * @brief The Open VINS MSCKF + */ +namespace ov_msckf { + + + /** + * @brief Helper which manipulates the State its covariance. + * + * In general, this class has all the core logic for the MSCKF system in it. + * This has all functions that change the covariance along with addition and removing elements from the state. + * All functions here are static, and thus are self-contained so that in the future multiple states could be tracked and updated. + * We recommend you look directly at the code for this class for clarity on what exactly we are doing in each. 
+ */ + class StateHelper { + + public: + + + /** + * @brief Performs EKF update of the state + * + * @param state Pointer to state + * @param H_order Variable ordering used in the compressed Jacobian + * @param H Condensed Jacobian of updating measurement + * @param res residual of updating measurement + * @param R updating measurement covariance + */ + static void EKFUpdate(State *state, const std::vector &H_order, const Eigen::MatrixXd &H, + const Eigen::VectorXd &res, const Eigen::MatrixXd &R); + + + /** + * @brief Marginalizes a variable, properly modifying the ordering/covariances in the state + * + * This function can support any Type variable out of the box. + * Right now the marginalization of a sub-variable/type is not supported. + * For example if you wanted to just marginalize the orientation of a PoseJPL, that isn't supported. + * We will first remove the rows and columns corresponding to the type (i.e. do the marginalization). + * After we update all the type ids so that they take into account that the covariance has shrunk in parts of it. + * + * @param state Pointer to state + * @param marg Pointer to variable to marginalize + */ + static void marginalize(State *state, Type *marg); + + + /** + * @brief Clones "variable to clone" and places it at end of covariance + * + * @param state Pointer to state + * @param variable_to_clone Pointer to variable that will be cloned + */ + static Type* clone(State *state, Type *variable_to_clone); + + + /** + * @brief Initializes new variable into covariance. + * + * Uses Givens to separate into updating and initializing systems (therefore system must be fed as isotropic). + * If you are not isotropic first whiten your system (TODO: we should add a helper function to do this). + * If your H_L Jacobian is already directly invertible, then just call the initialize_invertible() instead of this function. 
+ * + * @param state Pointer to state + * @param new_variable Pointer to variable to be initialized + * @param H_order Vector of pointers in order they are contained in the condensed state Jacobian + * @param H_R Jacobian of initializing measurements wrt variables in H_order + * @param H_L Jacobian of initializing measurements wrt new variable + * @param R Covariance of initializing measurements (isotropic) + * @param res Residual of initializing measurements + */ + static void initialize(State *state, Type *new_variable, const std::vector &H_order, Eigen::MatrixXd &H_R, + Eigen::MatrixXd &H_L, Eigen::MatrixXd &R, Eigen::VectorXd &res); + + + /** + * @brief Initializes new variable into covariance (H_L must be invertible) + * + * @param state Pointer to state + * @param new_variable Pointer to variable to be initialized + * @param H_order Vector of pointers in order they are contained in the condensed state Jacobian + * @param H_R Jacobian of initializing measurements wrt variables in H_order + * @param H_L Jacobian of initializing measurements wrt new variable (needs to be invertible) + * @param R Covariance of initializing measurements + * @param res Residual of initializing measurements + */ + static void initialize_invertible(State *state, Type *new_variable, const std::vector &H_order, const Eigen::MatrixXd &H_R, + const Eigen::MatrixXd &H_L, const Eigen::MatrixXd &R, const Eigen::VectorXd &res); + + + /** + * @brief Augment the state with a stochastic copy of the current IMU pose + * + * After propagation, normally we augment the state with an new clone that is at the new update timestep. + * This augmentation clones the IMU pose and adds it to our state's clone map. + * If we are doing time offset calibration we also make our cloning a function of the time offset. + * Time offset logic is based on Mingyang Li and Anastasios I. 
Mourikis paper: http://journals.sagepub.com/doi/pdf/10.1177/0278364913515286 + * + * @param state Pointer to state + * @param last_w The estimated angular velocity at cloning time (used to estimate imu-cam time offset) + */ + static void augment_clone(State *state, Eigen::Matrix last_w); + + + /** + * @brief Remove the oldest clone, if we have more then the max clone count!! + * + * This will marginalize the clone from our covariance, and remove it from our state. + * This is mainly a helper function that we can call after each update. + * It will marginalize the clone specified by State::margtimestep() which should return a clone timestamp. + * + * @param state Pointer to state + */ + static void marginalize_old_clone(State *state) { + if (state->nClones() > state->options().max_clone_size) { + double margTime = state->margtimestep(); + StateHelper::marginalize(state, state->get_clone(margTime)); + // Note that the marginalizer should have already deleted the clone + // Thus we just need to remove the pointer to it from our state + state->erase_clone(margTime); + } + } + + + private: + + /** + * All function in this class should be static. + * Thus an instance of this class cannot be created. 
+ */ + StateHelper() {} + + + }; + +} + +#endif //OV_MSCKF_STATE_HELPER_H diff --git a/ov_msckf/src/state/StateOptions.h b/ov_msckf/src/state/StateOptions.h new file mode 100644 index 000000000..f0b25112e --- /dev/null +++ b/ov_msckf/src/state/StateOptions.h @@ -0,0 +1,46 @@ +#ifndef OV_MSCKF_STATE_OPTIONS_H +#define OV_MSCKF_STATE_OPTIONS_H + + +/** + * @namespace ov_msckf + * @brief The Open VINS MSCKF + */ +namespace ov_msckf { + + + /** + * @brief Struct which stores all our filter options + */ + struct StateOptions { + + /// Bool to determine whether or not to do first estimate Jacobians + bool do_fej = false; + + /// Bool to determine whether or not use imu message averaging + bool imu_avg = false; + + /// Bool to determine whether or not to calibrate imu-to-camera pose + bool do_calib_camera_pose = false; + + /// Bool to determine whether or not to calibrate camera intrinsics + bool do_calib_camera_intrinsics = false; + + /// Bool to determine whether or not to calibrate camera to IMU time offset + bool do_calib_camera_timeoffset = false; + + /// Max clone size of sliding window + size_t max_clone_size = 8; + + /// Max number of estimated SLAM features + size_t max_slam_features = 0; + + /// Number of cameras + size_t num_cameras = 1; + + }; + + +} + +#endif //OV_MSCKF_STATE_OPTIONS_H diff --git a/ov_msckf/src/test_msckf.cpp b/ov_msckf/src/test_msckf.cpp new file mode 100644 index 000000000..e69de29bb diff --git a/ov_msckf/src/types/IMU.h b/ov_msckf/src/types/IMU.h new file mode 100644 index 000000000..b32647e72 --- /dev/null +++ b/ov_msckf/src/types/IMU.h @@ -0,0 +1,245 @@ +#ifndef OV_MSCKF_TYPE_IMU_H +#define OV_MSCKF_TYPE_IMU_H + + +#include "utils/quat_ops.h" +#include "PoseJPL.h" + + +using namespace ov_core; + +/** + * @namespace ov_msckf + * @brief The Open VINS MSCKF + */ +namespace ov_msckf { + + + /** + * @brief Derived Type class that implements an IMU state + * + * Contains a PoseJPL, Vec velocity, Vec gyro bias, and Vec accel bias. 
+ * This should be similar to that of the standard MSCKF state besides the ordering.
+ * The pose is first, followed by velocity, etc.
+ */
+ class IMU : public Type {
+
+ public:
+
+ IMU() : Type(15) {
+ _pose = new PoseJPL();
+ _v = new Vec(3);
+ _bg = new Vec(3);
+ _ba = new Vec(3);
+
+ Eigen::Matrix imu0;
+ imu0.setZero();
+ imu0(3) = 1.0;
+
+ set_value(imu0);
+ set_fej(imu0);
+ }
+
+ ~IMU() {}
+
+ /**
+ * @brief Performs update operation using JPLQuat update for orientation, then vector updates for
+ * position, velocity, gyro bias, and accel bias (in that order).
+ *
+ * @param dx 15 DOF vector encoding update using the following order (q, p, v, bg, ba)
+ */
+ void update(const Eigen::VectorXd dx) override {
+
+ assert(dx.rows() == _size);
+
+ Eigen::Matrix newX = _value;
+
+ Eigen::Matrix dq;
+ dq << .5 * dx.block(0, 0, 3, 1), 1.0;
+ dq = dq / dq.norm();
+
+ newX.block(0, 0, 4, 1) = quat_multiply(dq, quat());
+ newX.block(4, 0, 3, 1) += dx.block(3, 0, 3, 1);
+
+ newX.block(7, 0, 3, 1) += dx.block(6, 0, 3, 1);
+ newX.block(10, 0, 3, 1) += dx.block(9, 0, 3, 1);
+ newX.block(13, 0, 3, 1) += dx.block(12, 0, 3, 1);
+
+ set_value(newX);
+
+ }
+
+
+ /**
+ * @brief Sets the value of the estimate
+ * @param new_value New value we should set
+ */
+ void set_value(const Eigen::VectorXd new_value) override {
+
+ _pose->set_value(new_value.block(0, 0, 7, 1));
+ _v->set_value(new_value.block(7, 0, 3, 1));
+ _bg->set_value(new_value.block(10, 0, 3, 1));
+ _ba->set_value(new_value.block(13, 0, 3, 1));
+
+ _value = new_value;
+ }
+
+ /**
+ * @brief Sets the value of the first estimate
+ * @param new_value New value we should set
+ */
+ void set_fej(const Eigen::VectorXd new_value) override {
+
+ _pose->set_fej(new_value.block(0, 0, 7, 1));
+ _v->set_fej(new_value.block(7, 0, 3, 1));
+ _bg->set_fej(new_value.block(10, 0, 3, 1));
+ _ba->set_fej(new_value.block(13, 0, 3, 1));
+
+ _fej = new_value;
+ }
+
+ Type *clone() override {
+ Type *Clone = new IMU();
+
Clone->set_value(value()); + Clone->set_fej(fej()); + return Clone; + } + + /** + * @brief Used to find the components inside the IMU if needed. + * If the passed variable is a sub-variable or the current variable this will return it. + * Otherwise it will return a nullptr, meaning that it was unable to be found. + * + * @param check variable to find + */ + Type *check_if_same_variable(const Type *check) override { + if (check == this) { + return this; + } else if (check == _pose) { + return _pose; + } else if (check == q()) { + return q(); + } else if (check == p()) { + return p(); + } else if (check == _v) { + return _v; + } else if (check == _bg) { + return bg(); + } else if (check == _ba) { + return ba(); + } else { + return nullptr; + } + } + + /// Rotation access + Eigen::Matrix Rot() const { + return _pose->Rot(); + } + + /// FEJ Rotation access + Eigen::Matrix Rot_fej() const { + return _pose->Rot_fej(); + } + + /// Rotation access quaternion + Eigen::Matrix quat() const { + return _pose->quat(); + } + + /// FEJ Rotation access quaternion + Eigen::Matrix quat_fej() const { + return _pose->quat_fej(); + } + + + /// Position access + Eigen::Matrix pos() const { + return _pose->pos(); + } + + /// FEJ position access + Eigen::Matrix pos_fej() const { + return _pose->pos_fej(); + } + + /// Velocity access + Eigen::Matrix vel() const { + return _v->value(); + } + + // FEJ velocity access + Eigen::Matrix vel_fej() const { + return _v->fej(); + } + + /// Gyro bias access + Eigen::Matrix bias_g() const { + return _bg->value(); + } + + /// FEJ gyro bias access + Eigen::Matrix bias_g_fej() const { + return _bg->fej(); + } + + /// Accel bias access + Eigen::Matrix bias_a() const { + return _ba->value(); + } + + // FEJ accel bias access + Eigen::Matrix bias_a_fej() const { + return _ba->fej(); + } + + /// Pose type access + PoseJPL *pose() { + return _pose; + } + + /// Quaternion type access + JPLQuat *q() { + return _pose->q(); + } + + /// Position type access + Vec 
*p() { + return _pose->p(); + } + + /// Velocity type access + Vec *v() { + return _v; + } + + /// Gyroscope bias access + Vec *bg() { + return _bg; + } + + /// Acceleration bias access + Vec *ba() { + return _ba; + } + + protected: + + /// Pose subvariable + PoseJPL *_pose; + + /// Velocity subvariable + Vec *_v; + + /// Gyroscope bias subvariable + Vec *_bg; + + /// Acceleration bias subvariable + Vec *_ba; + + }; + +} + + +#endif //OV_MSCKF_TYPE_IMU_H \ No newline at end of file diff --git a/ov_msckf/src/types/JPLQuat.h b/ov_msckf/src/types/JPLQuat.h new file mode 100644 index 000000000..ec688040d --- /dev/null +++ b/ov_msckf/src/types/JPLQuat.h @@ -0,0 +1,122 @@ +#ifndef OV_MSCKF_TYPE_JPLQUAT_H +#define OV_MSCKF_TYPE_JPLQUAT_H + + +#include "utils/quat_ops.h" +#include "Type.h" + +using namespace ov_core; + + +/** + * @namespace ov_msckf + * @brief The Open VINS MSCKF + */ +namespace ov_msckf { + + + /** + * @brief Derived Type class that implements JPL quaternion + * + * This quaternion uses a left-multiplicative error state and follows the "JPL convention". + * Please checkout our utility functions in the quat_ops.h file. + * We recommend that people new quaternions check out the following resources: + * - http://mars.cs.umn.edu/tr/reports/Trawny05b.pdf + * - ftp://naif.jpl.nasa.gov/pub/naif/misc/Quaternion_White_Paper/Quaternions_White_Paper.pdf + */ + class JPLQuat : public Type { + + public: + + JPLQuat() : Type(3) { + Eigen::Matrix q0; + q0.setZero(); + q0(3) = 1.0; + set_value(q0); + set_fej(q0); + } + + ~JPLQuat() {} + + /** + * @brief Implements update operation by left-multiplying the current + * quaternion with a quaternion built from a small axis-angle perturbation. 
+ *
+ * @f[
+ * \bar{q}=norm\Big(\begin{bmatrix} 0.5*\mathbf{\theta_{dx}} \\ 1 \end{bmatrix}\Big) \hat{\bar{q}}
+ * @f]
+ *
+ * @param dx Axis-angle representation of the perturbing quaternion
+ */
+ void update(const Eigen::VectorXd dx) override {
+
+ assert(dx.rows() == _size);
+
+ //Build perturbing quaternion
+ Eigen::Matrix dq;
+ dq << .5 * dx, 1.0;
+ dq = dq / dq.norm();
+
+ //Update estimate and recompute R
+ set_value(quat_multiply(dq, _value));
+
+ }
+
+ /**
+ * @brief Sets the value of the estimate and recomputes the internal rotation matrix
+ * @param new_value New value for the quaternion estimate
+ */
+ void set_value(const Eigen::VectorXd new_value) override {
+
+ assert(new_value.rows() == 4);
+
+ _value = new_value;
+
+ //compute associated rotation
+ _R = quat_2_Rot(new_value);
+ }
+
+ Type *clone() override {
+ Type *Clone = new JPLQuat();
+ Clone->set_value(value());
+ return Clone;
+ }
+
+ /**
+ * @brief Sets the fej value and recomputes the fej rotation matrix
+ * @param new_value New value for the quaternion estimate
+ */
+ void set_fej(const Eigen::VectorXd new_value) override {
+
+ assert(new_value.rows() == 4);
+
+ _fej = new_value;
+
+ //compute associated rotation
+ _Rfej = quat_2_Rot(new_value);
+ }
+
+ /// Rotation access
+ Eigen::Matrix Rot() const {
+ return _R;
+ }
+
+ /// FEJ Rotation access
+ Eigen::Matrix Rot_fej() const {
+ return _Rfej;
+ }
+
+ protected:
+
+ // Stores the rotation
+ Eigen::Matrix _R;
+
+ // Stores the first-estimate rotation
+ Eigen::Matrix _Rfej;
+
+ };
+
+
+}
+
+#endif //OV_MSCKF_TYPE_JPLQUAT_H
diff --git a/ov_msckf/src/types/PoseJPL.h b/ov_msckf/src/types/PoseJPL.h
new file mode 100644
index 000000000..3e18a3a0e
--- /dev/null
+++ b/ov_msckf/src/types/PoseJPL.h
@@ -0,0 +1,181 @@
+#ifndef OV_MSCKF_TYPE_POSEJPL_H
+#define OV_MSCKF_TYPE_POSEJPL_H
+
+#include "utils/quat_ops.h"
+#include "JPLQuat.h"
+#include "Vec.h"
+
+
+using namespace ov_core;
+
+
+/**
+ * @namespace ov_msckf
+ * @brief The Open VINS MSCKF
+
*/ +namespace ov_msckf { + + + /** + * @brief Derived Type class that implements a 6 d.o.f pose + * + * Internally we use a JPLQuat quaternion representation for the orientation and 3D Vec position. + * Please see JPLQuat for details on its update procedure and its left multiplicative error. + */ + class PoseJPL : public Type { + + public: + + PoseJPL() : Type(6) { + + //Initialize subvariables + _q = new JPLQuat(); + _p = new Vec(3); + + Eigen::Matrix pose0; + pose0.setZero(); + pose0(3) = 1.0; + set_value(pose0); + set_fej(pose0); + } + + ~PoseJPL() {} + + /** + * @brief Update q and p using a the JPLQuat update for orientation and vector update for position + * + * @param dx Correction vector (orientation then position) + */ + void update(const Eigen::VectorXd dx) override { + + assert(dx.rows() == _size); + + Eigen::Matrix newX = _value; + + Eigen::Matrix dq; + dq << .5 * dx.block(0, 0, 3, 1), 1.0; + dq = dq / dq.norm(); + + //Update orientation + newX.block(0, 0, 4, 1) = quat_multiply(dq, quat()); + + //Update position + newX.block(4, 0, 3, 1) += dx.block(3, 0, 3, 1); + + set_value(newX); + + } + + /** + * @brief Sets the value of the estimate + * @param new_value New value we should set + */ + void set_value(const Eigen::VectorXd new_value) override { + + assert(new_value.rows() == 7); + + //Set orientation value + _q->set_value(new_value.block(0, 0, 4, 1)); + + //Set position value + _p->set_value(new_value.block(4, 0, 3, 1)); + + _value = new_value; + } + + /** + * @brief Sets the value of the first estimate + * @param new_value New value we should set + */ + void set_fej(const Eigen::VectorXd new_value) override { + + assert(new_value.rows() == 7); + //Set orientation fej value + _q->set_fej(new_value.block(0, 0, 4, 1)); + + //Set position fej value + _p->set_fej(new_value.block(4, 0, 3, 1)); + + _fej = new_value; + } + + Type *clone() override { + Type *Clone = new PoseJPL(); + Clone->set_value(value()); + Clone->set_fej(fej()); + return Clone; + } + + 
/** + * @brief Used to find the components inside the Pose if needed + * If the passed variable is a sub-variable or the current variable this will return it. + * Otherwise it will return a nullptr, meaning that it was unable to be found. + * + * @param check variable to find + */ + Type *check_if_same_variable(const Type *check) override { + if (check == this) { + return this; + } else if (check == _q) { + return q(); + } else if (check == _p) { + return p(); + } else { + return nullptr; + } + } + + /// Rotation access + Eigen::Matrix Rot() const { + return _q->Rot(); + } + + /// FEJ Rotation access + Eigen::Matrix Rot_fej() const { + return _q->Rot_fej();; + } + + /// Rotation access as quaternion + Eigen::Matrix quat() const { + return _q->value(); + } + + /// FEJ Rotation access as quaternion + Eigen::Matrix quat_fej() const { + return _q->fej(); + } + + + /// Position access + Eigen::Matrix pos() const { + return _p->value(); + } + + // FEJ position access + Eigen::Matrix pos_fej() const { + return _p->fej(); + } + + // Quaternion type access + JPLQuat *q() { + return _q; + } + + // Position type access + Vec *p() { + return _p; + } + + protected: + + /// Subvariable containing orientation + JPLQuat *_q; + + /// Subvariable containg position + Vec *_p; + + }; + +} + +#endif //OV_MSCKF_TYPE_POSEJPL_H \ No newline at end of file diff --git a/ov_msckf/src/types/Type.h b/ov_msckf/src/types/Type.h new file mode 100644 index 000000000..112cfe9cd --- /dev/null +++ b/ov_msckf/src/types/Type.h @@ -0,0 +1,134 @@ +#ifndef OV_MSCKF_TYPE_BASE_H +#define OV_MSCKF_TYPE_BASE_H + + +#include + + +/** + * @namespace ov_msckf + * @brief The Open VINS MSCKF + */ +namespace ov_msckf { + + + /** + * @brief Base class for estimated variables. + * + * This class is used how variables are represented or updated (e.g., vectors or quaternions). + * Each variable is defined by its error state size and its location in the covariance matrix. 
+ * We additionally require all sub-types to have a update procedure. + */ + class Type { + + public: + + /** + * @brief Default constructor for our Type + * + * @param size_ degrees of freedom of variable (i.e., the size of the error state) + */ + Type(int size_) { _size = size_; } + + virtual ~Type() {}; + + /** + * @brief Sets id used to track location of variable in the filter covariance + * + * @param new_id entry in filter covariance corresponding to this variable + */ + virtual void set_local_id(int new_id) { + _id = new_id; + } + + /** + * @brief Access to variable id (i.e. its location in the covariance) + */ + int id() { + return _id; + } + + /** + * @brief Access to variable size (i.e. its error state size) + */ + int size() { + return _size; + } + + /** + * @brief Update variable due to perturbation of error state + * + * @param dx Perturbation used to update the variable through a defined "boxplus" operation + */ + virtual void update(const Eigen::VectorXd dx) = 0; + + /** + * @brief Access variable's estimate + */ + virtual Eigen::VectorXd value() const { + return _value; + } + + /** + * @brief Access variable's first-estimate + */ + virtual Eigen::VectorXd fej() const { + return _fej; + } + + /** + * @brief Overwrite value of state's estimate + * @param new_value New value that will overwrite state's value + */ + virtual void set_value(const Eigen::VectorXd new_value) { + _value = new_value; + } + + /** + * @brief Overwrite value of first-estimate + * @param new_value New value that will overwrite state's fej + */ + virtual void set_fej(const Eigen::VectorXd new_value) { + _fej = new_value; + } + + /** + * @brief Create a clone of this variable + */ + virtual Type *clone() = 0; + + /** + * @brief Determine if "check" is the same variable + * If the passed variable is a sub-variable or the current variable this will return it. + * Otherwise it will return a nullptr, meaning that it was unable to be found. 
+ * + * @param check Type pointer to compare to + */ + virtual Type *check_if_same_variable(const Type *check) { + if (check == this) { + return this; + } else { + return nullptr; + } + } + + protected: + + /// First-estimate + Eigen::VectorXd _fej; + + /// Current best estimate + Eigen::VectorXd _value; + + /// Location of error state in covariance + int _id = -1; + + /// Dimension of error state + int _size = -1; + + + }; + +} + +#endif //OV_MSCKF_TYPE_BASE_H \ No newline at end of file diff --git a/ov_msckf/src/types/Vec.h b/ov_msckf/src/types/Vec.h new file mode 100644 index 000000000..ef9116630 --- /dev/null +++ b/ov_msckf/src/types/Vec.h @@ -0,0 +1,61 @@ +// +// Created by keck on 5/23/19. +// + +#ifndef OV_MSCKF_TYPE_VEC_H +#define OV_MSCKF_TYPE_VEC_H + +#include "Type.h" + + + +/** + * @namespace ov_msckf + * @brief The Open VINS MSCKF + */ +namespace ov_msckf { + + + /** + * @brief Derived Type class that implements vector variables + */ + class Vec : public Type { + + public: + + /** + * @brief Default constructor for Vec + * @param dim Size of the vector (will be same as error state) + */ + Vec(int dim) : Type(dim) { + _value = Eigen::VectorXd::Zero(dim); + _fej = Eigen::VectorXd::Zero(dim); + } + + ~Vec() {} + + /** + * @brief Implements the update operation through standard vector addition + * @param dx Additive error state correction + */ + void update(const Eigen::VectorXd dx) override { + assert(dx.rows() == _size); + set_value(_value + dx); + } + + /** + * @brief Performs all the cloning + */ + Type *clone() override { + Type *Clone = new Vec(_size); + Clone->set_value(value()); + Clone->set_fej(fej()); + return Clone; + } + + + }; + +} + +#endif //OV_MSCKF_TYPE_VEC_H \ No newline at end of file From d3e70fc9767ba9ec207ca4fe84d7d51881d1d76a Mon Sep 17 00:00:00 2001 From: Patrick Geneva Date: Wed, 5 Jun 2019 12:55:53 -0400 Subject: [PATCH 6/6] started on viomanager --- ov_msckf/CMakeLists.txt | 2 + ov_msckf/src/core/VioManager.cpp | 36 ++++++++++++ 
ov_msckf/src/core/VioManager.h | 93 +++++++++++++++++++++++++++++++ ov_msckf/src/core/Visualizer.cpp | 0 ov_msckf/src/core/Visualizer.h | 0 ov_msckf/src/state/State.h | 4 -- ov_msckf/src/state/StateOptions.h | 6 +- ov_msckf/src/test_msckf.cpp | 34 +++++++++++ 8 files changed, 168 insertions(+), 7 deletions(-) create mode 100644 ov_msckf/src/core/VioManager.cpp create mode 100644 ov_msckf/src/core/VioManager.h create mode 100644 ov_msckf/src/core/Visualizer.cpp create mode 100644 ov_msckf/src/core/Visualizer.h diff --git a/ov_msckf/CMakeLists.txt b/ov_msckf/CMakeLists.txt index 5ebe6e22b..19ade12bd 100644 --- a/ov_msckf/CMakeLists.txt +++ b/ov_msckf/CMakeLists.txt @@ -66,6 +66,8 @@ add_library(ov_msckf src/state/State.cpp src/state/StateHelper.cpp src/state/Propagator.cpp + src/core/VioManager.cpp + src/core/Visualizer.cpp ) target_link_libraries(ov_msckf ${thirdparty_libraries}) diff --git a/ov_msckf/src/core/VioManager.cpp b/ov_msckf/src/core/VioManager.cpp new file mode 100644 index 000000000..4fc8af47b --- /dev/null +++ b/ov_msckf/src/core/VioManager.cpp @@ -0,0 +1,36 @@ +#include "VioManager.h" + + + +using namespace ov_core; +using namespace ov_msckf; + + + + + +VioManager::VioManager(ros::NodeHandle &nh) { + + + + // Load our state options + StateOptions state_options; + + nh.param("use_fej", state_options.do_fej, false); + nh.param("use_imuavg", state_options.imu_avg, false); + nh.param("calib_cam_extrinsics", state_options.do_calib_camera_pose, false); + nh.param("calib_cam_intrinsics", state_options.do_calib_camera_intrinsics, false); + nh.param("calib_camimu_dt", state_options.do_calib_camera_timeoffset, false); + nh.param("max_clones", state_options.max_clone_size, 10); + nh.param("max_slam", state_options.max_slam_features, 0); + nh.param("max_cameras", state_options.num_cameras, 1); + + + + + +} + + + + diff --git a/ov_msckf/src/core/VioManager.h b/ov_msckf/src/core/VioManager.h new file mode 100644 index 000000000..d392d8da3 --- /dev/null +++ 
b/ov_msckf/src/core/VioManager.h @@ -0,0 +1,93 @@ +#ifndef OV_MSCKF_VIOMANAGER_H +#define OV_MSCKF_VIOMANAGER_H + + +#include "track/TrackAruco.h" +#include "track/TrackDescriptor.h" +#include "track/TrackKLT.h" + +#include "state/Propagator.h" +#include "state/State.h" +#include "state/StateHelper.h" + + +/** + * @namespace ov_msckf + * @brief The Open VINS MSCKF + */ +namespace ov_msckf { + + + + /** + * @brief Core class that manages the entire system + * + * This class contains the state and other algorithms needed for the MSCKF to work. + * We feed in measurements into this class and send them to their respective algorithms. + * If we have measurements to propagate or update with, this class will call on our state to do that. + */ + class VioManager { + + + public: + + + /** + * @brief Default constructor, will load all configuration variables + * @param nh ROS node handler which we will load parameters from + */ + VioManager(ros::NodeHandle& nh); + + + /** + * @brief Feed function for inertial data + * @param timestamp Time of the inertial measurement + * @param wm Angular velocity + * @param am Linear acceleration + */ + void feed_measurement_imu(double timestamp, Eigen::Matrix wm, Eigen::Matrix am); + + + /** + * @brief Feed function for a single camera + * @param timestamp Time that this image was collected + * @param img0 Grayscale image + * @param cam_id Unique id of what camera the image is from + */ + void feed_measurement_monocular(double timestamp, cv::Mat& img0, size_t cam_id); + + /** + * @brief Feed function for stereo camera pair + * @param timestamp Time that this image was collected + * @param img0 Grayscale image + * @param img1 Grayscale image + * @param cam_id0 Unique id of what camera the image is from + * @param cam_id1 Unique id of what camera the image is from + */ + void feed_measurement_stereo(double timestamp, cv::Mat& img0, cv::Mat& img1, size_t cam_id0, size_t cam_id1); + + + + + protected: + + + /// Our master state object :D + 
State* state; + + /// Our sparse feature tracker (klt or descriptor) + TrackBase* trackFEATS; + + /// Our aruoc tracker + TrackBase* trackARUCO; + + + + }; + + +} + + + +#endif //OV_MSCKF_VIOMANAGER_H diff --git a/ov_msckf/src/core/Visualizer.cpp b/ov_msckf/src/core/Visualizer.cpp new file mode 100644 index 000000000..e69de29bb diff --git a/ov_msckf/src/core/Visualizer.h b/ov_msckf/src/core/Visualizer.h new file mode 100644 index 000000000..e69de29bb diff --git a/ov_msckf/src/state/State.h b/ov_msckf/src/state/State.h index 8d0de7d57..f6618ed64 100644 --- a/ov_msckf/src/state/State.h +++ b/ov_msckf/src/state/State.h @@ -1,7 +1,3 @@ -// -// Created by keck on 6/3/19. -// - #ifndef OV_MSCKF_STATE_H #define OV_MSCKF_STATE_H diff --git a/ov_msckf/src/state/StateOptions.h b/ov_msckf/src/state/StateOptions.h index f0b25112e..3a34f463c 100644 --- a/ov_msckf/src/state/StateOptions.h +++ b/ov_msckf/src/state/StateOptions.h @@ -30,13 +30,13 @@ namespace ov_msckf { bool do_calib_camera_timeoffset = false; /// Max clone size of sliding window - size_t max_clone_size = 8; + int max_clone_size = 8; /// Max number of estimated SLAM features - size_t max_slam_features = 0; + int max_slam_features = 0; /// Number of cameras - size_t num_cameras = 1; + int num_cameras = 1; }; diff --git a/ov_msckf/src/test_msckf.cpp b/ov_msckf/src/test_msckf.cpp index e69de29bb..151b94502 100644 --- a/ov_msckf/src/test_msckf.cpp +++ b/ov_msckf/src/test_msckf.cpp @@ -0,0 +1,34 @@ + + + + + + + +// Main function +int main(int argc, char** argv) +{ + + + + + +} + + + + + + + + + + + + + + + + + +