diff --git a/Doxyfile b/Doxyfile index 3852779d6..92e1aeb8f 100644 --- a/Doxyfile +++ b/Doxyfile @@ -1761,7 +1761,7 @@ LATEX_BIB_STYLE = plainnat # The default value is: NO. # This tag requires that the tag GENERATE_LATEX is set to YES. -LATEX_TIMESTAMP = NO +LATEX_TIMESTAMP = YES #--------------------------------------------------------------------------- # Configuration options related to the RTF output diff --git a/ReadMe.md b/ReadMe.md index d1698022a..1448a49c1 100644 --- a/ReadMe.md +++ b/ReadMe.md @@ -21,7 +21,7 @@ details on what the system supports. ## News / Events - +* **May 11, 2023** - Inertial intrinsic support released as part of v2.7 along with a few bug fixes and improvements to stereo KLT tracking. Please check out the [release page](https://github.com/rpng/open_vins/releases/tag/v2.7) for details. * **April 15, 2023** - Minor update to v2.6.3 to support incremental feature triangulation of active features for downstream applications, faster zero-velocity update, small bug fixes, some example realsense configurations, and cached fast state prediction. Please check out the [release page](https://github.com/rpng/open_vins/releases/tag/v2.6.3) for details. * **April 3, 2023** - We have released a monocular plane-aided VINS, termed [ov_plane](https://github.com/rpng/ov_plane), which leverages the OpenVINS project. Both now support the released [Indoor AR Table](https://github.com/rpng/ar_table_dataset) dataset. * **July 14, 2022** - Improved feature extraction logic for >100hz tracking, some bug fixes and updated scripts. See v2.6.1 [PR#259](https://github.com/rpng/open_vins/pull/259) and v2.6.2 [PR#264](https://github.com/rpng/open_vins/pull/264). @@ -79,6 +79,7 @@ details on what the system supports. 
* Camera to IMU transform * Camera to IMU time offset * Camera intrinsics + * Inertial intrinsics (including g-sensitivity) * Environmental SLAM feature * OpenCV ARUCO tag SLAM features * Sparse feature SLAM features @@ -90,7 +91,7 @@ details on what the system supports. * Masked tracking * Static and dynamic state initialization * Zero velocity detection and updates -* Out of the box evaluation on EurocMav, TUM-VI, UZH-FPV, KAIST Urban and VIO datasets +* Out of the box evaluation on EuRoC MAV, TUM-VI, UZH-FPV, KAIST Urban and other VIO datasets * Extensive evaluation suite (ATE, RPE, NEES, RMSE, etc..) ## Codebase Extensions diff --git a/config/euroc_mav/estimator_config.yaml b/config/euroc_mav/estimator_config.yaml index fd3bc6d63..560750fc0 100644 --- a/config/euroc_mav/estimator_config.yaml +++ b/config/euroc_mav/estimator_config.yaml @@ -3,15 +3,15 @@ verbosity: "INFO" # ALL, DEBUG, INFO, WARNING, ERROR, SILENT use_fej: true # if first-estimate Jacobians should be used (enable for good consistency) -use_imuavg: true # if using discrete integration, if we should average sequential IMU measurements to "smooth" it -use_rk4int: true # if rk4 integration should be used (overrides imu averaging) - integration: "rk4" # discrete, rk4, analytical (if rk4 or analytical used then analytical covariance propagation is used) use_stereo: true # if we have more than 1 camera, if we should try to track stereo constraints between pairs max_cameras: 2 # how many cameras we have 1 = mono, 2 = stereo, >2 = binocular (all mono tracking) calib_cam_extrinsics: true # if the transform between camera and IMU should be optimized R_ItoC, p_CinI calib_cam_intrinsics: true # if camera intrinsics should be optimized (focal, center, distortion) calib_cam_timeoffset: true # if timeoffset between camera and IMU should be optimized +calib_imu_intrinsics: false # if imu intrinsics should be calibrated (rotation and skew-scale matrix) +calib_imu_g_sensitivity: false # if gyroscope gravity 
sensitivity (Tg) should be calibrated max_clones: 11 # how many clones in the sliding window max_slam: 50 # number of features in our state vector @@ -40,7 +40,7 @@ zupt_only_at_beginning: false init_window_time: 2.0 # how many seconds to collect initialization information init_imu_thresh: 1.5 # threshold for variance of the accelerometer to detect a "jerk" in motion init_max_disparity: 10.0 # max disparity to consider the platform stationary (dependent on resolution) -init_max_features: 75 # how many features to track during initialization (saves on computation) +init_max_features: 50 # how many features to track during initialization (saves on computation) init_dyn_use: false # if dynamic initialization should be used init_dyn_mle_opt_calib: false # if we should optimize calibration during intialization (not recommended) diff --git a/config/euroc_mav/kalibr_imu_chain.yaml b/config/euroc_mav/kalibr_imu_chain.yaml index 394cdca13..c85f0f677 100644 --- a/config/euroc_mav/kalibr_imu_chain.yaml +++ b/config/euroc_mav/kalibr_imu_chain.yaml @@ -10,7 +10,35 @@ imu0: accelerometer_random_walk: 3.0000e-3 # [ m / s^3 / sqrt(Hz) ]. 
( accel bias diffusion ) gyroscope_noise_density: 1.6968e-04 # [ rad / s / sqrt(Hz) ] ( gyro "white noise" ) gyroscope_random_walk: 1.9393e-05 # [ rad / s^2 / sqrt(Hz) ] ( gyro bias diffusion ) - model: calibrated rostopic: /imu0 time_offset: 0.0 - update_rate: 200.0 \ No newline at end of file + update_rate: 200.0 + # three different modes supported: + # "calibrated" (same as "kalibr"), "kalibr", "rpng" + model: "kalibr" + # how to get from Kalibr imu.yaml result file: + # - Tw is imu0:gyroscopes:M: + # - R_IMUtoGYRO: is imu0:gyroscopes:C_gyro_i: + # - Ta is imu0:accelerometers:M: + # - R_IMUtoACC not used by Kalibr + # - Tg is imu0:gyroscopes:A: + Tw: + - [ 1.0, 0.0, 0.0 ] + - [ 0.0, 1.0, 0.0 ] + - [ 0.0, 0.0, 1.0 ] + R_IMUtoGYRO: + - [ 1.0, 0.0, 0.0 ] + - [ 0.0, 1.0, 0.0 ] + - [ 0.0, 0.0, 1.0 ] + Ta: + - [ 1.0, 0.0, 0.0 ] + - [ 0.0, 1.0, 0.0 ] + - [ 0.0, 0.0, 1.0 ] + R_IMUtoACC: + - [ 1.0, 0.0, 0.0 ] + - [ 0.0, 1.0, 0.0 ] + - [ 0.0, 0.0, 1.0 ] + Tg: + - [ 0.0, 0.0, 0.0 ] + - [ 0.0, 0.0, 0.0 ] + - [ 0.0, 0.0, 0.0 ] diff --git a/config/kaist/estimator_config.yaml b/config/kaist/estimator_config.yaml index 7162487da..c72fac448 100644 --- a/config/kaist/estimator_config.yaml +++ b/config/kaist/estimator_config.yaml @@ -1,17 +1,17 @@ %YAML:1.0 # need to specify the file type at the top! 
-verbosity: "DEBUG" # ALL, DEBUG, INFO, WARNING, ERROR, SILENT - -use_fej: true # if we should use first-estimate Jacobians (enable for good consistency) -use_imuavg: true # for our discrete integration, if we should average sequential IMU measurements to "smooth" it -use_rk4int: true # if rk4 integration should be used (overrides imu averaging) +verbosity: "INFO" # ALL, DEBUG, INFO, WARNING, ERROR, SILENT +use_fej: true # if first-estimate Jacobians should be used (enable for good consistency) +integration: "rk4" # discrete, rk4, analytical (if rk4 or analytical used then analytical covariance propagation is used) use_stereo: true # if we have more than 1 camera, if we should try to track stereo constraints -max_cameras: 2 +max_cameras: 2 # how many cameras we have 1 = mono, 2 = stereo, >2 = binocular (all mono tracking) calib_cam_extrinsics: false # degenerate motion calib_cam_intrinsics: true calib_cam_timeoffset: true # degenerate motion +calib_imu_intrinsics: false +calib_imu_g_sensitivity: false max_clones: 11 max_slam: 50 @@ -40,7 +40,7 @@ zupt_only_at_beginning: false init_window_time: 2.0 init_imu_thresh: 0.5 init_max_disparity: 1.5 -init_max_features: 75 +init_max_features: 50 init_dyn_use: true init_dyn_mle_opt_calib: false diff --git a/config/kaist/kalibr_imu_chain.yaml b/config/kaist/kalibr_imu_chain.yaml index df2714f67..8066a5634 100644 --- a/config/kaist/kalibr_imu_chain.yaml +++ b/config/kaist/kalibr_imu_chain.yaml @@ -18,7 +18,35 @@ imu0: accelerometer_random_walk: 1.0000e-04 # [ m / s^3 / sqrt(Hz) ]. 
( accel bias diffusion ) gyroscope_noise_density: 1.7453e-04 # [ rad / s / sqrt(Hz) ] ( gyro "white noise" ) gyroscope_random_walk: 1.0000e-05 # [ rad / s^2 / sqrt(Hz) ] ( gyro bias diffusion ) - model: calibrated rostopic: /imu/data_raw time_offset: 0.0 - update_rate: 500.0 \ No newline at end of file + update_rate: 500.0 + # three different modes supported: + # "calibrated" (same as "kalibr"), "kalibr", "rpng" + model: "kalibr" + # how to get from Kalibr imu.yaml result file: + # - Tw is imu0:gyroscopes:M: + # - R_IMUtoGYRO: is imu0:gyroscopes:M: + # - Ta is imu0:accelerometers:M: + # - R_IMUtoACC not used by Kalibr + # - Tg is imu0:gyroscopes:A: + Tw: + - [ 1.0, 0.0, 0.0 ] + - [ 0.0, 1.0, 0.0 ] + - [ 0.0, 0.0, 1.0 ] + R_IMUtoGYRO: + - [ 1.0, 0.0, 0.0 ] + - [ 0.0, 1.0, 0.0 ] + - [ 0.0, 0.0, 1.0 ] + Ta: + - [ 1.0, 0.0, 0.0 ] + - [ 0.0, 1.0, 0.0 ] + - [ 0.0, 0.0, 1.0 ] + R_IMUtoACC: + - [ 1.0, 0.0, 0.0 ] + - [ 0.0, 1.0, 0.0 ] + - [ 0.0, 0.0, 1.0 ] + Tg: + - [ 0.0, 0.0, 0.0 ] + - [ 0.0, 0.0, 0.0 ] + - [ 0.0, 0.0, 0.0 ] \ No newline at end of file diff --git a/config/kaist_vio/estimator_config.yaml b/config/kaist_vio/estimator_config.yaml index b71f1a339..999a95e4c 100644 --- a/config/kaist_vio/estimator_config.yaml +++ b/config/kaist_vio/estimator_config.yaml @@ -3,15 +3,15 @@ verbosity: "INFO" # ALL, DEBUG, INFO, WARNING, ERROR, SILENT use_fej: true # if first-estimate Jacobians should be used (enable for good consistency) -use_imuavg: true # if using discrete integration, if we should average sequential IMU measurements to "smooth" it -use_rk4int: true # if rk4 integration should be used (overrides imu averaging) - +integration: "rk4" # discrete, rk4, analytical (if rk4 or analytical used then analytical covariance propagation is used) use_stereo: true # NEED TO USE STEREO! OTHERWISE CAN'T RECOVER SCALE!!!!!! DEGENERATE MOTION!!! max_cameras: 2 # NEED TO USE STEREO! OTHERWISE CAN'T RECOVER SCALE!!!!!! DEGENERATE MOTION!!! 
calib_cam_extrinsics: false # disable since this is a degenerate motion calib_cam_intrinsics: true calib_cam_timeoffset: false # disable since this is a degenerate motion +calib_imu_intrinsics: false +calib_imu_g_sensitivity: false max_clones: 11 # how many clones in the sliding window max_slam: 50 # number of features in our state vector @@ -40,7 +40,7 @@ zupt_only_at_beginning: false init_window_time: 2.0 # how many seconds to collect initialization information init_imu_thresh: 0.60 # threshold for variance of the accelerometer to detect a "jerk" in motion init_max_disparity: 5.0 # max disparity to consider the platform stationary (dependent on resolution) -init_max_features: 75 # how many features to track during initialization (saves on computation) +init_max_features: 50 # how many features to track during initialization (saves on computation) init_dyn_use: false init_dyn_mle_opt_calib: false diff --git a/config/kaist_vio/kalibr_imu_chain.yaml b/config/kaist_vio/kalibr_imu_chain.yaml index b9a3fe5d1..9b57b634f 100644 --- a/config/kaist_vio/kalibr_imu_chain.yaml +++ b/config/kaist_vio/kalibr_imu_chain.yaml @@ -14,7 +14,35 @@ imu0: accelerometer_random_walk: 0.009 # [ m / s^3 / sqrt(Hz) ]. 
( accel bias diffusion ) gyroscope_noise_density: 0.001 # [ rad / s / sqrt(Hz) ] ( gyro "white noise" ) gyroscope_random_walk: 0.0003 # [ rad / s^2 / sqrt(Hz) ] ( gyro bias diffusion ) - model: calibrated rostopic: /mavros/imu/data time_offset: 0.0 - update_rate: 100.0 \ No newline at end of file + update_rate: 100.0 + # three different modes supported: + # "calibrated" (same as "kalibr"), "kalibr", "rpng" + model: "kalibr" + # how to get from Kalibr imu.yaml result file: + # - Tw is imu0:gyroscopes:M: + # - R_IMUtoGYRO: is imu0:gyroscopes:M: + # - Ta is imu0:accelerometers:M: + # - R_IMUtoACC not used by Kalibr + # - Tg is imu0:gyroscopes:A: + Tw: + - [ 1.0, 0.0, 0.0 ] + - [ 0.0, 1.0, 0.0 ] + - [ 0.0, 0.0, 1.0 ] + R_IMUtoGYRO: + - [ 1.0, 0.0, 0.0 ] + - [ 0.0, 1.0, 0.0 ] + - [ 0.0, 0.0, 1.0 ] + Ta: + - [ 1.0, 0.0, 0.0 ] + - [ 0.0, 1.0, 0.0 ] + - [ 0.0, 0.0, 1.0 ] + R_IMUtoACC: + - [ 1.0, 0.0, 0.0 ] + - [ 0.0, 1.0, 0.0 ] + - [ 0.0, 0.0, 1.0 ] + Tg: + - [ 0.0, 0.0, 0.0 ] + - [ 0.0, 0.0, 0.0 ] + - [ 0.0, 0.0, 0.0 ] \ No newline at end of file diff --git a/config/rpng_aruco/estimator_config.yaml b/config/rpng_aruco/estimator_config.yaml index 0317ea1b9..02e8db9aa 100644 --- a/config/rpng_aruco/estimator_config.yaml +++ b/config/rpng_aruco/estimator_config.yaml @@ -2,16 +2,16 @@ verbosity: "INFO" # ALL, DEBUG, INFO, WARNING, ERROR, SILENT -use_fej: true # if we should use first-estimate Jacobians (enable for good consistency) -use_imuavg: true # for our discrete integration, if we should average sequential IMU measurements to "smooth" it -use_rk4int: true # if rk4 integration should be used (overrides imu averaging) - +use_fej: true # if first-estimate Jacobians should be used (enable for good consistency) +integration: "rk4" # discrete, rk4, analytical (if rk4 or analytical used then analytical covariance propagation is used) use_stereo: true # if we have more than 1 camera, if we should try to track stereo constraints -max_cameras: 2 +max_cameras: 2 # how many cameras 
we have 1 = mono, 2 = stereo, >2 = binocular (all mono tracking) calib_cam_extrinsics: true calib_cam_intrinsics: true calib_cam_timeoffset: true +calib_imu_intrinsics: false +calib_imu_g_sensitivity: false max_clones: 11 max_slam: 50 @@ -40,7 +40,7 @@ zupt_only_at_beginning: true init_window_time: 2.0 init_imu_thresh: 1.2 init_max_disparity: 2.0 -init_max_features: 75 +init_max_features: 50 init_dyn_use: false init_dyn_mle_opt_calib: false diff --git a/config/rpng_aruco/kalibr_imu_chain.yaml b/config/rpng_aruco/kalibr_imu_chain.yaml index 394cdca13..401ddbfdc 100644 --- a/config/rpng_aruco/kalibr_imu_chain.yaml +++ b/config/rpng_aruco/kalibr_imu_chain.yaml @@ -10,7 +10,35 @@ imu0: accelerometer_random_walk: 3.0000e-3 # [ m / s^3 / sqrt(Hz) ]. ( accel bias diffusion ) gyroscope_noise_density: 1.6968e-04 # [ rad / s / sqrt(Hz) ] ( gyro "white noise" ) gyroscope_random_walk: 1.9393e-05 # [ rad / s^2 / sqrt(Hz) ] ( gyro bias diffusion ) - model: calibrated rostopic: /imu0 time_offset: 0.0 - update_rate: 200.0 \ No newline at end of file + update_rate: 200.0 + # three different modes supported: + # "calibrated" (same as "kalibr"), "kalibr", "rpng" + model: "kalibr" + # how to get from Kalibr imu.yaml result file: + # - Tw is imu0:gyroscopes:M: + # - R_IMUtoGYRO: is imu0:gyroscopes:M: + # - Ta is imu0:accelerometers:M: + # - R_IMUtoACC not used by Kalibr + # - Tg is imu0:gyroscopes:A: + Tw: + - [ 1.0, 0.0, 0.0 ] + - [ 0.0, 1.0, 0.0 ] + - [ 0.0, 0.0, 1.0 ] + R_IMUtoGYRO: + - [ 1.0, 0.0, 0.0 ] + - [ 0.0, 1.0, 0.0 ] + - [ 0.0, 0.0, 1.0 ] + Ta: + - [ 1.0, 0.0, 0.0 ] + - [ 0.0, 1.0, 0.0 ] + - [ 0.0, 0.0, 1.0 ] + R_IMUtoACC: + - [ 1.0, 0.0, 0.0 ] + - [ 0.0, 1.0, 0.0 ] + - [ 0.0, 0.0, 1.0 ] + Tg: + - [ 0.0, 0.0, 0.0 ] + - [ 0.0, 0.0, 0.0 ] + - [ 0.0, 0.0, 0.0 ] \ No newline at end of file diff --git a/config/rpng_ironsides/estimator_config.yaml b/config/rpng_ironsides/estimator_config.yaml index 10aad5496..27dde860d 100644 --- a/config/rpng_ironsides/estimator_config.yaml +++ 
b/config/rpng_ironsides/estimator_config.yaml @@ -2,16 +2,16 @@ verbosity: "INFO" # ALL, DEBUG, INFO, WARNING, ERROR, SILENT -use_fej: true # if we should use first-estimate Jacobians (enable for good consistency) -use_imuavg: true # for our discrete integration, if we should average sequential IMU measurements to "smooth" it -use_rk4int: true # if rk4 integration should be used (overrides imu averaging) - +use_fej: true # if first-estimate Jacobians should be used (enable for good consistency) +integration: "rk4" # discrete, rk4, analytical (if rk4 or analytical used then analytical covariance propagation is used) use_stereo: true # if we have more than 1 camera, if we should try to track stereo constraints -max_cameras: 2 +max_cameras: 2 # how many cameras we have 1 = mono, 2 = stereo, >2 = binocular (all mono tracking) calib_cam_extrinsics: false # degenerate motion calib_cam_intrinsics: true calib_cam_timeoffset: false # degenerate motion +calib_imu_intrinsics: false +calib_imu_g_sensitivity: false max_clones: 11 max_slam: 50 @@ -40,7 +40,7 @@ zupt_only_at_beginning: false init_window_time: 2.0 init_imu_thresh: 0.5 init_max_disparity: 1.5 -init_max_features: 75 +init_max_features: 50 init_dyn_use: false init_dyn_mle_opt_calib: false diff --git a/config/rpng_ironsides/kalibr_imu_chain.yaml b/config/rpng_ironsides/kalibr_imu_chain.yaml index 7e14da487..d5144b3a9 100644 --- a/config/rpng_ironsides/kalibr_imu_chain.yaml +++ b/config/rpng_ironsides/kalibr_imu_chain.yaml @@ -10,7 +10,35 @@ imu0: accelerometer_random_walk: 1.3054568211204843e-04 # [ m / s^3 / sqrt(Hz) ]. 
( accel bias diffusion ) gyroscope_noise_density: 1.1186830841306218e-04 # [ rad / s / sqrt(Hz) ] ( gyro "white noise" ) gyroscope_random_walk: 8.997530210630026e-07 # [ rad / s^2 / sqrt(Hz) ] ( gyro bias diffusion ) - model: calibrated rostopic: /imu0 time_offset: 0.0 - update_rate: 200.0 \ No newline at end of file + update_rate: 200.0 + # three different modes supported: + # "calibrated" (same as "kalibr"), "kalibr", "rpng" + model: "kalibr" + # how to get from Kalibr imu.yaml result file: + # - Tw is imu0:gyroscopes:M: + # - R_IMUtoGYRO: is imu0:gyroscopes:M: + # - Ta is imu0:accelerometers:M: + # - R_IMUtoACC not used by Kalibr + # - Tg is imu0:gyroscopes:A: + Tw: + - [ 1.0, 0.0, 0.0 ] + - [ 0.0, 1.0, 0.0 ] + - [ 0.0, 0.0, 1.0 ] + R_IMUtoGYRO: + - [ 1.0, 0.0, 0.0 ] + - [ 0.0, 1.0, 0.0 ] + - [ 0.0, 0.0, 1.0 ] + Ta: + - [ 1.0, 0.0, 0.0 ] + - [ 0.0, 1.0, 0.0 ] + - [ 0.0, 0.0, 1.0 ] + R_IMUtoACC: + - [ 1.0, 0.0, 0.0 ] + - [ 0.0, 1.0, 0.0 ] + - [ 0.0, 0.0, 1.0 ] + Tg: + - [ 0.0, 0.0, 0.0 ] + - [ 0.0, 0.0, 0.0 ] + - [ 0.0, 0.0, 0.0 ] \ No newline at end of file diff --git a/config/rpng_plane/estimator_config.yaml b/config/rpng_plane/estimator_config.yaml index cb4c23484..e2f779c13 100644 --- a/config/rpng_plane/estimator_config.yaml +++ b/config/rpng_plane/estimator_config.yaml @@ -3,15 +3,15 @@ verbosity: "INFO" # ALL, DEBUG, INFO, WARNING, ERROR, SILENT use_fej: true # if first-estimate Jacobians should be used (enable for good consistency) -use_imuavg: true # if using discrete integration, if we should average sequential IMU measurements to "smooth" it -use_rk4int: true # if rk4 integration should be used (overrides imu averaging) - +integration: "rk4" # discrete, rk4, analytical (if rk4 or analytical used then analytical covariance propagation is used) use_stereo: true # if we have more than 1 camera, if we should try to track stereo constraints between pairs max_cameras: 1 # how many cameras we have 1 = mono, 2 = stereo, >2 = binocular (all mono tracking) 
calib_cam_extrinsics: true # if the transform between camera and IMU should be optimized R_ItoC, p_CinI calib_cam_intrinsics: true # if camera intrinsics should be optimized (focal, center, distortion) calib_cam_timeoffset: true # if timeoffset between camera and IMU should be optimized +calib_imu_intrinsics: false # if imu intrinsics should be calibrated (rotation and skew-scale matrix) +calib_imu_g_sensitivity: false # if gyroscope gravity sensitivity (Tg) should be calibrated max_clones: 11 # how many clones in the sliding window max_slam: 25 # number of features in our state vector @@ -40,7 +40,7 @@ zupt_only_at_beginning: true init_window_time: 2.0 # how many seconds to collect initialization information init_imu_thresh: 0.5 # threshold for variance of the accelerometer to detect a "jerk" in motion init_max_disparity: 4.0 # max disparity to consider the platform stationary (dependent on resolution) -init_max_features: 75 # how many features to track during initialization (saves on computation) +init_max_features: 50 # how many features to track during initialization (saves on computation) init_dyn_use: false # if dynamic initialization should be used init_dyn_mle_opt_calib: false # if we should optimize calibration during intialization (not recommended) diff --git a/config/rpng_plane/kalibr_imu_chain.yaml b/config/rpng_plane/kalibr_imu_chain.yaml index a1d0a3c12..e078b037e 100644 --- a/config/rpng_plane/kalibr_imu_chain.yaml +++ b/config/rpng_plane/kalibr_imu_chain.yaml @@ -10,7 +10,35 @@ imu0: accelerometer_random_walk: 0.00041327852 gyroscope_noise_density: 0.00020544166 gyroscope_random_walk: 1.110622e-05 - model: calibrated rostopic: /d455/imu time_offset: 0.0 update_rate: 400 + # three different modes supported: + # "calibrated" (same as "kalibr"), "kalibr", "rpng" + model: "kalibr" + # how to get from Kalibr imu.yaml result file: + # - Tw is imu0:gyroscopes:M: + # - R_IMUtoGYRO: is imu0:gyroscopes:M: + # - Ta is imu0:accelerometers:M: + # - R_IMUtoACC 
not used by Kalibr + # - Tg is imu0:gyroscopes:A: + Tw: + - [ 1.0, 0.0, 0.0 ] + - [ 0.0, 1.0, 0.0 ] + - [ 0.0, 0.0, 1.0 ] + R_IMUtoGYRO: + - [ 1.0, 0.0, 0.0 ] + - [ 0.0, 1.0, 0.0 ] + - [ 0.0, 0.0, 1.0 ] + Ta: + - [ 1.0, 0.0, 0.0 ] + - [ 0.0, 1.0, 0.0 ] + - [ 0.0, 0.0, 1.0 ] + R_IMUtoACC: + - [ 1.0, 0.0, 0.0 ] + - [ 0.0, 1.0, 0.0 ] + - [ 0.0, 0.0, 1.0 ] + Tg: + - [ 0.0, 0.0, 0.0 ] + - [ 0.0, 0.0, 0.0 ] + - [ 0.0, 0.0, 0.0 ] diff --git a/config/rpng_sim/estimator_config.yaml b/config/rpng_sim/estimator_config.yaml index e5fe73514..384a2f6f5 100644 --- a/config/rpng_sim/estimator_config.yaml +++ b/config/rpng_sim/estimator_config.yaml @@ -2,16 +2,16 @@ verbosity: "INFO" # ALL, DEBUG, INFO, WARNING, ERROR, SILENT -use_fej: true # if we should use first-estimate Jacobians (enable for good consistency) -use_imuavg: true # for our discrete integration, if we should average sequential IMU measurements to "smooth" it -use_rk4int: true # if rk4 integration should be used (overrides imu averaging) - +use_fej: true # if first-estimate Jacobians should be used (enable for good consistency) +integration: "rk4" # discrete, rk4, analytical (if rk4 or analytical used then analytical covariance propagation is used) use_stereo: true # if we have more than 1 camera, if we should try to track stereo constraints -max_cameras: 2 +max_cameras: 2 # how many cameras we have 1 = mono, 2 = stereo, >2 = binocular (all mono tracking) -calib_cam_extrinsics: true -calib_cam_intrinsics: true -calib_cam_timeoffset: true +calib_cam_extrinsics: true # if the transform between camera and IMU should be optimized R_ItoC, p_CinI +calib_cam_intrinsics: true # if camera intrinsics should be optimized (focal, center, distortion) +calib_cam_timeoffset: true # if timeoffset between camera and IMU should be optimized +calib_imu_intrinsics: true # if imu intrinsics should be calibrated +calib_imu_g_sensitivity: true # if gyroscope gravity sensitivity should be calibrated max_clones: 11 max_slam: 50 @@ -122,7 
+122,7 @@ sim_seed_preturb: 0 sim_seed_measurements: 0 sim_do_perturbation: false sim_traj_path: "src/open_vins/ov_data/sim/tum_corridor1_512_16_okvis.txt" -sim_distance_threshold: 1.2 +sim_distance_threshold: 1.1 sim_freq_cam: 10 sim_freq_imu: 400 sim_min_feature_gen_dist: 5.0 diff --git a/config/rpng_sim/kalibr_imu_chain.yaml b/config/rpng_sim/kalibr_imu_chain.yaml index 394cdca13..401ddbfdc 100644 --- a/config/rpng_sim/kalibr_imu_chain.yaml +++ b/config/rpng_sim/kalibr_imu_chain.yaml @@ -10,7 +10,35 @@ imu0: accelerometer_random_walk: 3.0000e-3 # [ m / s^3 / sqrt(Hz) ]. ( accel bias diffusion ) gyroscope_noise_density: 1.6968e-04 # [ rad / s / sqrt(Hz) ] ( gyro "white noise" ) gyroscope_random_walk: 1.9393e-05 # [ rad / s^2 / sqrt(Hz) ] ( gyro bias diffusion ) - model: calibrated rostopic: /imu0 time_offset: 0.0 - update_rate: 200.0 \ No newline at end of file + update_rate: 200.0 + # three different modes supported: + # "calibrated" (same as "kalibr"), "kalibr", "rpng" + model: "kalibr" + # how to get from Kalibr imu.yaml result file: + # - Tw is imu0:gyroscopes:M: + # - R_IMUtoGYRO: is imu0:gyroscopes:M: + # - Ta is imu0:accelerometers:M: + # - R_IMUtoACC not used by Kalibr + # - Tg is imu0:gyroscopes:A: + Tw: + - [ 1.0, 0.0, 0.0 ] + - [ 0.0, 1.0, 0.0 ] + - [ 0.0, 0.0, 1.0 ] + R_IMUtoGYRO: + - [ 1.0, 0.0, 0.0 ] + - [ 0.0, 1.0, 0.0 ] + - [ 0.0, 0.0, 1.0 ] + Ta: + - [ 1.0, 0.0, 0.0 ] + - [ 0.0, 1.0, 0.0 ] + - [ 0.0, 0.0, 1.0 ] + R_IMUtoACC: + - [ 1.0, 0.0, 0.0 ] + - [ 0.0, 1.0, 0.0 ] + - [ 0.0, 0.0, 1.0 ] + Tg: + - [ 0.0, 0.0, 0.0 ] + - [ 0.0, 0.0, 0.0 ] + - [ 0.0, 0.0, 0.0 ] \ No newline at end of file diff --git a/config/rs_d455/estimator_config.yaml b/config/rs_d455/estimator_config.yaml index 053c8a0cf..226295345 100644 --- a/config/rs_d455/estimator_config.yaml +++ b/config/rs_d455/estimator_config.yaml @@ -3,15 +3,15 @@ verbosity: "INFO" # ALL, DEBUG, INFO, WARNING, ERROR, SILENT use_fej: true # if first-estimate Jacobians should be used (enable for good 
consistency) -use_imuavg: true # if using discrete integration, if we should average sequential IMU measurements to "smooth" it -use_rk4int: true # if rk4 integration should be used (overrides imu averaging) - +integration: "rk4" # discrete, rk4, analytical (if rk4 or analytical used then analytical covariance propagation is used) use_stereo: true # if we have more than 1 camera, if we should try to track stereo constraints between pairs max_cameras: 1 # how many cameras we have 1 = mono, 2 = stereo, >2 = binocular (all mono tracking) calib_cam_extrinsics: true # if the transform between camera and IMU should be optimized R_ItoC, p_CinI calib_cam_intrinsics: true # if camera intrinsics should be optimized (focal, center, distortion) calib_cam_timeoffset: true # if timeoffset between camera and IMU should be optimized +calib_imu_intrinsics: false # if imu intrinsics should be calibrated (rotation and skew-scale matrix) +calib_imu_g_sensitivity: false # if gyroscope gravity sensitivity (Tg) should be calibrated max_clones: 11 # how many clones in the sliding window max_slam: 50 # number of features in our state vector @@ -40,7 +40,7 @@ zupt_only_at_beginning: false init_window_time: 2.0 # how many seconds to collect initialization information init_imu_thresh: 1.5 # threshold for variance of the accelerometer to detect a "jerk" in motion init_max_disparity: 10.0 # max disparity to consider the platform stationary (dependent on resolution) -init_max_features: 75 # how many features to track during initialization (saves on computation) +init_max_features: 50 # how many features to track during initialization (saves on computation) init_dyn_use: false # if dynamic initialization should be used init_dyn_mle_opt_calib: false # if we should optimize calibration during intialization (not recommended) diff --git a/config/rs_d455/kalibr_imu_chain.yaml b/config/rs_d455/kalibr_imu_chain.yaml index a8ea0cb15..7b3c7b06b 100644 --- a/config/rs_d455/kalibr_imu_chain.yaml +++ 
b/config/rs_d455/kalibr_imu_chain.yaml @@ -19,7 +19,35 @@ imu0: accelerometer_random_walk: 0.00041327852 gyroscope_noise_density: 0.00020544166 gyroscope_random_walk: 0.00001110622 - model: calibrated rostopic: /d455/imu time_offset: 0.0 update_rate: 400 + # three different modes supported: + # "calibrated" (same as "kalibr"), "kalibr", "rpng" + model: "kalibr" + # how to get from Kalibr imu.yaml result file: + # - Tw is imu0:gyroscopes:M: + # - R_IMUtoGYRO: is imu0:gyroscopes:M: + # - Ta is imu0:accelerometers:M: + # - R_IMUtoACC not used by Kalibr + # - Tg is imu0:gyroscopes:A: + Tw: + - [ 1.0, 0.0, 0.0 ] + - [ 0.0, 1.0, 0.0 ] + - [ 0.0, 0.0, 1.0 ] + R_IMUtoGYRO: + - [ 1.0, 0.0, 0.0 ] + - [ 0.0, 1.0, 0.0 ] + - [ 0.0, 0.0, 1.0 ] + Ta: + - [ 1.0, 0.0, 0.0 ] + - [ 0.0, 1.0, 0.0 ] + - [ 0.0, 0.0, 1.0 ] + R_IMUtoACC: + - [ 1.0, 0.0, 0.0 ] + - [ 0.0, 1.0, 0.0 ] + - [ 0.0, 0.0, 1.0 ] + Tg: + - [ 0.0, 0.0, 0.0 ] + - [ 0.0, 0.0, 0.0 ] + - [ 0.0, 0.0, 0.0 ] diff --git a/config/rs_t265/estimator_config.yaml b/config/rs_t265/estimator_config.yaml index 827e0c998..21b0b8623 100644 --- a/config/rs_t265/estimator_config.yaml +++ b/config/rs_t265/estimator_config.yaml @@ -3,15 +3,15 @@ verbosity: "INFO" # ALL, DEBUG, INFO, WARNING, ERROR, SILENT use_fej: true # if first-estimate Jacobians should be used (enable for good consistency) -use_imuavg: true # if using discrete integration, if we should average sequential IMU measurements to "smooth" it -use_rk4int: true # if rk4 integration should be used (overrides imu averaging) - +integration: "rk4" # discrete, rk4, analytical (if rk4 or analytical used then analytical covariance propagation is used) use_stereo: true # if we have more than 1 camera, if we should try to track stereo constraints between pairs max_cameras: 2 # how many cameras we have 1 = mono, 2 = stereo, >2 = binocular (all mono tracking) calib_cam_extrinsics: true # if the transform between camera and IMU should be optimized R_ItoC, p_CinI calib_cam_intrinsics: true 
# if camera intrinsics should be optimized (focal, center, distortion) calib_cam_timeoffset: true # if timeoffset between camera and IMU should be optimized +calib_imu_intrinsics: false # if imu intrinsics should be calibrated (rotation and skew-scale matrix) +calib_imu_g_sensitivity: false # if gyroscope gravity sensitivity (Tg) should be calibrated max_clones: 11 # how many clones in the sliding window max_slam: 50 # number of features in our state vector @@ -40,7 +40,7 @@ zupt_only_at_beginning: false init_window_time: 2.0 # how many seconds to collect initialization information init_imu_thresh: 1.5 # threshold for variance of the accelerometer to detect a "jerk" in motion init_max_disparity: 10.0 # max disparity to consider the platform stationary (dependent on resolution) -init_max_features: 75 # how many features to track during initialization (saves on computation) +init_max_features: 50 # how many features to track during initialization (saves on computation) init_dyn_use: false # if dynamic initialization should be used init_dyn_mle_opt_calib: false # if we should optimize calibration during intialization (not recommended) diff --git a/config/rs_t265/kalibr_imu_chain.yaml b/config/rs_t265/kalibr_imu_chain.yaml index f553b6349..2159965f9 100644 --- a/config/rs_t265/kalibr_imu_chain.yaml +++ b/config/rs_t265/kalibr_imu_chain.yaml @@ -6,12 +6,7 @@ imu0: - [0.0, 1.0, 0.0, 0.0] - [0.0, 0.0, 1.0, 0.0] - [0.0, 0.0, 0.0, 1.0] - # Values from allan plots - # https://github.com/rpng/ar_table_dataset - #accelerometer_noise_density: 0.001464164197530967 - #accelerometer_random_walk: 6.696534319519138e-05 - #gyroscope_noise_density: 0.00010833365085022152 - #gyroscope_random_walk: 1.1687657234840692e-06 + # Values from allan plots of ~20hour dataset # Inflated values (to account for unmodelled effects) # - white noise multiplied by 5 # - bias random walk multiplied by 10 @@ -19,9 +14,37 @@ imu0: accelerometer_random_walk: 0.0006696534319519138 gyroscope_noise_density: 
0.00054166825 gyroscope_random_walk: 1.1687657234840692e-05 - model: calibrated rostopic: /t265/imu time_offset: 0.0 update_rate: 200 + # three different modes supported: + # "calibrated" (same as "kalibr"), "kalibr", "rpng" + model: "kalibr" + # how to get from Kalibr imu.yaml result file: + # - Tw is imu0:gyroscopes:M: + # - R_IMUtoGYRO: is imu0:gyroscopes:M: + # - Ta is imu0:accelerometers:M: + # - R_IMUtoACC not used by Kalibr + # - Tg is imu0:gyroscopes:A: + Tw: + - [ 1.0, 0.0, 0.0 ] + - [ 0.0, 1.0, 0.0 ] + - [ 0.0, 0.0, 1.0 ] + R_IMUtoGYRO: + - [ 1.0, 0.0, 0.0 ] + - [ 0.0, 1.0, 0.0 ] + - [ 0.0, 0.0, 1.0 ] + Ta: + - [ 1.0, 0.0, 0.0 ] + - [ 0.0, 1.0, 0.0 ] + - [ 0.0, 0.0, 1.0 ] + R_IMUtoACC: + - [ 1.0, 0.0, 0.0 ] + - [ 0.0, 1.0, 0.0 ] + - [ 0.0, 0.0, 1.0 ] + Tg: + - [ 0.0, 0.0, 0.0 ] + - [ 0.0, 0.0, 0.0 ] + - [ 0.0, 0.0, 0.0 ] diff --git a/config/tum_vi/estimator_config.yaml b/config/tum_vi/estimator_config.yaml index 0e90af7e4..708cb3e53 100644 --- a/config/tum_vi/estimator_config.yaml +++ b/config/tum_vi/estimator_config.yaml @@ -2,16 +2,16 @@ verbosity: "INFO" # ALL, DEBUG, INFO, WARNING, ERROR, SILENT -use_fej: true # if we should use first-estimate Jacobians (enable for good consistency) -use_imuavg: true # for our discrete integration, if we should average sequential IMU measurements to "smooth" it -use_rk4int: true # if rk4 integration should be used (overrides imu averaging) - +use_fej: true # if first-estimate Jacobians should be used (enable for good consistency) +integration: "rk4" # discrete, rk4, analytical (if rk4 or analytical used then analytical covariance propagation is used) use_stereo: true # if we have more than 1 camera, if we should try to track stereo constraints -max_cameras: 2 +max_cameras: 2 # how many cameras we have 1 = mono, 2 = stereo, >2 = binocular (all mono tracking) calib_cam_extrinsics: true calib_cam_intrinsics: true calib_cam_timeoffset: true +calib_imu_intrinsics: false +calib_imu_g_sensitivity: false max_clones: 11 
max_slam: 50 @@ -40,7 +40,7 @@ zupt_only_at_beginning: true init_window_time: 1.5 # make 2sec if using dynamic... init_imu_thresh: 0.45 # room1-5:0.45, room6:0.25 init_max_disparity: 15.0 -init_max_features: 75 +init_max_features: 50 init_dyn_use: false init_dyn_mle_opt_calib: false diff --git a/config/tum_vi/kalibr_imu_chain.yaml b/config/tum_vi/kalibr_imu_chain.yaml index 8571eeb9f..9c474d1a8 100644 --- a/config/tum_vi/kalibr_imu_chain.yaml +++ b/config/tum_vi/kalibr_imu_chain.yaml @@ -10,7 +10,35 @@ imu0: accelerometer_random_walk: 0.00086 # [ m / s^3 / sqrt(Hz) ]. ( accel bias diffusion ) gyroscope_noise_density: 0.00016 # [ rad / s / sqrt(Hz) ] ( gyro "white noise" ) gyroscope_random_walk: 2.2e-05 # [ rad / s^2 / sqrt(Hz) ] ( gyro bias diffusion ) - model: calibrated rostopic: /imu0 time_offset: 0.0 - update_rate: 200.0 \ No newline at end of file + update_rate: 200.0 + # three different modes supported: + # "calibrated" (same as "kalibr"), "kalibr", "rpng" + model: "kalibr" + # how to get from Kalibr imu.yaml result file: + # - Tw is imu0:gyroscopes:M: + # - R_IMUtoGYRO: is imu0:gyroscopes:M: + # - Ta is imu0:accelerometers:M: + # - R_IMUtoACC not used by Kalibr + # - Tg is imu0:gyroscopes:A: + Tw: + - [ 1.0, 0.0, 0.0 ] + - [ 0.0, 1.0, 0.0 ] + - [ 0.0, 0.0, 1.0 ] + R_IMUtoGYRO: + - [ 1.0, 0.0, 0.0 ] + - [ 0.0, 1.0, 0.0 ] + - [ 0.0, 0.0, 1.0 ] + Ta: + - [ 1.0, 0.0, 0.0 ] + - [ 0.0, 1.0, 0.0 ] + - [ 0.0, 0.0, 1.0 ] + R_IMUtoACC: + - [ 1.0, 0.0, 0.0 ] + - [ 0.0, 1.0, 0.0 ] + - [ 0.0, 0.0, 1.0 ] + Tg: + - [ 0.0, 0.0, 0.0 ] + - [ 0.0, 0.0, 0.0 ] + - [ 0.0, 0.0, 0.0 ] \ No newline at end of file diff --git a/config/uzhfpv_indoor/estimator_config.yaml b/config/uzhfpv_indoor/estimator_config.yaml index e7de6e346..f1859a319 100644 --- a/config/uzhfpv_indoor/estimator_config.yaml +++ b/config/uzhfpv_indoor/estimator_config.yaml @@ -2,16 +2,16 @@ verbosity: "INFO" # ALL, DEBUG, INFO, WARNING, ERROR, SILENT -use_fej: true # if we should use first-estimate Jacobians 
(enable for good consistency) -use_imuavg: true # for our discrete integration, if we should average sequential IMU measurements to "smooth" it -use_rk4int: true # if rk4 integration should be used (overrides imu averaging) - +use_fej: true # if first-estimate Jacobians should be used (enable for good consistency) +integration: "rk4" # discrete, rk4, analytical (if rk4 or analytical used then analytical covariance propagation is used) use_stereo: true # if we have more than 1 camera, if we should try to track stereo constraints -max_cameras: 2 +max_cameras: 2 # how many cameras we have 1 = mono, 2 = stereo, >2 = binocular (all mono tracking) calib_cam_extrinsics: false calib_cam_intrinsics: true calib_cam_timeoffset: true +calib_imu_intrinsics: false +calib_imu_g_sensitivity: false max_clones: 11 max_slam: 50 @@ -40,7 +40,7 @@ zupt_only_at_beginning: false init_window_time: 2.0 init_imu_thresh: 0.30 init_max_disparity: 2.0 -init_max_features: 75 +init_max_features: 50 init_dyn_use: false init_dyn_mle_opt_calib: false diff --git a/config/uzhfpv_indoor/kalibr_imu_chain.yaml b/config/uzhfpv_indoor/kalibr_imu_chain.yaml index 5e91ccd79..dec36b504 100644 --- a/config/uzhfpv_indoor/kalibr_imu_chain.yaml +++ b/config/uzhfpv_indoor/kalibr_imu_chain.yaml @@ -10,11 +10,39 @@ imu0: accelerometer_random_walk: 3.0000e-3 # [ m / s^3 / sqrt(Hz) ]. ( accel bias diffusion ) gyroscope_noise_density: 1.6968e-04 # [ rad / s / sqrt(Hz) ] ( gyro "white noise" ) gyroscope_random_walk: 1.9393e-05 # [ rad / s^2 / sqrt(Hz) ] ( gyro bias diffusion ) -# accelerometer_noise_density: 0.1 # [ m / s^2 / sqrt(Hz) ] ( accel "white noise" ) -# accelerometer_random_walk: 0.002 # [ m / s^3 / sqrt(Hz) ]. 
( accel bias diffusion ) -# gyroscope_noise_density: 0.05 # [ rad / s / sqrt(Hz) ] ( gyro "white noise" ) -# gyroscope_random_walk: 4.0e-05 # [ rad / s^2 / sqrt(Hz) ] ( gyro bias diffusion ) - model: calibrated + # accelerometer_noise_density: 0.1 # [ m / s^2 / sqrt(Hz) ] ( accel "white noise" ) + # accelerometer_random_walk: 0.002 # [ m / s^3 / sqrt(Hz) ]. ( accel bias diffusion ) + # gyroscope_noise_density: 0.05 # [ rad / s / sqrt(Hz) ] ( gyro "white noise" ) + # gyroscope_random_walk: 4.0e-05 # [ rad / s^2 / sqrt(Hz) ] ( gyro bias diffusion ) rostopic: /snappy_imu time_offset: 0.0 - update_rate: 200.0 \ No newline at end of file + update_rate: 200.0 + # three different modes supported: + # "calibrated" (same as "kalibr"), "kalibr", "rpng" + model: "kalibr" + # how to get from Kalibr imu.yaml result file: + # - Tw is imu0:gyroscopes:M: + # - R_IMUtoGYRO: is imu0:gyroscopes:M: + # - Ta is imu0:accelerometers:M: + # - R_IMUtoACC not used by Kalibr + # - Tg is imu0:gyroscopes:A: + Tw: + - [ 1.0, 0.0, 0.0 ] + - [ 0.0, 1.0, 0.0 ] + - [ 0.0, 0.0, 1.0 ] + R_IMUtoGYRO: + - [ 1.0, 0.0, 0.0 ] + - [ 0.0, 1.0, 0.0 ] + - [ 0.0, 0.0, 1.0 ] + Ta: + - [ 1.0, 0.0, 0.0 ] + - [ 0.0, 1.0, 0.0 ] + - [ 0.0, 0.0, 1.0 ] + R_IMUtoACC: + - [ 1.0, 0.0, 0.0 ] + - [ 0.0, 1.0, 0.0 ] + - [ 0.0, 0.0, 1.0 ] + Tg: + - [ 0.0, 0.0, 0.0 ] + - [ 0.0, 0.0, 0.0 ] + - [ 0.0, 0.0, 0.0 ] \ No newline at end of file diff --git a/config/uzhfpv_indoor_45/estimator_config.yaml b/config/uzhfpv_indoor_45/estimator_config.yaml index e7de6e346..f1859a319 100644 --- a/config/uzhfpv_indoor_45/estimator_config.yaml +++ b/config/uzhfpv_indoor_45/estimator_config.yaml @@ -2,16 +2,16 @@ verbosity: "INFO" # ALL, DEBUG, INFO, WARNING, ERROR, SILENT -use_fej: true # if we should use first-estimate Jacobians (enable for good consistency) -use_imuavg: true # for our discrete integration, if we should average sequential IMU measurements to "smooth" it -use_rk4int: true # if rk4 integration should be used (overrides imu 
averaging) - +use_fej: true # if first-estimate Jacobians should be used (enable for good consistency) +integration: "rk4" # discrete, rk4, analytical (if rk4 or analytical used then analytical covariance propagation is used) use_stereo: true # if we have more than 1 camera, if we should try to track stereo constraints -max_cameras: 2 +max_cameras: 2 # how many cameras we have 1 = mono, 2 = stereo, >2 = binocular (all mono tracking) calib_cam_extrinsics: false calib_cam_intrinsics: true calib_cam_timeoffset: true +calib_imu_intrinsics: false +calib_imu_g_sensitivity: false max_clones: 11 max_slam: 50 @@ -40,7 +40,7 @@ zupt_only_at_beginning: false init_window_time: 2.0 init_imu_thresh: 0.30 init_max_disparity: 2.0 -init_max_features: 75 +init_max_features: 50 init_dyn_use: false init_dyn_mle_opt_calib: false diff --git a/config/uzhfpv_indoor_45/kalibr_imu_chain.yaml b/config/uzhfpv_indoor_45/kalibr_imu_chain.yaml index 32c182c76..dec36b504 100644 --- a/config/uzhfpv_indoor_45/kalibr_imu_chain.yaml +++ b/config/uzhfpv_indoor_45/kalibr_imu_chain.yaml @@ -14,7 +14,35 @@ imu0: # accelerometer_random_walk: 0.002 # [ m / s^3 / sqrt(Hz) ]. 
( accel bias diffusion ) # gyroscope_noise_density: 0.05 # [ rad / s / sqrt(Hz) ] ( gyro "white noise" ) # gyroscope_random_walk: 4.0e-05 # [ rad / s^2 / sqrt(Hz) ] ( gyro bias diffusion ) - model: calibrated rostopic: /snappy_imu time_offset: 0.0 - update_rate: 200.0 \ No newline at end of file + update_rate: 200.0 + # three different modes supported: + # "calibrated" (same as "kalibr"), "kalibr", "rpng" + model: "kalibr" + # how to get from Kalibr imu.yaml result file: + # - Tw is imu0:gyroscopes:M: + # - R_IMUtoGYRO: is imu0:gyroscopes:M: + # - Ta is imu0:accelerometers:M: + # - R_IMUtoACC not used by Kalibr + # - Tg is imu0:gyroscopes:A: + Tw: + - [ 1.0, 0.0, 0.0 ] + - [ 0.0, 1.0, 0.0 ] + - [ 0.0, 0.0, 1.0 ] + R_IMUtoGYRO: + - [ 1.0, 0.0, 0.0 ] + - [ 0.0, 1.0, 0.0 ] + - [ 0.0, 0.0, 1.0 ] + Ta: + - [ 1.0, 0.0, 0.0 ] + - [ 0.0, 1.0, 0.0 ] + - [ 0.0, 0.0, 1.0 ] + R_IMUtoACC: + - [ 1.0, 0.0, 0.0 ] + - [ 0.0, 1.0, 0.0 ] + - [ 0.0, 0.0, 1.0 ] + Tg: + - [ 0.0, 0.0, 0.0 ] + - [ 0.0, 0.0, 0.0 ] + - [ 0.0, 0.0, 0.0 ] \ No newline at end of file diff --git a/config/uzhfpv_outdoor/estimator_config.yaml b/config/uzhfpv_outdoor/estimator_config.yaml index 29a80ccb1..6f548165d 100644 --- a/config/uzhfpv_outdoor/estimator_config.yaml +++ b/config/uzhfpv_outdoor/estimator_config.yaml @@ -2,16 +2,16 @@ verbosity: "INFO" # ALL, DEBUG, INFO, WARNING, ERROR, SILENT -use_fej: true # if we should use first-estimate Jacobians (enable for good consistency) -use_imuavg: true # for our discrete integration, if we should average sequential IMU measurements to "smooth" it -use_rk4int: true # if rk4 integration should be used (overrides imu averaging) - -use_stereo: true # if we have more than 1 camera, if we should try to track stereo constraints -max_cameras: 2 +use_fej: true # if first-estimate Jacobians should be used (enable for good consistency) +integration: "rk4" # discrete, rk4, analytical (if rk4 or analytical used then analytical covariance propagation is used) +use_stereo: true 
# if we have more than 1 camera, if we should try to track stereo constraints between pairs +max_cameras: 2 # how many cameras we have 1 = mono, 2 = stereo, >2 = binocular (all mono tracking) calib_cam_extrinsics: false calib_cam_intrinsics: true calib_cam_timeoffset: true +calib_imu_intrinsics: false +calib_imu_g_sensitivity: false max_clones: 11 max_slam: 50 @@ -40,7 +40,7 @@ zupt_only_at_beginning: false init_window_time: 2.0 init_imu_thresh: 0.30 init_max_disparity: 2.0 -init_max_features: 75 +init_max_features: 50 init_dyn_use: false init_dyn_mle_opt_calib: false diff --git a/config/uzhfpv_outdoor/kalibr_imu_chain.yaml b/config/uzhfpv_outdoor/kalibr_imu_chain.yaml index 32c182c76..dec36b504 100644 --- a/config/uzhfpv_outdoor/kalibr_imu_chain.yaml +++ b/config/uzhfpv_outdoor/kalibr_imu_chain.yaml @@ -14,7 +14,35 @@ imu0: # accelerometer_random_walk: 0.002 # [ m / s^3 / sqrt(Hz) ]. ( accel bias diffusion ) # gyroscope_noise_density: 0.05 # [ rad / s / sqrt(Hz) ] ( gyro "white noise" ) # gyroscope_random_walk: 4.0e-05 # [ rad / s^2 / sqrt(Hz) ] ( gyro bias diffusion ) - model: calibrated rostopic: /snappy_imu time_offset: 0.0 - update_rate: 200.0 \ No newline at end of file + update_rate: 200.0 + # three different modes supported: + # "calibrated" (same as "kalibr"), "kalibr", "rpng" + model: "kalibr" + # how to get from Kalibr imu.yaml result file: + # - Tw is imu0:gyroscopes:M: + # - R_IMUtoGYRO: is imu0:gyroscopes:M: + # - Ta is imu0:accelerometers:M: + # - R_IMUtoACC not used by Kalibr + # - Tg is imu0:gyroscopes:A: + Tw: + - [ 1.0, 0.0, 0.0 ] + - [ 0.0, 1.0, 0.0 ] + - [ 0.0, 0.0, 1.0 ] + R_IMUtoGYRO: + - [ 1.0, 0.0, 0.0 ] + - [ 0.0, 1.0, 0.0 ] + - [ 0.0, 0.0, 1.0 ] + Ta: + - [ 1.0, 0.0, 0.0 ] + - [ 0.0, 1.0, 0.0 ] + - [ 0.0, 0.0, 1.0 ] + R_IMUtoACC: + - [ 1.0, 0.0, 0.0 ] + - [ 0.0, 1.0, 0.0 ] + - [ 0.0, 0.0, 1.0 ] + Tg: + - [ 0.0, 0.0, 0.0 ] + - [ 0.0, 0.0, 0.0 ] + - [ 0.0, 0.0, 0.0 ] \ No newline at end of file diff --git 
a/config/uzhfpv_outdoor_45/estimator_config.yaml b/config/uzhfpv_outdoor_45/estimator_config.yaml index e7de6e346..d0abb79cf 100644 --- a/config/uzhfpv_outdoor_45/estimator_config.yaml +++ b/config/uzhfpv_outdoor_45/estimator_config.yaml @@ -2,16 +2,16 @@ verbosity: "INFO" # ALL, DEBUG, INFO, WARNING, ERROR, SILENT -use_fej: true # if we should use first-estimate Jacobians (enable for good consistency) -use_imuavg: true # for our discrete integration, if we should average sequential IMU measurements to "smooth" it -use_rk4int: true # if rk4 integration should be used (overrides imu averaging) - -use_stereo: true # if we have more than 1 camera, if we should try to track stereo constraints -max_cameras: 2 +use_fej: true # if first-estimate Jacobians should be used (enable for good consistency) +integration: "rk4" # discrete, rk4, analytical (if rk4 or analytical used then analytical covariance propagation is used) +use_stereo: true # if we have more than 1 camera, if we should try to track stereo constraints between pairs +max_cameras: 2 # how many cameras we have 1 = mono, 2 = stereo, >2 = binocular (all mono tracking) calib_cam_extrinsics: false calib_cam_intrinsics: true calib_cam_timeoffset: true +calib_imu_intrinsics: false +calib_imu_g_sensitivity: false max_clones: 11 max_slam: 50 @@ -40,7 +40,7 @@ zupt_only_at_beginning: false init_window_time: 2.0 init_imu_thresh: 0.30 init_max_disparity: 2.0 -init_max_features: 75 +init_max_features: 50 init_dyn_use: false init_dyn_mle_opt_calib: false diff --git a/config/uzhfpv_outdoor_45/kalibr_imu_chain.yaml b/config/uzhfpv_outdoor_45/kalibr_imu_chain.yaml index 32c182c76..dec36b504 100644 --- a/config/uzhfpv_outdoor_45/kalibr_imu_chain.yaml +++ b/config/uzhfpv_outdoor_45/kalibr_imu_chain.yaml @@ -14,7 +14,35 @@ imu0: # accelerometer_random_walk: 0.002 # [ m / s^3 / sqrt(Hz) ]. 
( accel bias diffusion ) # gyroscope_noise_density: 0.05 # [ rad / s / sqrt(Hz) ] ( gyro "white noise" ) # gyroscope_random_walk: 4.0e-05 # [ rad / s^2 / sqrt(Hz) ] ( gyro bias diffusion ) - model: calibrated rostopic: /snappy_imu time_offset: 0.0 - update_rate: 200.0 \ No newline at end of file + update_rate: 200.0 + # three different modes supported: + # "calibrated" (same as "kalibr"), "kalibr", "rpng" + model: "kalibr" + # how to get from Kalibr imu.yaml result file: + # - Tw is imu0:gyroscopes:M: + # - R_IMUtoGYRO: is imu0:gyroscopes:M: + # - Ta is imu0:accelerometers:M: + # - R_IMUtoACC not used by Kalibr + # - Tg is imu0:gyroscopes:A: + Tw: + - [ 1.0, 0.0, 0.0 ] + - [ 0.0, 1.0, 0.0 ] + - [ 0.0, 0.0, 1.0 ] + R_IMUtoGYRO: + - [ 1.0, 0.0, 0.0 ] + - [ 0.0, 1.0, 0.0 ] + - [ 0.0, 0.0, 1.0 ] + Ta: + - [ 1.0, 0.0, 0.0 ] + - [ 0.0, 1.0, 0.0 ] + - [ 0.0, 0.0, 1.0 ] + R_IMUtoACC: + - [ 1.0, 0.0, 0.0 ] + - [ 0.0, 1.0, 0.0 ] + - [ 0.0, 0.0, 1.0 ] + Tg: + - [ 0.0, 0.0, 0.0 ] + - [ 0.0, 0.0, 0.0 ] + - [ 0.0, 0.0, 0.0 ] \ No newline at end of file diff --git a/docs/bib/extra.bib b/docs/bib/extra.bib index fff6db0bd..3e5879090 100644 --- a/docs/bib/extra.bib +++ b/docs/bib/extra.bib @@ -1,3 +1,16 @@ +@book{Bar2001, + title = {Estimation with applications to tracking and navigation: theory algorithms and software}, + author = {Bar-Shalom, Yaakov and Li, X Rong and Kirubarajan, Thiagalingam}, + year = 2001, + publisher = {John Wiley \& Sons} +} +@book{Barfoot2017, + title = {State estimation for robotics}, + author = {Barfoot, Timothy D}, + year = 2017, + publisher = {Cambridge University Press}, + url = {http://asrl.utias.utoronto.ca/~tdb/bib/barfoot_ser17.pdf} +} @article{Burri2016IJRR, title = {The EuRoC micro aerial vehicle datasets}, author = {Burri, Michael and Nikolic, Janosch and Gohl, Pascal and Schneider, Thomas and Rehder, Joern and Omari, Sammy and Achtelik, Markus W and Siegwart, Roland}, @@ -14,6 +27,20 @@ @misc{ceres-solver url = {http://ceres-solver.org}, 
howpublished = {\url{http://ceres-solver.org}} } +@book{Chatfield1997, + title = {Fundamentals of high accuracy inertial navigation}, + author = {Chatfield, Averil Burton}, + year = 1997, + publisher = {Aiaa}, + volume = 174 +} +@inproceedings{Chen2022ICRA, + title = {FEJ2: A Consistent Visual-Inertial State Estimator Design}, + author = {Chuchu Chen and Yulin Yang and Patrick Geneva and Guoquan Huang}, + year = 2022, + booktitle = {2022 International Conference on Robotics and Automation (ICRA)}, + url = {https://chuchuchen.net/downloads/papers/Chen2022ICRA.pdf} +} @inproceedings{Chen2023ICRA, title = {Monocular Visual-Inertial Odometry with Planar Regularities}, author = {Chuchu Chen and Patrick Geneva and Yuxiang Peng and Woosik Lee and Guoquan Huang}, @@ -21,6 +48,13 @@ @inproceedings{Chen2023ICRA booktitle = {2023 International Conference on Robotics and Automation (ICRA)}, url = {https://pgeneva.com/downloads/papers/Chen2023ICRA.pdf} } +@book{Chirikjian2011, + title = {Stochastic Models, Information Theory, and Lie Groups, Volume 2: Analytic Methods and Modern Applications}, + author = {Chirikjian, Gregory}, + year = 2011, + publisher = {Springer Science \& Business Media}, + volume = 2 +} @inproceedings{Davidson2009ENC, title = {Improved vehicle positioning in urban environment through integration of GPS and low-cost inertial sensors}, author = {Davidson, Pavel and Hautam{\"a}ki, Jani and Collin, Jussi and Takala, Jarmo}, @@ -53,8 +87,17 @@ @inproceedings{Dong2012IROS year = 2012, booktitle = {2012 IEEE/RSJ International Conference on Intelligent Robots and Systems}, pages = {1064--1071}, + url = {http://tdongsi.github.io/download/pubs/DongSi2012IROS.pdf}, organization = {IEEE} } +@techreport{Eckenhoff2018TR, + title = {Continuous Preintegration Theory for Visual-Inertial Navigation}, + author = {Kevin Eckenhoff and Patrick Geneva and Guoquan Huang}, + year = 2018, + number = {RPNG-2018-CPI}, + note = {Available: 
\url{http://udel.edu/~ghuang/papers/tr_cpi.pdf}}, + institution = {University of Delaware} +} @article{Eckenhoff2019IJRR, title = {Closed-form preintegration methods for graph-based visual-inertial navigation}, author = {Kevin Eckenhoff and Patrick Geneva and Guoquan Huang}, @@ -63,7 +106,13 @@ @article{Eckenhoff2019IJRR volume = 38, number = 5, doi = {10.1177/0278364919835021}, - url = {https://doi.org/10.1177/0278364919835021} + url = {https://pgeneva.com/downloads/preprints/Eckenhoff2019IJRR.pdf} +} +@book{Farrell2008, + title = {Aided navigation: GPS with high rate sensors}, + author = {Farrell, Jay}, + year = 2008, + publisher = {McGraw-Hill, Inc.} } @inproceedings{Furgale2013IROS, title = {Unified temporal and spatial calibration for multi-sensor systems}, @@ -71,6 +120,7 @@ @inproceedings{Furgale2013IROS year = 2013, booktitle = {2013 IEEE/RSJ International Conference on Intelligent Robots and Systems}, pages = {1280--1286}, + url = {https://furgalep.github.io/bib/furgale_iros13.pdf}, organization = {IEEE} } @techreport{Geneva2020TRVICON2GT, @@ -81,6 +131,42 @@ @techreport{Geneva2020TRVICON2GT url = {http://udel.edu/~ghuang/papers/tr_vicon2gt.pdf}, institution = {University of Delaware} } +@techreport{Hesch2012TR, + title = {Observability-constrained Vision-aided Inertial Navigation}, + author = {Joel A. Hesch and Dimitrios G. Kottas and Sean L. Bowman and Stergios I. Roumeliotis}, + year = 2012, + note = {Available: \url{https://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.721.6118&rep=rep1&type=pdf}}, + institution = {Dept. 
of Computer Science \& Engineering, University of Minnesota} +} +@article{Hesch2013TRO, + title = {Consistency analysis and improvement of vision-aided inertial navigation}, + author = {Hesch, Joel A and Kottas, Dimitrios G and Bowman, Sean L and Roumeliotis, Stergios I}, + year = 2013, + journal = {IEEE Transactions on Robotics}, + publisher = {IEEE}, + volume = 30, + number = 1, + pages = {158--176} +} +@article{Hesch2014IJRR, + title = {Camera-IMU-based localization: Observability analysis and consistency improvement}, + author = {Hesch, Joel A and Kottas, Dimitrios G and Bowman, Sean L and Roumeliotis, Stergios I}, + year = 2014, + journal = {The International Journal of Robotics Research}, + publisher = {SAGE Publications Sage UK: London, England}, + volume = 33, + number = 1, + pages = {182--201} +} +@inproceedings{Huang2009ISER, + title = {A first-estimates Jacobian EKF for improving SLAM consistency}, + author = {Huang, Guoquan P and Mourikis, Anastasios I and Roumeliotis, Stergios I}, + year = 2009, + booktitle = {Experimental Robotics: The Eleventh International Symposium}, + pages = {373--382}, + url = {https://intra.ece.ucr.edu/~mourikis/papers/Huang08-ISER.pdf}, + organization = {Springer} +} @article{Huang2010IJRR, title = {Observability-based rules for designing consistent EKF SLAM estimators}, author = {Huang, Guoquan P and Mourikis, Anastasios I and Roumeliotis, Stergios I}, @@ -128,6 +214,13 @@ @inproceedings{Lee2020IROS url = {http://udel.edu/~ghuang/papers/tr_wheel-vio.pdf}, organization = {IEEE} } +@techreport{Li2012TR, + title = {Consistency of EKF-Based Visual-Inertial Odometry}, + author = {Mingyang Li and Anastasios I. Mourikis}, + year = 2012, + note = {Available: \url{https://intra.ece.ucr.edu/~mourikis/tech_reports/VIO.pdf}}, + institution = {Dept. 
of Electrical Engineering, University of California, Riverside} +} @article{Li2013IJRR, title = {High-precision, consistent EKF-based visual-inertial odometry}, author = {Li, Mingyang and Mourikis, Anastasios I}, year = 2013, @@ -136,7 +229,26 @@ @article{Li2013IJRR publisher = {SAGE Publications Sage UK: London, England}, volume = 32, number = 6, - pages = {690--711} + pages = {690--711}, + url = {https://intra.ece.ucr.edu/~mourikis/papers/Li2013IJRR.pdf} +} +@inproceedings{Li2014ICRA, + title = {High-fidelity sensor modeling and self-calibration in vision-aided inertial navigation}, + author = {Mingyang Li and Hongsheng Yu and Xing Zheng and Anastasios I. Mourikis}, + year = 2014, + month = {May}, + booktitle = {IEEE International Conference on Robotics and Automation (ICRA)}, + pages = {409--416} +} +@article{Li2014IJRR, + title = {Online temporal calibration for camera--IMU systems: Theory and algorithms}, + author = {Li, Mingyang and Mourikis, Anastasios I}, + year = 2014, + journal = {The International Journal of Robotics Research}, + publisher = {SAGE Publications Sage UK: London, England}, + volume = 33, + number = 7, + pages = {947--964} } @phdthesis{Li2014THESIS, title = {Visual-inertial odometry on resource-constrained systems}, author = {Li, Mingyang}, @@ -159,7 +271,7 @@ @inproceedings{Mourikis2007ICRA year = 2007, booktitle = {Proceedings 2007 IEEE International Conference on Robotics and Automation}, pages = {3565--3572}, - url = {http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.437.1085&rep=rep1&type=pdf}, + url = {https://www-users.cse.umn.edu/~stergios/papers/ICRA07-MSCKF.pdf}, organization = {IEEE} } @article{Mueggler2018TRO, @@ -181,7 +293,7 @@ @article{Patron2015IJCV pages = {208--219} } @article{Qin2018TRO, - title = {Vins-mono: A robust and versatile monocular visual-inertial state estimator}, + title = {{VINS-Mono}: A robust and versatile monocular visual-inertial state estimator}, author = {Qin, Tong and Li, Peiliang and Shen, Shaojie}, year = 2018, journal = {IEEE
Transactions on Robotics}, @@ -201,6 +313,27 @@ @article{Ramanandan2011TITS number = 1, pages = {235--248} } +@article{Rehder2017Sensors, + title = {Camera/IMU calibration revisited}, + author = {Rehder, Joern and Siegwart, Roland}, + year = 2017, + journal = {IEEE Sensors Journal}, + publisher = {IEEE}, + volume = 17, + number = 11, + pages = {3257--3268} +} +@article{Schneider2019Sensor, + title = {Observability-aware self-calibration of visual and inertial sensors for ego-motion estimation}, + author = {Schneider, Thomas and Li, Mingyang and Cadena, Cesar and Nieto, Juan and Siegwart, Roland}, + year = 2019, + journal = {IEEE Sensors Journal}, + publisher = {IEEE}, + volume = 19, + number = 10, + pages = {3846--3860}, + url = {https://arxiv.org/pdf/1901.07242.pdf} +} @inproceedings{Schubert2018IROS, title = {The TUM VI benchmark for evaluating visual-inertial odometry}, author = {Schubert, David and Goll, Thore and Demmel, Nikolaus and Usenko, Vladyslav and St { \"u } ckler, J { \"o } rg and Cremers, Daniel}, @@ -229,7 +362,7 @@ @inproceedings{Wagstaff2017IPIN organization = {IEEE} } @inproceedings{Wu2017ICRA, - title = {Vins on wheels}, + title = {{VINS} on wheels}, author = {Wu, Kejian J and Guo, Chao X and Georgiou, Georgios and Roumeliotis, Stergios I}, year = 2017, booktitle = {2017 IEEE International Conference on Robotics and Automation (ICRA)}, @@ -237,6 +370,35 @@ @inproceedings{Wu2017ICRA url = {http://mars.cs.umn.edu/papers/KejianWu_VINSonWheels.pdf}, organization = {IEEE} } +@techreport{Yang2019TR_ACI, + title = {Supplementary Material: Analytic Combined IMU Integrator (ACI$^2$) For Visual Inertial Navigation}, + author = {Yulin Yang and Chuchu Chen and Guoquan Huang}, + year = 2019, + note = {Available: \url{http://udel.edu/~yuyang/downloads/suplementary\_2020ICRA.pdf}}, + institution = {RPNG, University of Delaware} +} +@inproceedings{Yang2020ICRA, + title = {Analytic Combined IMU Integration (ACI$^2$) for Visual-Inertial Navigation}, + author = 
{Yulin Yang and B. P. W. Babu and Chuchu Chen and Guoquan Huang and Liu Ren}, + year = 2020, + booktitle = {Proc. of the IEEE International Conference on Robotics and Automation}, + address = {Paris, France}, + url = {https://yangyulin.net/papers/2020_icra_aci.pdf} +} +@article{Yang2020RSS, + title = {Online {IMU} intrinsic calibration: Is it necessary?}, + author = {Yang, Yulin and Geneva, Patrick and Zuo, Xingxing and Huang, Guoquan}, + year = 2020, + journal = {Proc. of Robotics: Science and Systems (RSS), Corvallis, OR}, + url = {http://www.roboticsproceedings.org/rss16/p026.pdf} +} +@article{Yang2023TRO, + title = {Online Self-Calibration for Visual-Inertial Navigation: Models, Analysis and Degeneracy}, + author = {Yulin Yang and Patrick Geneva and Xingxing Zuo and Guoquan Huang}, + year = 2023, + journal = {IEEE Transactions on Robotics}, + url = {https://pgeneva.com/downloads/preprints/Yang2023TRO.pdf} +} @inproceedings{Zhang2018IROS, title = {A tutorial on quantitative trajectory evaluation for visual(-inertial) odometry}, author = {Zhang, Zichao and Scaramuzza, Davide}, diff --git a/docs/dev-docs.dox b/docs/dev-docs.dox index cee9ad98a..29f683149 100644 --- a/docs/dev-docs.dox +++ b/docs/dev-docs.dox @@ -53,22 +53,23 @@ 2. You will need to install python3.6 - `sudo add-apt-repository ppa:deadsnakes/ppa` - `sudo apt-get update` - - `sudo apt-get install python3.6` - - `curl https://bootstrap.pypa.io/get-pip.py | sudo python3.6` + - `sudo apt-get install python3.6 python3.6-distutils` + - `curl https://bootstrap.pypa.io/pip/3.6/get-pip.py | sudo python3.6` - `sudo -H pip3.6 install jinja2 Pygments` - `sudo apt install texlive-base texlive-latex-extra texlive-fonts-extra texlive-fonts-recommended` 3.
To use the bibtex citing you need to have - - bibtex executable in search path - - perl executable in search path + - `bibtex` and `perl` executable in search path + - These should be installed through the texlive installation - http://www.doxygen.nl/manual/commands.html#cmdcite 4. Go into the documentation folder and build - `cd m.css/documentation/` - `python3.6 doxygen.py ` + - `python3.6 doxygen.py ~/catkin_ws/src/open_vins/Doxyfile-mcss` 5. If you run into errors, ensure your python path is set to use the 3.6 libraries - ` export PYTHONPATH=/usr/local/lib/python3.6/dist-packages/` 6. You might need to comment out the `enable_async=True` flag in the doxygen.py file 7. This should then build the documentation website -8. Open the html page `/open_vins/doxgen_generated/html/index.html` to view documentation +8. Open the html page `~/catkin_ws/src/open_vins/doxgen_generated/html/index.html` to view documentation @section developers-theme Custom m.css Theme diff --git a/docs/dev-index.dox b/docs/dev-index.dox index 7472c7a72..3a405f26e 100644 --- a/docs/dev-index.dox +++ b/docs/dev-index.dox @@ -36,14 +36,16 @@ The error state needs to be a vector and thus a type will need to define the map To show the power of this type-based indexing system, we will go through how we compute the EKF update. The specific method we will be looking at is the @ref ov_msckf::StateHelper::EKFUpdate() which takes in the state, vector of types, Jacobian, residual, and measurement covariance. -As compared to passing a Jacobian matrix that is the full size of state wide, we instead leverage this type system by passing a "small" Jacobian that has only the state elements that the measurements are a function of. +As compared to passing a "big" Jacobian matrix that is the full size of state wide, we instead leverage this type system by passing a "small" Jacobian that has only the state elements that the measurements are a function of. 
+ +@image html hxorder.png width=40% For example, if we have a global 3D SLAM feature update (implemented in @ref ov_msckf::UpdaterSLAM) our Jacobian will only be a function of the newest clone and the feature. This means that our Jacobian is only of size 9 as compared to the size our state. In addition to the matrix containing the Jacobian elements, we need to know what order this Jacobian is in, thus we pass a vector of types which correspond to the state elements our Jacobian is a function of. Thus in our example, it would contain two types: our newest clone @ref ov_type::PoseJPL and current landmark feature @ref ov_type::Landmark with our Jacobian being the type size of the pose plus the type size of the landmark in width. - +In the above figure the orientation and position would correspond to the x1 and x2 states, while the feature is the x5 state. diff --git a/docs/dev-roadmap.dox b/docs/dev-roadmap.dox index 250029490..42581f8d2 100644 --- a/docs/dev-roadmap.dox +++ b/docs/dev-roadmap.dox @@ -3,19 +3,14 @@ @page dev-roadmap Future Roadmap - The initial release provides the library foundation which contains a filter-based visual-inertial localization solution. This can be used in a wide range of scenarios and the type-based index system allows for others to easily add new features and develop on top of this system. Here is a list of future directions, although nothing is set in stone, that we are interested in taking: - Creation of a secondary graph-based thread that loosely introduces loop closures (akin to the second thread of VINS-Mono and others) which should allow for drift free long-term localization. -- Large scale offline batch graph optimization which leverages the trajectory of the ov_msckf as its initial guess and then optimizes both the map and trajectory. 
+- Large scale offline batch graph optimization which leverages the trajectory of the ov_msckf as its initial guess and then optimizes both the map and trajectory (see [ov_maplab](https://github.com/rpng/ov_maplab) project!). - Incorporate our prior work in preintegration @cite Eckenhoff2019IJRR into the same framework structure to allow for easy extensibility. Focus on sparsification and marginalization to allow for realtime computation. - Support for arbitrary Schmidt'ing of state elements allowing for modeling of their prior uncertainties but without optimizing their values online. -- More advance imu integration schemes, quantification of the integration noises to handle low frequency readings, and modeling of the imu intrinsics. - - - */ \ No newline at end of file diff --git a/docs/dev-ros1-to-ros2.dox b/docs/dev-ros1-to-ros2.dox index 6589faaf0..7c7978e43 100644 --- a/docs/dev-ros1-to-ros2.dox +++ b/docs/dev-ros1-to-ros2.dox @@ -13,11 +13,17 @@ This is what was used to generate the converted ROS2 bag files for standard data To do a conversion of a bag file we can do the following: @code{.shell-session} -pip install rosbags -rosbags-convert V1_01_easy.bag +pip3 install rosbags>=0.9.11 +rosbags-convert V1_01_easy.bag --dst @endcode +If you need to create a bag from images, then we recommend using the [Kalibr bagcreater](https://github.com/ethz-asl/kalibr/wiki/bag-format#bagcreater) utility. +To convert a ROS2 bag recorded to a ROS1 bag one which can then be processed by Kalibr or a ROS1 codebase, the following can be run: +@code{.shell-session} +pip3 install rosbags>=0.9.12 +rosbags-convert --dst calib_01.bag --exclude-topic +@endcode @section dev-ros1-to-ros2-option-2 rosbag2 play @@ -42,6 +48,7 @@ source_ros2 ros2 bag play -s rosbag_v2 V1_01_easy.bag @endcode +We don't recommend this method and instead suggest using the [rosbags](https://gitlab.com/ternaris/rosbags) utility as standalone. 
diff --git a/docs/eval-error.dox b/docs/eval-error.dox index 129b95ce7..ae5dc8c7b 100644 --- a/docs/eval-error.dox +++ b/docs/eval-error.dox @@ -38,8 +38,8 @@ By default the EuRoC groundtruth has the timestamp in nanoseconds and the quater A user can either process all CSV files in a given folder, or just a specific one. @code{.shell-session} -rosrun ov_eval format_convert folder/path/ -rosrun ov_eval format_convert file.csv +rosrun ov_eval format_converter folder/path/ +rosrun ov_eval format_converter file.csv @endcode In addition we have a specific folder structure that is assumed. @@ -98,7 +98,7 @@ It will use the filename as the name in the legend, so you can change that to ch @code{.shell-session} rosrun ov_eval plot_trajectories ... -rosrun ov_eval plot_trajectories posyaw 1565371553_estimate.txt truths/V1_01_easy.txt +rosrun ov_eval plot_trajectories posyaw truths/V1_01_easy.txt 1565371553_estimate.txt @endcode @image html eval/plot_xy.png width=70% diff --git a/docs/fej.dox b/docs/fej.dox index 8cd6ec891..2722fafb3 100644 --- a/docs/fej.dox +++ b/docs/fej.dox @@ -4,21 +4,29 @@ @page fej First Estimate Jacobian (FEJ) Estimation @tableofcontents +The observability and consistency of VINS is crucial due to its ability to provide: +(i) the minimal conditions for initialization, +(ii) insights into what states are unrecoverable, and +(iii) degenerate motions which can hurt the performance of the system. +Naive EKF-based VINS estimators have been shown to be inconsistent due to spurious information gain along unobservable directions +and have required the creation of "observability aware" filters which explicitly address the inaccurate information gains causing +filter over-confidence (the estimated uncertainty is smaller than the true). +Bar-Shalom et al. 
@cite Bar2001 make a convincing argument of why estimator consistency is crucial for accuracy and robust estimation: +> Since the filter gain is based on the filter-calculated error covariances, it follows that consistency is necessary for filter optimality: Wrong covariance yield wrong gain. +> This is why consistency evaluation is vital for verifying a filter design -- *it amounts to evaluation of estimator optimality*. -@section fej-sys-evolution EKF Linearized Error-State System -When developing an extended Kalman filter (EKF), one needs to linearize the nonlinear motion and measurement models about some linearization point. -This linearization is one of the sources of error causing inaccuracies in the estimates (in addition to, for exmaple, model errors and measurement noise). -Let us consider the following linearized error-state visual-inertial system: +In this section, we will introduce First-estimates Jacobian (FEJ) @cite Huang2009ISER @cite Huang2010IJRR methodology which can guarantee +the observability properties of VINS and improve the estimation consistency. + -\f{align*}{ -\tilde{\mathbf{x}}_{k|k-1} &= \mathbf{\Phi}_{(k,k-1)}~\tilde{\mathbf{x}}_{k-1|k-1} + \mathbf{G}_{k}\mathbf{w}_{k} \\ -\tilde{\mathbf{z}}_{k} &= \mathbf{H}_{k}~\tilde{\mathbf{x}}_{k|k-1}+\mathbf{n}_{k} -\f} -where the state contains the inertial navigation state and a single environmental feature -(noting that we do not include biases to simplify the derivations): +@section fej-sys-evolution EKF Linearized Error-State System + +When developing an Extended Kalman filter (EKF), one needs to linearize the nonlinear motion and measurement models about some linearization points. +This linearization is one of the sources of error causing inaccuracies in the estimates (in addition to, for example, model errors and measurement noise). +To simplify the derivations, in what follows, we consider the state contains the inertial navigation state and a single environmental feature (no biases). 
\f{align*}{ \mathbf{x}_k &= @@ -30,40 +38,17 @@ where the state contains the inertial navigation state and a single environmenta \end{bmatrix}^{\top} \f} -Note that we use the left quaternion error state -(see [[Indirect Kalman Filter for 3D Attitude Estimation](http://mars.cs.umn.edu/tr/reports/Trawny05b.pdf)] @cite Trawny2005TR for details). -For simplicity we assume that the camera and IMU frame have an identity transform. -We can compute the measurement Jacobian of a given feature based on the perspective projection camera model -at the *k*-th timestep as follows: +We refer the reader to @cite Huang2010IJRR @cite Li2013IJRR @cite Hesch2013TRO @cite Hesch2014IJRR @cite Chen2022ICRA for more detailed derivations of the full state variables. +Let us consider the following simplified linearized error-state visual-inertial system with the IMU kinematic motion model and camera measurement update: \f{align*}{ -\mathbf{H}_{k} &= -\mathbf H_{proj,k}~\mathbf H_{state,k} \\ -&= -\begin{bmatrix} -\frac{1}{{}^Iz} & 0 & \frac{-{}^Ix}{({}^Iz)^2} \\ -0 & \frac{1}{{}^Iz} & \frac{-{}^Iy}{({}^Iz)^2} \\ -\end{bmatrix} -\begin{bmatrix} -\lfloor {}^{I_k}_{G}\mathbf{R}({}^{G}\mathbf{p}_f-{}^{G}\mathbf{p}_{I_k}) \times\rfloor & --{}^{I_k}_{G}\mathbf{R} & -\mathbf 0_{3\times3} & -{}^{I_k}_{G}\mathbf{R} -\end{bmatrix} \\ -&= -\mathbf H_{proj,k}~ -{}^{I_k}_{G}\mathbf{R} -\begin{bmatrix} -\lfloor ({}^{G}\mathbf{p}_f-{}^{G}\mathbf{p}_{I_k}) \times\rfloor {}^{I_k}_{G}\mathbf{R}^\top & --\mathbf I_{3\times3} & -\mathbf 0_{3\times3} & -\mathbf I_{3\times3} -\end{bmatrix} +\tilde{\mathbf{x}}_{k|k-1} &= \mathbf{\Phi}_{(k,k-1)}~\tilde{\mathbf{x}}_{k-1|k-1} + \mathbf{G}_{k}\mathbf{w}_{k} \\ +\tilde{\mathbf{z}}_{k} &= \mathbf{H}_{k}~\tilde{\mathbf{x}}_{k|k-1}+\mathbf{n}_{k} \f} - -The state-transition (or system Jacobian) matrix from timestep *k-1* to *k* as (see [@ref propagation] for more details): - +Note that we use the left quaternion error state (see [[Indirect Kalman Filter for 3D Attitude 
Estimation](http://mars.cs.umn.edu/tr/reports/Trawny05b.pdf)] @cite Trawny2005TR and @ref ov_type::JPLQuat for details). +For simplicity we assume that the camera and IMU frame have an identity transform. +The state-transition (or system Jacobian) matrix from timestep *k-1* to *k* can be derived as (see [@ref propagation] for more details): \f{align*}{ \mathbf{\Phi}_{(k,k-1)}&= @@ -93,15 +78,47 @@ The state-transition (or system Jacobian) matrix from timestep *k-1* to *k* as ( \f} -where \f$\mathbf a(\tau)\f$ is the true acceleration at time \f$\tau \f$, \f${}^{I_{k}}_{I_{k-1}}\mathbf R\f$ is computed using the gyroscope angular velocity measurements, and \f${}^{G}\mathbf g = [0 ~ 0 ~ 9.81]^\top\f$ is gravity in the global frame of reference. -During propagation one would need to solve these integrals using either analytical or numerical integration, -while we here are interested in how the state evolves in order to examine its observability. +where \f$\mathbf a(\tau)\f$ is the true acceleration at time \f$\tau \f$, \f${}^{I_{k}}_{I_{k-1}}\mathbf R\f$ is computed +using the gyroscope angular velocity measurements, and \f${}^{G}\mathbf g = [0 ~ 0 ~ 9.81]^\top\f$ is gravity in the global frame of reference. +During propagation one would need to solve these integrals using either analytical or numerical integration. 
+ +We can compute the measurement Jacobian of a given feature based on the perspective projection camera model +at the *k*-th timestep as follows (see [@ref update-feat] for more details): + +\f{align*}{ +\mathbf{H}_{k} &= +\mathbf H_{proj,k}~\mathbf H_{state,k} \\ +&= +\begin{bmatrix} +\frac{1}{{}^Iz} & 0 & \frac{-{}^Ix}{({}^Iz)^2} \\ +0 & \frac{1}{{}^Iz} & \frac{-{}^Iy}{({}^Iz)^2} \\ +\end{bmatrix} +\begin{bmatrix} +\lfloor {}^{I_k}_{G}\mathbf{R}({}^{G}\mathbf{p}_f-{}^{G}\mathbf{p}_{I_k}) \times\rfloor & +-{}^{I_k}_{G}\mathbf{R} & +\mathbf 0_{3\times3} & +{}^{I_k}_{G}\mathbf{R} +\end{bmatrix} \\ +&= +\mathbf H_{proj,k}~ +{}^{I_k}_{G}\mathbf{R} +\begin{bmatrix} +\lfloor ({}^{G}\mathbf{p}_f-{}^{G}\mathbf{p}_{I_k}) \times\rfloor {}^{I_k}_{G}\mathbf{R}^\top & +-\mathbf I_{3\times3} & +\mathbf 0_{3\times3} & +\mathbf I_{3\times3} +\end{bmatrix} +\f} + @section fej-sys-observability Linearized System Observability -The observability matrix of this linearized system is defined by: +System observability plays a crucial role in state estimation since it provides a deep insight about the system's +geometrical properties and determines the minimal measurement modalities needed. +With the state translation matrix, \f$\mathbf{\Phi}_{(k,0)}\f$, and measurement Jacobian at timestep *k*, +\f$\mathbf{H}_{k}\f$, the observability matrix of this linearized system is defined by: \f{align*}{ \mathcal{O}= @@ -114,8 +131,13 @@ The observability matrix of this linearized system is defined by: \end{bmatrix} \f} -where \f$\mathbf{H}_{k}\f$ is the measurement Jacobian at timestep *k* and \f$\mathbf{\Phi}_{(k,0)}\f$ is the compounded state transition (system Jacobian) matrix from timestep 0 to k. -For a given block row of this matrix, we have: +If \f$\mathcal{O}\f$ is of full column rank then the system is fully observable. 
+A nullspace of it (i.e., \f$ \mathbf{H}_{k}\mathbf{\Phi}_{(k,0)}\mathcal{N} = \mathbf 0 \f$) describes the unobservable +state subspace which cannot be recovered with given measurements. +Note that while we simplify here and only consider the block row of the observability matrix when performing observability analysis, +we need to ensure that this nullspace holds for the entire matrix (i.e., each block row shares the same nullspace). +This can be achieved by ensuring that the nullspace does not vary with time, nor contain any measurements. +For the *k*-th block row of this \f$\mathcal{O}\f$ matrix, we have: \f{align*}{ \mathbf{H}_{k}\mathbf{\Phi}_{(k,0)} &= @@ -140,12 +162,10 @@ For a given block row of this matrix, we have: \boldsymbol\Gamma_4 &= \mathbf I_{3\times3} \f} - -We now verify the following nullspace which corresponds to the global yaw about gravity and global IMU and feature positions: - +It is straightforward to verify that the right nullspace spans four directions, i.e.: \f{align*}{ - \mathcal{N}_{vins} &= + \mathcal{N} &= \begin{bmatrix} {}^{I_{0}}_{G}\mathbf{R}{}^G\mathbf{g} & \mathbf 0_{3\times3} \\ -\lfloor {}^{G}\mathbf{p}_{I_0} \times\rfloor{}^G\mathbf{g} & \mathbf{I}_{3\times3} \\ @@ -154,39 +174,48 @@ We now verify the following nullspace which corresponds to the global yaw about \end{bmatrix} \f} -It is not difficult to verify that \f$ \mathbf{H}_{k}\mathbf{\Phi}_{(k,0)}\mathcal{N}_{vio} = \mathbf 0 \f$. -Thus this is a nullspace of the system, -which clearly shows that there are the four unobserable directions (global yaw and position) of visual-inertial systems. +where \f$ \mathcal{N} \f$ should be 4dof corresponding to global rotation about the gravity (yaw) and global translation of our visual-inertial systems. 
+ + @section fej-fej First Estimate Jacobians +It has been shown that the standard EKF-based VINS, which always computes the state transition matrix and the measurement +Jacobian using the current state estimates, makes the global yaw orientation appear to be observable +and has an incorrectly reduced 3dof nullspace dimension @cite Hesch2013TRO. +This causes the filter to mistakenly *gain extra information* and become overconfident in the yaw. + +To solve this issue, the First-Estimate Jacobians (FEJ) @cite Huang2009ISER @cite Huang2010IJRR methodology can be applied. +It evaluates the linearized system state transition matrix and Jacobians at the same estimate (i.e., the first estimates) +over all time periods and thus ensures that the 4dof unobservable VINS subspace does not gain spurious information. +The application of FEJ is simple yet effective; let us consider how to modify the propagation and update linearizations. + + + +@subsection fej-fej-prop Propagation -The main idea of First-Estimate Jacobains (FEJ) approaches is to ensure that the state transition and Jacobian matrices are evaluated at correct linearization points such that the above observability analysis will hold true. -For those interested in the technical details please take a look at: @cite Huang2010IJRR and @cite Li2013IJRR. Let us first consider a small thought experiment of how the standard Kalman filter computes its state transition matrix. From a timestep zero to one it will use the current estimates from state zero forward in time. At the next timestep after it updates the state with measurements from other sensors, it will compute the state transition with the updated values to evolve the state to timestep two. This causes a miss-match in the "continuity" of the state transition matrix which when multiply sequentially should represent the evolution from time zero to time two. 
- \f{align*}{ \mathbf{\Phi}_{(k+1,k-1)}(\mathbf{x}_{k+1|k},\mathbf{x}_{k-1|k-1}) \neq \mathbf{\Phi}_{(k+1,k)}(\mathbf{x}_{k+1|k},\mathbf{x}_{k|k}) ~ \mathbf{\Phi}_{(k,k-1)}(\mathbf{x}_{k|k-1},\mathbf{x}_{k-1|k-1}) \f} - As shown above, we wish to compute the state transition matrix from the *k-1* timestep given all *k-1* measurements up until the current propagated timestep *k+1* given all *k* measurements. The right side of the above equation is how one would normally perform this in a Kalman filter framework. \f$\mathbf{\Phi}_{(k,k-1)}(\mathbf{x}_{k|k-1},\mathbf{x}_{k-1|k-1})\f$ corresponds to propagating from the *k-1* update time to the *k* timestep. -One would then normally perform the *k*'th update to the state and then propagate from this **updated** state to the newest timestep (i.e. the \f$ \mathbf{\Phi}_{(k+1,k)}(\mathbf{x}_{k+1|k},\mathbf{x}_{k|k}) \f$ state transition matrix). -This clearly is different then if one was to compute the state transition from time *k-1* to the *k+1* timestep as the second state transition is evaluated at the different \f$\mathbf{x}_{k|k}\f$ linearization point! +One would then normally perform the *k*'th update to the state and then propagate from this **updated** state to the newest timestep +(i.e. the \f$ \mathbf{\Phi}_{(k+1,k)}(\mathbf{x}_{k+1|k},\mathbf{x}_{k|k}) \f$ state transition matrix). +This clearly is different then if one was to compute the state transition from time *k-1* to the *k+1* timestep as +the second state transition is evaluated at the different \f$\mathbf{x}_{k|k}\f$ linearization point! 
To fix this, we can change the linearization point we evaluate these at: - - \f{align*}{ \mathbf{\Phi}_{(k+1,k-1)}(\mathbf{x}_{k+1|k},\mathbf{x}_{k-1|k-1}) = \mathbf{\Phi}_{(k+1,k)}(\mathbf{x}_{k+1|k},\mathbf{x}_{k|k-1}) ~ @@ -194,13 +223,46 @@ To fix this, we can change the linearization point we evaluate these at: \f} + +@subsection fej-fej-update Update + We also need to ensure that our measurement Jacobians match the linearization point of the state transition matrix. -Thus they also need to be evaluated at the \f$\mathbf{x}_{k|k-1}\f$ linearization point instead of the \f$\mathbf{x}_{k|k}\f$ that one would normally use. -This gives way to the name FEJ since we will evaluate the Jacobians at the same linearization point to ensure that the nullspace remains valid. -For example if we evaluated the \f$\mathbf H_k \f$ Jacobian with a different \f$ {}^G\mathbf{p}_f \f$ at each timestep then the nullspace would not hold past the first time instance. +Let us consider the simple case at timestamp *k* where the current state estimate +\f$\hat{\mathbf{x}}_k\f$ gives the following observability matrix: + +\f{align*}{ +\hat{\mathcal{O}}= +\begin{bmatrix} +\hat{\mathbf{H}}_{0}\mathbf{\Phi}_{(0,0)} \\ +\vdots \\ +\hat{\mathbf{H}}_{k}\mathbf{\Phi}_{(k,0)} \\ +\end{bmatrix} +\f} + +It is easy to verify that \f$ \mathcal{N} \f$, derived in the previous section, is not valid for the entire matrix (i.e., \f$ \mathbf{H}_{k}\mathbf{\Phi}_{(k,0)}\mathcal{N} \neq \mathbf 0 \f$). +For example, the feature estimates \f$ {}^{G}\hat{\mathbf{p}}_{f}(t_0) \neq ... \neq {}^{G}\hat{\mathbf{p}}_{f}(t_k) \f$ in each row of \f$ \hat{\mathcal{O}}\f$ are different, thus the nullspace does not hold. +Specifically, the first column of \f$ \mathcal{N} \f$ is invalid, and causes the dimension of the unobservable subspace to become 3 (i.e., \f$ \texttt{dim}(\hat{\mathcal{O}}) =3 \f$). +This will cause the filter to gain extra information along the yaw unobservable direction and hurt the estimation performance. 
+Specifically this leads to larger errors, erroneously smaller uncertainties, and inconsistency (see @cite Hesch2013TRO @cite Li2013IJRR for detailed proof and discussion). + +To assure \f$ \mathbf{H}_{k}\mathbf{\Phi}_{(k,0)}\mathcal{N} = \mathbf 0 \f$, one can fix the linearization point over all time. +A natural choice of state linearization point is to use the first state estimate \f$ \bar{\mathbf{x}}_k \f$. +Note that for the IMU state, its first state estimate will be \f$ {\mathbf{x}}_{k|k-1}\f$ which also matches the linearization point in the state transition matrix. +For the features, the initial triangulated value (i.e., at the time we initialize the feature into the state vector) is its first estimate. +As such, we can derive the linearized measurement update function as: + +\f{align*}{ + \tilde{\mathbf{z}}_{k+1} = \mathbf{z}_{k+1} - \mathbf{h}(\hat{\mathbf{x}}_k) \simeq \bar{\mathbf{H}}_{k} (\mathbf{x}_{k} -\hat{\mathbf{x}}_k ) + \mathbf{n}_k +\f} +where \f$ \mathbf{h}(\cdot) \f$ is the nonlinear measurement function and \f$ \bar{\mathbf{H}}_{k} \f$ is the First-Estimate Jacobian which is evaluated only at the first state estimates \f$ \bar{\mathbf{x}}_k \f$. +It is not difficult to confirm that the observability matrix \f$ \mathcal{O} \f$ with the FEJ state transition matrix and measurement Jacobians yields the correct 4dof unobservable nullspace. +As such, the system observability properties can be preserved. +While not utilizing the current (best) state estimates for linearization does introduce linearization errors, it has been shown that the harm of inconsistency far outweighs these errors. +Follow-up works which have also tried to address this problem include FEJ2 @cite Chen2022ICRA and OC @cite Huang2010IJRR @cite Hesch2013TRO @cite Hesch2014IJRR. +Both of these works try to handle the problem of ensuring correct observability properties while compensating or leveraging the best estimates. 
- */ \ No newline at end of file +*/ diff --git a/docs/img/hxorder.ipe b/docs/img/hxorder.ipe new file mode 100644 index 000000000..8f9145a4d --- /dev/null +++ b/docs/img/hxorder.ipe @@ -0,0 +1,318 @@ + + + + + + + +0 0 m +-1 0.333 l +-1 -0.333 l +h + + + + +0 0 m +-1 0.333 l +-1 -0.333 l +h + + + + +0 0 m +-1 0.333 l +-0.8 0 l +-1 -0.333 l +h + + + + +0 0 m +-1 0.333 l +-0.8 0 l +-1 -0.333 l +h + + + + +0.6 0 0 0.6 0 0 e +0.4 0 0 0.4 0 0 e + + + + +0.6 0 0 0.6 0 0 e + + + + + +0.5 0 0 0.5 0 0 e + + +0.6 0 0 0.6 0 0 e +0.4 0 0 0.4 0 0 e + + + + + +-0.6 -0.6 m +0.6 -0.6 l +0.6 0.6 l +-0.6 0.6 l +h +-0.4 -0.4 m +0.4 -0.4 l +0.4 0.4 l +-0.4 0.4 l +h + + + + +-0.6 -0.6 m +0.6 -0.6 l +0.6 0.6 l +-0.6 0.6 l +h + + + + + +-0.5 -0.5 m +0.5 -0.5 l +0.5 0.5 l +-0.5 0.5 l +h + + +-0.6 -0.6 m +0.6 -0.6 l +0.6 0.6 l +-0.6 0.6 l +h +-0.4 -0.4 m +0.4 -0.4 l +0.4 0.4 l +-0.4 0.4 l +h + + + + + + +-0.43 -0.57 m +0.57 0.43 l +0.43 0.57 l +-0.57 -0.43 l +h + + +-0.43 0.57 m +0.57 -0.43 l +0.43 -0.57 l +-0.57 0.43 l +h + + + + + +0 0 m +-1 0.333 l +-1 -0.333 l +h + + + + +0 0 m +-1 0.333 l +-0.8 0 l +-1 -0.333 l +h + + + + +0 0 m +-1 0.333 l +-0.8 0 l +-1 -0.333 l +h + + + + +-1 0.333 m +0 0 l +-1 -0.333 l + + + + +0 0 m +-1 0.333 l +-1 -0.333 l +h +-1 0 m +-2 0.333 l +-2 -0.333 l +h + + + + +0 0 m +-1 0.333 l +-1 -0.333 l +h +-1 0 m +-2 0.333 l +-2 -0.333 l +h + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +128 704 m +128 608 l +448 608 l +448 704 l +h + + +160 704 m +160 664 l + + +192 704 m +192 664 l + + +224 704 m +224 664 l + + +240 704 m +240 664 l + +x_1 +x_2 + +x_3 +x_4 + +x_5 +\mathbf{H}_1 +\mathbf{H}_{big} +\mathbf{H}_2 +\mathbf{H}_5 +\mathbf{0} +\mathbf{0} + +128 704 m +128 608 l +448 608 l +448 704 l +h + + +160 704 m +160 664 l + + +192 704 m +192 664 l + +x_5 +x_2 + +x_1 +\mathbf{H}_{small} +\mathrm{Hx\_order} +\mathbf{H}_1 +\mathbf{H}_2 +\mathbf{H}_5 + + diff 
--git a/docs/img/hxorder.png b/docs/img/hxorder.png new file mode 100644 index 000000000..1f5b9cfac Binary files /dev/null and b/docs/img/hxorder.png differ diff --git a/docs/propagation-analytical.dox b/docs/propagation-analytical.dox new file mode 100644 index 000000000..032f0aa04 --- /dev/null +++ b/docs/propagation-analytical.dox @@ -0,0 +1,976 @@ +/** + +@page propagation_analytical Analytical Propagation +@tableofcontents + +The state definition and general propagation equations are covered in the @ref propagation page. +Here we will cover the continuous-time integration of the state dynamics and inclusion of IMU intrinsic parameters. +First we will cover how we can propagate our mean forward, +after which we cover how to derive the linearized error-state system propagation and approximations used. +Key references for analytical inertial integration include: + +- Indirect Kalman Filter for 3D Attitude Estimation @cite Trawny2005TR +- Consistency Analysis and Improvement of Vision-aided Inertial Navigation @cite Hesch2013TRO @cite Hesch2012TR +- High-precision, consistent EKF-based visual-inertial odometry @cite Li2013IJRR @cite Li2012TR +- Closed-form preintegration methods for graph-based visual-inertial navigation @cite Eckenhoff2019IJRR @cite Eckenhoff2018TR +- Analytic Combined IMU Integration (ACI^2) for Visual-Inertial Navigation @cite Yang2020ICRA @cite Yang2019TR_ACI +- Online Self-Calibration for Visual-Inertial Navigation: Models, Analysis and Degeneracy @cite Yang2023TRO + + + +@section imu_intrinsic_models IMU Intrinsic Models + +First we can re-write our IMU measurement models to find the true (or corrected) +angular velocity \f${}^I\boldsymbol{\omega}(t)\f$ and linear acceleration \f${}^I\mathbf{a}(t)\f$. +This is how we will relate incoming measurements to IMU dynamics (see @ref imu_kinematic). 
+ +\f{align*}{ +{}^I\boldsymbol{\omega}(t) + & = {}^I_{w}\mathbf{R} \mathbf{D}_w + \left( + {}^w\boldsymbol{\omega}_{m}(t) - \mathbf{T}_{g} {}^I\mathbf{a}(t) - \mathbf{b}_g(t) - \mathbf{n}_g(t) + \right) \\ +{}^I\mathbf{a}(t) + & = {}^I_a\mathbf{R} \mathbf{D}_a + \left( + {}^a\mathbf{a}_{m}(t) - \mathbf{b}_a(t) - \mathbf{n}_a(t) + \right) +\f} + + +where \f$\mathbf{D}_w = \mathbf{T}^{-1}_w\f$ and \f$\mathbf{D}_a = \mathbf{T}^{-1}_a\f$. +Note that here we have factored out the \f$- {}^I_G\mathbf{R}(t) {^G\mathbf{g}}\f$ gravity term (see below state evolution equations). +In practice, we calibrate \f$\mathbf{D}_a\f$, \f$\mathbf{D}_w\f$, \f${}^I_a\mathbf{R}\f$ (or \f${}^I_w\mathbf{R}\f$) and \f$\mathbf{T}_g\f$ +to prevent unneeded matrix inversions and transposes in the measurement equation. +We only calibrate either \f${}^I_w\mathbf{R}\f$ or \f${}^I_a\mathbf{R}\f$ since the base "inertial" +frame must coincide with one frame arbitrarily. +If both \f${}^I_w\mathbf{R}\f$ and \f${}^I_a\mathbf{R}\f$ are calibrated, +it will make the rotation between the IMU and camera unobservable due to over-parameterization @cite Yang2020RSS @cite Yang2023TRO. +We define two different models of interest: + +- *KALIBR*: Contains \f$\mathbf{D}'_{w6}\f$, \f$\mathbf{D}'_{a6}\f$, \f${}^I_w\mathbf{R}\f$ and gravity sensitivity \f$\mathbf{T}_{g9}\f$. + This model follows IMU intrinsic calibration presented in @cite Rehder2017Sensors and the + output used in the open-source calibration toolbox [Kalibr](https://github.com/ethz-asl/kalibr) @cite Furgale2013IROS. + +- *RPNG*: Contains \f$\mathbf{D}_{w6}\f$, \f$\mathbf{D}_{a6}\f$, the rotation \f${}^I_a\mathbf{R}\f$, and gravity sensitivity \f$\mathbf{T}_{g9}\f$. + \f$\mathbf{D}_{a6}\f$ and \f$\mathbf{D}_{w6}\f$ are upper-triangular matrices, following *imu2* analyzed in @cite Yang2020RSS @cite Yang2023TRO. 
+ +It is important to note that one should use the *KALIBR* model if there is a non-negligible transformation between +the gyroscope and accelerometer (it is negligible for most MEMS IMUs, see @cite Li2014ICRA @cite Schneider2019Sensor) +since it assumes the accelerometer frame to be the inertial frame +and rigid bodies share the same angular velocity at all points. +We can define the following matrices: + +\f{align*}{ +\mathbf{D}_{*6} &= \begin{bmatrix} + d_{*1} & d_{*2} & d_{*4} \\ + 0 & d_{*3} & d_{*5} \\ + 0 & 0 & d_{*6} + \end{bmatrix},~~ +\mathbf{D}'_{*6} &= \begin{bmatrix} + d_{*1} & 0 & 0 \\ + d_{*2} & d_{*4} & 0 \\ + d_{*3} & d_{*5} & d_{*6} + \end{bmatrix},~~ +\mathbf{T}_{g9} &= \begin{bmatrix} + t_{g1} & t_{g4} & t_{g7} \\ + t_{g2} & t_{g5} & t_{g8} \\ + t_{g3} & t_{g6} & t_{g9} + \end{bmatrix} +\f} + + + + +@section analytical_state Full State with Calibration + +In the following derivations, we will compute the Jacobians for all the variables that might appear +in the IMU models, including scale/axis correction for gyroscope \f$\mathbf{D}_w\f$ (6 parameters), +scale/axis correction for accelerometer \f$\mathbf{D}_a\f$ (6 parameters), rotation from gyroscope to +IMU frame \f${}^I_w\mathbf{R}\f$, rotation from accelerometer to IMU frame \f${}^I_a\mathbf{R}\f$ and +gravity sensitivity \f$\mathbf{T}_g\f$ (9 parameters). 
+The state vector contains the IMU state and its intrinsics: + +\f{align*}{ +\mathbf{x}_I & = + \begin{bmatrix} + \mathbf{x}^{\top}_n & | & \mathbf{x}^{\top}_b & | & \mathbf{x}^{\top}_{in} + \end{bmatrix}^{\top} + \\ +& = \begin{bmatrix} + {}^I_G\bar{q}^{\top} & + {}^G\mathbf{p}^{\top}_{I} & + {}^G\mathbf{v}^{\top}_{I} & + | & + \mathbf{b}^{\top}_{g} & + \mathbf{b}^{\top}_{a} & + | & \mathbf{x}^{\top}_{in} + \end{bmatrix}^{\top} \\ +\mathbf{x}_{in} & = + \begin{bmatrix} + \mathbf{x}^{\top}_{Dw} & \mathbf{x}^{\top}_{Da} & {}^I_w\bar{q}^{\top} & {}^I_a\bar{q}^{\top} & \mathbf{x}^{\top}_{Tg} + \end{bmatrix}^{\top} +\f} + +where \f${}^I_G\bar{q}\f$ denotes the rotation matrix \f${}^I_G\mathbf{R}\f$ from \f$\{G\}\f$ to \f$\{I\}\f$. +\f${}^G\mathbf{p}_{I}\f$ and \f${}^G\mathbf{v}_I\f$ denote the IMU position and velocity in \f$\{G\}\f$. +\f$\mathbf{x}_n\f$ denotes the IMU navigation states and \f$\mathbf{x}_b\f$ denotes the IMU biases. +The IMU intrinsics, \f$\mathbf{x}_{in}\f$, contain the non-zero elements stored column-wise: + +\f{align*}{ +\mathbf{x}_{D*} & = + \begin{bmatrix} + d_{*1} & d_{*2} & d_{*3} & d_{*4} & d_{*5} & d_{*6} + \end{bmatrix}^{\top} \\ +\mathbf{x}_{Tg} & = + \begin{bmatrix} + t_{g1} & t_{g2} & t_{g3} & t_{g4} & t_{g5} & t_{g6} & t_{g7} & t_{g8} & t_{g9} + \end{bmatrix}^{\top} +\f} + + +@section analytical_prop Analytical State Mean Integration + +First we redefine the continuous-time integration equations mentioned in the @ref conti_prop section. 
+They are summarized as follows: + +\f{align*}{ +{}^{I_{k+1}}_{G}\mathbf{R} & + \triangleq\Delta \mathbf{R}_k ~{}^{I_k}_{G}\mathbf{R} \\ +{}^G\mathbf{p}_{I_{k+1}} & + \triangleq + {}^G\mathbf{p}_{I_k} + {}^G\mathbf{v}_{I_k}\Delta t_k + + {}^{I_k}_{G}\mathbf{R}^\top \Delta \mathbf{p}_{k} + - \frac{1}{2}{}^G\mathbf{g}\Delta t^2_k \\ +{}^G\mathbf{v}_{I_{k+1}} & + \triangleq {}^G\mathbf{v}_{I_k} + + {}^{I_k}_{G}\mathbf{R}^\top \Delta \mathbf{v}_{k} + - {}^G\mathbf{g}\Delta t_k \\ + \mathbf{b}_{g_{k+1}} & = \mathbf{b}_{g_{k}} + \int^{t_{k+1}}_{t_k} \mathbf{n}_{wg}(t_\tau) ~ d \tau \\ + \mathbf{b}_{a_{k+1}} & = \mathbf{b}_{a_{k}} + \int^{t_{k+1}}_{t_k} \mathbf{n}_{wa}(t_\tau) ~ d \tau +\f} + +where \f$\Delta t_k = t_{k+1} - t_{k}\f$, +\f${}^{I_k}_{I_\tau}\mathbf{R} = \exp(\int^{t_\tau}_{t_k} {}^{I}\boldsymbol{\omega}(t_u) ~ d u ) \f$, +\f$\exp(\cdot)\f$ is the \f$SO(3)\f$ matrix exponential @cite Chirikjian2011, and vectors are evaluated at +their subscript timesteps (e.g. \f${}^G\mathbf{v}_{I_k}={}^G\mathbf{v}_{I}(t_k)\f$). 
+We have the following integration components: + +\f{align*}{ + \Delta \mathbf{R}_k &= + {}^{I_{k+1}}_{I_k}\mathbf{R} + = \exp + \left(-\int^{t_{k+1}}_{t_{k}} {}^{I}\boldsymbol{\omega}(t_\tau) ~ d \tau\right) \\ +\Delta \mathbf{p}_{k} &= + \int^{t_{k+1}}_{t_{k}} \int^{s}_{t_{k}} {}^{I_k}_{I_\tau}\mathbf{R} {}^{I} \mathbf{a}(t_\tau) ~ d \tau d s \triangleq \boldsymbol{\Xi}_2 \cdot {}^{I_k} \hat{\mathbf{a}} \\ +\Delta \mathbf{v}_{k} &= + \int^{t_{k+1}}_{t_{k}} + {}^{I_k}_{I_\tau}\mathbf{R} {}^{I} \mathbf{a}(t_\tau) ~ d \tau \triangleq \boldsymbol{\Xi}_1 \cdot {}^{I_k} \hat{\mathbf{a}} +\f} + +where we define the following integrations: + +\f{align*}{ +\boldsymbol{\Xi}_1 & \triangleq + \int^{t_{k+1}}_{t_k} + \exp \left( + {}^{I_\tau}\hat{\boldsymbol{\omega}}\delta\tau + \right) + d{\tau} \\ +\boldsymbol{\Xi}_2 &\triangleq + \int^{t_{k+1}}_{t_k} \int^{s}_{t_k} + \exp \left( + {}^{I_\tau}\hat{\boldsymbol{\omega}}\delta\tau + \right) + d\tau ds +\f} + +where \f$\delta\tau = t_{\tau} - t_{k}\f$ and the \f$\boldsymbol{\Xi}_1\f$ and \f$\boldsymbol{\Xi}_2\f$ integration components + can be evaluated either analytically (see @ref analytical_integration_components) or numerically using RK4. 
+\f${}^{I_k}\hat{\boldsymbol{\omega}}\f$ and \f${}^{I_k}\hat{\mathbf{a}}\f$ are defined as (dropping the timestamp): + +\f{align*}{ + {}^I\hat{\boldsymbol{\omega}} & = {}^I_w\hat{\mathbf{R}} \hat{\mathbf{D}}_{w} {}^w\hat{\boldsymbol{\omega}} \\ + {}^w\hat{\boldsymbol{\omega}} & = {}^w\boldsymbol{\omega}_m - \hat{\mathbf{T}}_g {}^I\hat{\mathbf{a}}-\hat{\mathbf{b}}_g + = + \begin{bmatrix} + {}^w\hat{w}_1 & {}^w\hat{w}_2 & {}^w\hat{w}_3 + \end{bmatrix}^{\top} \\ + {}^I\hat{\mathbf{a}} &= {}^I_a\hat{\mathbf{R}} \hat{\mathbf{D}}_{a} {}^a\hat{\mathbf{a}} \\ + {}^a\hat{\mathbf{a}} & = {}^a\mathbf{a}_m - \hat{\mathbf{b}}_a + = + \begin{bmatrix} + {}^a\hat{a}_1 & {}^a\hat{a}_2 & {}^a\hat{a}_3 + \end{bmatrix}^{\top} +\f} + +Under the assumption of constant \f${}^{I_k}\hat{\boldsymbol{\omega}}\f$ and \f${}^{I_k}\hat{\mathbf{a}}\f$, +the above equations are also constant over the time interval. +The mean propagation for the new state at \f$t_{k+1}\f$ can be written after taking the expectation: + +\f{align*}{ + {}^{I_{k+1}}_G\hat{\mathbf{R}} & + \simeq + \Delta \mathbf{R}_k + {}^{I_k}_G\hat{\mathbf{R}} + \\ + {}^G\hat{\mathbf{p}}_{I_{k+1}} & \simeq + {}^{G}\hat{\mathbf{p}}_{I_k} + {}^G\hat{\mathbf{v}}_{I_k}\Delta t_k + + + {}^{I_k}_G\hat{\mathbf{R}}^\top + \Delta \hat{\mathbf{p}}_k + - \frac{1}{2}{}^G\mathbf{g}\Delta t^2_k + \\ + {}^G\hat{\mathbf{v}}_{I_{k+1}} & \simeq {}^{G}\hat{\mathbf{v}}_{I_k} + + {}^{I_k}_G\hat{\mathbf{R}}^\top + \Delta \hat{\mathbf{v}}_k - {}^G\mathbf{g}\Delta t_k + \\ + \hat{\mathbf{b}}_{g_{k+1}} & = \hat{\mathbf{b}}_{g_k} + \\ + \hat{\mathbf{b}}_{a_{k+1}} & = \hat{\mathbf{b}}_{a_k} +\f} + + + + + +@section analytical_linearization Model Linearization Derivations + + +@subsection analytical_linearization_error_stats Error States Definitions + +We first remind the reader of the error states used throughout the system. +It is crucial that all error states are consistent between propagation and update. 
+OpenVINS has a type system (see @ref ov_type and @ref dev-index-types) for which Jacobians need to be derived from. +We have the following pose (orientation + position) errors (based on @ref ov_type::PoseJPL) + + +\f{align*}{ +{}^{I}_G \mathbf{R} &\simeq \exp(-\delta \boldsymbol{\theta}) {}^{I}_G \hat{\mathbf{R}} \\ +&\simeq (\mathbf{I} - \lfloor \delta \boldsymbol{\theta} \rfloor) {}^{I}_G \hat{\mathbf{R}} \\ +{}^{G}\mathbf{p}_I &= {}^{G}\hat{\mathbf{p}}_I + {}^{G}\tilde{\mathbf{p}}_I +\f} + +For all other vectors (such as IMU intrinsics etc), we leverage simple additive error (based on @ref ov_type::Vec) + +\f{align*}{ +\mathbf{v} &= \hat{\mathbf{v}} +\tilde{\mathbf{v}} +\f} + + + + +@subsection analytical_linearization_imu IMU Reading Linearization +We first define the \f${}^{I_k}\tilde{\boldsymbol{\omega}}\f$ and \f${}^{I_k}\tilde{\mathbf{a}}\f$ error states. +For \f${}^I\hat{\mathbf{a}}\f$ and \f${}^I\tilde{\mathbf{a}}\f$, we have: + +\f{align*}{ + {}^I{\mathbf{a}} & = {}^I\hat{\mathbf{a}} + {}^I\tilde{\mathbf{a}} \\ + &\simeq + {}^I_a\hat{\mathbf{R}} \hat{\mathbf{D}}_a + \left( + {}^a\mathbf{a}_m - \hat{\mathbf{b}}_a + \right) + + + {}^I_a\hat{\mathbf{R}} \mathbf{H}_{Da} \tilde{\mathbf{x}}_{Da} \notag \\ + &+ + \lfloor {}^I_a\hat{\mathbf{R}} \hat{\mathbf{D}}_a + \left( + {}^a\mathbf{a}_m - \hat{\mathbf{b}}_a + \right) \rfloor + \delta \boldsymbol{\theta}_{Ia} + - {}^I_a\hat{\mathbf{R}} \hat{\mathbf{D}}_a \tilde{\mathbf{b}}_a + - {}^I_a\hat{\mathbf{R}} \hat{\mathbf{D}}_a {\mathbf{n}}_a + \\ + {}^I\hat{\mathbf{a}} & = {}^I_a\hat{\mathbf{R}} \hat{\mathbf{D}}_a + \left( + {}^a\mathbf{a}_m - \hat{\mathbf{b}}_a + \right) + \\ + {}^I\tilde{\mathbf{a}} & \simeq + {}^I_a\hat{\mathbf{R}} \mathbf{H}_{Da} \tilde{\mathbf{x}}_{Da} + + + \lfloor {}^I\hat{\mathbf{a}} \rfloor + \delta \boldsymbol{\theta}_{Ia} + - {}^I_a\hat{\mathbf{R}} \hat{\mathbf{D}}_a \tilde{\mathbf{b}}_a + - {}^I_a\hat{\mathbf{R}} \hat{\mathbf{D}}_a {\mathbf{n}}_a +\f} + +For 
\f${}^I\hat{\boldsymbol{\omega}}\f$ and \f${}^I\tilde{\boldsymbol{\omega}}\f$, we have: + +\f{align*}{ + {}^{I}\boldsymbol{\omega} & = {}^{I}\hat{\boldsymbol{\omega}} + + {}^{I}\tilde{\boldsymbol{\omega}} + \\ + & + \simeq + {}^I_w\hat{\mathbf{R}} \hat{\mathbf{D}}_w + \left( + {}^w\boldsymbol{\omega}_m - \hat{\mathbf{T}}_g {}^I\hat{\mathbf{a}}-\hat{\mathbf{b}}_g + \right) + + \lfloor + {}^I_w\hat{\mathbf{R}} \hat{\mathbf{D}}_w + \left( + {}^w\boldsymbol{\omega}_m - \hat{\mathbf{T}}_g {}^I\hat{\mathbf{a}}-\hat{\mathbf{b}}_g + \right) + \rfloor \delta \boldsymbol{\theta}_{Iw} + \\ + & + + {}^I_w\hat{\mathbf{R}} \mathbf{H}_{Dw} \tilde{\mathbf{x}}_{Dw} + - {}^I_w\hat{\mathbf{R}}\hat{\mathbf{D}}_w + \left( + \hat{\mathbf{T}}_g {}^I\tilde{\mathbf{a}} + + \mathbf{H}_{Tg}\tilde{\mathbf{x}}_{Tg} + \right) + - {}^I_w\hat{\mathbf{R}}\hat{\mathbf{D}}_w + \left( + \tilde{\mathbf{b}}_g + +\mathbf{n}_g + \right) + \notag + \\ + {}^{I}\hat{\boldsymbol{\omega}} & = + {}^I_w\hat{\mathbf{R}} \hat{\mathbf{D}}_w + \left( + {}^w\boldsymbol{\omega}_m - \hat{\mathbf{T}}_g {}^I\hat{\mathbf{a}}-\hat{\mathbf{b}}_g + \right) + \\ + {}^{I}\tilde{\boldsymbol{\omega}} & \simeq + - {}^I_w\hat{\mathbf{R}}\hat{\mathbf{D}}_w \tilde{\mathbf{b}}_g + + {}^I_w\hat{\mathbf{R}}\hat{\mathbf{D}}_w \hat{\mathbf{T}}_g {}^I_a\hat{\mathbf{R}} \hat{\mathbf{D}}_a \tilde{\mathbf{b}}_a + + {}^I_w\hat{\mathbf{R}}\mathbf{H}_{Dw}\tilde{\mathbf{x}}_{Dw} \notag \\ + & + - {}^I_w\hat{\mathbf{R}}\hat{\mathbf{D}}_w \hat{\mathbf{T}}_g {}^I_a\hat{\mathbf{R}}\mathbf{H}_{Da}\tilde{\mathbf{x}}_{Da} + + \lfloor {}^I\hat{\boldsymbol{\omega}} \rfloor \delta \boldsymbol{\theta}_{Iw} + - {}^I_w\hat{\mathbf{R}}\hat{\mathbf{D}}_w \hat{\mathbf{T}}_g + \lfloor {}^I\hat{\mathbf{a}} \rfloor \delta \boldsymbol{\theta}_{Ia} \notag \\ + & + - {}^I_w\hat{\mathbf{R}}\hat{\mathbf{D}}_w \mathbf{H}_{Tg} + \tilde{\mathbf{x}}_{Tg} - {}^I_w\hat{\mathbf{R}} \hat{\mathbf{D}}_w \mathbf{n}_g + + {}^I_w\hat{\mathbf{R}}\hat{\mathbf{D}}_w 
\hat{\mathbf{T}}_g {}^I_a\hat{\mathbf{R}}\hat{\mathbf{D}}_a \mathbf{n}_a
+\f}
+
+where we need to define \f$\mathbf{H}_{Dw}\f$, \f$\mathbf{H}_{Da}\f$ and \f$\mathbf{H}_{Tg}\f$.
+For the *KALIBR* model, we have:
+
+\f{align*}{
+\mathbf{H}_{Dw} & =
+  \begin{bmatrix}
+  {}^w\hat{w}_1 \mathbf{I}_3 & {}^w\hat{w}_2\mathbf{e}_2 & {}^w\hat{w}_2\mathbf{e}_3 & {}^w\hat{w}_3 \mathbf{e}_3
+  \end{bmatrix} \\
+\mathbf{H}_{Da} & =
+  \begin{bmatrix}
+  {}^a\hat{a}_1 \mathbf{I}_3 & {}^a\hat{a}_2\mathbf{e}_2 & {}^a\hat{a}_2\mathbf{e}_3 & {}^a\hat{a}_3\mathbf{e}_3
+  \end{bmatrix} \\
+\mathbf{H}_{Tg} & =
+  \begin{bmatrix}
+  {}^I\hat{a}_1 \mathbf{I}_3 & {}^I\hat{a}_2 \mathbf{I}_3 & {}^I\hat{a}_3 \mathbf{I}_3
+  \end{bmatrix}
+\f}
+
+For the *RPNG* model, we have:
+
+\f{align*}{
+\mathbf{H}_{Dw} & =
+  \begin{bmatrix}
+  {}^w\hat{w}_1\mathbf{e}_1 & {}^w\hat{w}_2\mathbf{e}_1 & {}^w\hat{w}_2\mathbf{e}_2 & {}^w\hat{w}_3 \mathbf{I}_3
+  \end{bmatrix} \\
+\mathbf{H}_{Da} & =
+  \begin{bmatrix}
+  {}^a\hat{a}_1\mathbf{e}_1 & {}^a\hat{a}_2\mathbf{e}_1 & {}^a\hat{a}_2\mathbf{e}_2 & {}^a\hat{a}_3 \mathbf{I}_3
+  \end{bmatrix} \\
+\mathbf{H}_{Tg} & =
+  \begin{bmatrix}
+  {}^I\hat{a}_1 \mathbf{I}_3 & {}^I\hat{a}_2 \mathbf{I}_3 & {}^I\hat{a}_3 \mathbf{I}_3
+  \end{bmatrix}
+\f}
+
+By summarizing the above equations, we have:
+
+\f{align*}{
+  \begin{bmatrix}
+  {}^{I_k}\tilde{\boldsymbol{\omega}} \\
+  {}^{I_k}\tilde{\mathbf{a}}
+  \end{bmatrix}
+  & =
+  \begin{bmatrix}
+  \mathbf{H}_b & \mathbf{H}_{in}
+  \end{bmatrix}
+  \begin{bmatrix}
+  \tilde{\mathbf{x}}_{b} \\
+  \tilde{\mathbf{x}}_{in}
+  \end{bmatrix}
+  +
+  \mathbf{H}_n
+  \begin{bmatrix}
+  \mathbf{n}_{dg} \\
+  \mathbf{n}_{da}
+  \end{bmatrix}
+\f}
+
+where we have defined:
+
+\f{align*}{
+\mathbf{H}_b & = \mathbf{H}_n =
+  \begin{bmatrix}
+  -{}^I_w\hat{\mathbf{R}} \hat{\mathbf{D}}_w &
+  {}^I_w\hat{\mathbf{R}} \hat{\mathbf{D}}_w
+  \hat{\mathbf{T}}_g {}^I_a\hat{\mathbf{R}} \hat{\mathbf{D}}_a \\
+  \mathbf{0}_3 & -{}^I_a\hat{\mathbf{R}}\hat{\mathbf{D}}_a
+  
\end{bmatrix} \\ +\mathbf{H}_{in} & = + \begin{bmatrix} + \mathbf{H}_w & \mathbf{H}_a & \mathbf{H}_{Iw} & \mathbf{H}_{Ia} & \mathbf{H}_{g} + \end{bmatrix} +\f} +\f{align*}{ + \mathbf{H}_w & = \frac{\partial [{}^{I_k}\tilde{\boldsymbol{\omega}} ~{}^{I_k}\tilde{\mathbf{a}}]}{\partial \mathbf{x}_{Dw} } = + \begin{bmatrix} + {}^I_w\hat{\mathbf{R}} \mathbf{H}_{Dw} \\ + \mathbf{0}_3 + \end{bmatrix} + \\ + \mathbf{H}_a & = \frac{\partial [{}^{I_k}\tilde{\boldsymbol{\omega}} ~{}^{I_k}\tilde{\mathbf{a}}]}{\partial \mathbf{x}_{Da} } = + \begin{bmatrix} + - {}^I_w\hat{\mathbf{R}}\hat{\mathbf{D}}_w \hat{\mathbf{T}}_g {}^I_a\hat{\mathbf{R}}\mathbf{H}_{Da} \\ + {}^I_a\hat{\mathbf{R}}\mathbf{H}_{Da} + \end{bmatrix} + \\ + \mathbf{H}_{Iw} & = \frac{\partial [{}^{I_k}\tilde{\boldsymbol{\omega}} ~{}^{I_k}\tilde{\mathbf{a}}]}{\partial {}^I_w\delta \boldsymbol{\theta} } = + \begin{bmatrix} + \lfloor {}^{I}\hat{\boldsymbol{\omega}} \rfloor \\ + \mathbf{0}_3 + \end{bmatrix} + \\ + \mathbf{H}_{Ia} & = \frac{\partial [{}^{I_k}\tilde{\boldsymbol{\omega}} ~{}^{I_k}\tilde{\mathbf{a}}]}{\partial {}^I_a\delta \boldsymbol{\theta} } = + \begin{bmatrix} + -{}^I_a\hat{\mathbf{R}} \hat{\mathbf{D}}_w \hat{\mathbf{T}}_g + \lfloor {}^{I}\hat{\mathbf{a}} \rfloor \\ + \lfloor {}^I\hat{\mathbf{a}} \rfloor + \end{bmatrix} + \\ + \mathbf{H}_{g} & = \frac{\partial [{}^{I_k}\tilde{\boldsymbol{\omega}} ~{}^{I_k}\tilde{\mathbf{a}}]}{\partial \mathbf{x}_{Tg} } = + \begin{bmatrix} + -{}^I_w\hat{\mathbf{R}} \hat{\mathbf{D}}_w \mathbf{H}_{Tg} \\ + \mathbf{0}_3 + \end{bmatrix} +\f} + + +@subsection analytical_linearization_state Inertial State Linearization +We then linearize \f$\Delta \mathbf{R}_k\f$, \f$\Delta\mathbf{p}_k\f$ and \f$\Delta \mathbf{v}_k\f$. 
+For the \f$\Delta \mathbf{R}_k\f$, we can get: + +\f{align*}{ + \Delta \mathbf{R}_k &= + \exp(-{}^{I_{k}}\boldsymbol{\omega}\Delta t_k) + \\ + &= \exp(-({}^{I_{k}}\hat{\boldsymbol{\omega}} + + {}^{I_{k}}\tilde{\boldsymbol{\omega}})\Delta t_k) + \\ + &\simeq + \exp(-{}^{I_{k}}\hat{\boldsymbol{\omega}}\Delta t_k) + \cdot + \exp(-\mathbf{J}_r (-{}^{I_{k}}\hat{\boldsymbol{\omega}}\Delta t_k) + {}^{I_{k}}\tilde{\boldsymbol{\omega}} + \Delta t_k) \\ + &\simeq + \underbrace{\exp(-\Delta \hat{\mathbf{R}}_k\mathbf{J}_r (-{}^{I_{k}}\hat{\boldsymbol{\omega}}\Delta t_k) + {}^{I_{k}}\tilde{\boldsymbol{\omega}} + \Delta t_k)}_{\Delta \tilde{\mathbf{R}}_k} + \exp(-{}^{I_{k}}\hat{\boldsymbol{\omega}}\Delta t_k) +\f} + +For the integration of \f$\Delta \mathbf{p}_k\f$, we get: + +\f{align*}{ + \Delta \mathbf{p}_{k} & = + \int^{t_{k+1}}_{t_k} \int^{s}_{t_k} + \exp({}^{I_{k}}\boldsymbol{\omega}\delta \tau) d \tau d s + \cdot + {}^{I_{k}} \mathbf{a} + \\ + & = + \int^{t_{k+1}}_{t_k} \int^{s}_{t_k} + \exp(({}^{I_{k}} \hat{\boldsymbol{\omega}} + + {}^{I_{k}}\tilde{\boldsymbol{\omega}})\delta \tau) d \tau d s + \cdot + ({}^{I_{k}}\hat{\mathbf{a}} + {}^{I_{k}}\tilde{\mathbf{a}}) + \\ + & \simeq + \int^{t_{k+1}}_{t_k} \int^{s}_{t_k} + \exp({}^{I_{k}}\hat{\boldsymbol{\omega}}\delta \tau) \exp(\mathbf{J}_r({}^{I_{k}}\hat{\boldsymbol{\omega}}\delta \tau) {}^{I_{k}}\tilde{\boldsymbol{\omega}}\delta \tau) d \tau d s + \cdot + ({}^{I_{k}}\hat{\mathbf{a}} + {}^{I_{k}}\tilde{\mathbf{a}}) + \\ + & \simeq + \int^{t_{k+1}}_{t_k} \int^{s}_{t_k} + \exp({}^{I_{k}}\hat{\boldsymbol{\omega}}\delta \tau) (\mathbf{I} + \lfloor + {\mathbf{J}_r({}^{I_{k}}\hat{\boldsymbol{\omega}}\delta \tau) {}^{I_{k}}\tilde{\boldsymbol{\omega}}\delta \tau} \rfloor) d \tau d s + \cdot + ({}^{I_{k}}\hat{\mathbf{a}} + {}^{I_{k}}\tilde{\mathbf{a}}) + \\ + & \simeq + \int^{t_{k+1}}_{t_k} \int^{s}_{t_k} + \exp({}^{I_{k}}\hat{\boldsymbol{\omega}}\delta \tau) d \tau d s + \cdot + {}^{I_{k}} \hat{\mathbf{a}} + \notag + \\ + & + ~~~ - 
+ \underbrace{ + \int^{t_{k+1}}_{t_k} \int^{s}_{t_k} + \exp({}^{I_{k}}\hat{\boldsymbol{\omega}}\delta \tau) + \lfloor {}^{I_{k}} \hat{\mathbf{a}} \rfloor + \mathbf{J}_r ({}^{I_{k}}\hat{\boldsymbol{\omega}}\delta \tau) + d \tau d s}_{\boldsymbol{\Xi}_4} + \cdot + {}^{I_{k}}\tilde{\boldsymbol{\omega}} + \notag + \\ + & + ~~~ + + \underbrace{\int^{t_{k+1}}_{t_k} \int^{s}_{t_k} + \exp({}^{I_{k}}\hat{\boldsymbol{\omega}}\delta \tau) + d \tau d s }_{\boldsymbol{\Xi}_2} + \cdot + {}^{I_{k}}\tilde{\mathbf{a}} + \notag + \\& + = + \Delta \hat{\mathbf{p}}_{k} \underbrace{- + \boldsymbol{\Xi}_4 {}^{I_{k}}\tilde{\boldsymbol{\omega}} + +\boldsymbol{\Xi}_2 {}^{I_{k}}\tilde{\mathbf{a}}}_{\Delta \tilde{\mathbf{p}}_k} + \\ + & = \Delta \hat{\mathbf{p}}_{k} + \Delta \tilde{{\mathbf{p}}}_{k} +\f} + +For the integration of \f$\Delta \mathbf{v}_k\f$, we get: + +\f{align*}{ + \Delta \mathbf{v}_{k} + & = + \int^{t_{k+1}}_{t_k} + \exp({}^{I_{k}}\boldsymbol{\omega}\delta \tau) d \tau + \cdot + {}^{I_{k}} \mathbf{a} + \\ + & = + \int^{t_{k+1}}_{t_k} + \exp(({}^{I_{k}} \hat{\boldsymbol{\omega}} + {}^{I_{k}}\tilde{\boldsymbol{\omega}})\delta \tau) d \tau + \cdot + ({}^{I_{k}}\hat{\mathbf{a}} + {}^{I_{k}}\tilde{\mathbf{a}}) + \\ + & \simeq + \int^{t_{k+1}}_{t_k} + \exp({}^{I_{k}}\hat{\boldsymbol{\omega}}\delta \tau) \exp(\mathbf{J}_r({}^{I_{k}}\hat{\boldsymbol{\omega}}\delta \tau) {}^{I_{k}}\tilde{\boldsymbol{\omega}}\delta \tau) d \tau + \cdot + ({}^{I_{k}}\hat{\mathbf{a}} + {}^{I_{k}}\tilde{\mathbf{a}}) + \\ + & \simeq + \int^{t_{k+1}}_{t_k} + \exp({}^{I_{k}}\hat{\boldsymbol{\omega}}\delta \tau) (\mathbf{I} + \lfloor + {\mathbf{J}_r({}^{I_{k}}\hat{\boldsymbol{\omega}}\delta \tau) {}^{I_{k}}\tilde{\boldsymbol{\omega}}\delta \tau} \rfloor) d \tau + \cdot + ({}^{I_{k}}\hat{\mathbf{a}} + {}^{I_{k}}\tilde{\mathbf{a}}) + \\ + & \simeq + \int^{t_{k+1}}_{t_k} + \exp({}^{I_{k}}\hat{\boldsymbol{\omega}}\delta \tau) d \tau + \cdot + {}^{I_{k}} \hat{\mathbf{a}} + \notag + \\ + & + ~~~ - + \underbrace{ + 
\int^{t_{k+1}}_{t_k} + \exp({}^{I_{k}}\hat{\boldsymbol{\omega}}\delta \tau) + \lfloor {}^{I_{k}} \hat{\mathbf{a}} \rfloor + \mathbf{J}_r ({}^{I_{k}}\hat{\boldsymbol{\omega}}\delta \tau) + d \tau }_{\boldsymbol{\Xi}_3} + \cdot + {}^{I_{k}}\tilde{\boldsymbol{\omega}} + \notag + \\ + & + ~~~ + + \underbrace{\int^{t_{k+1}}_{t_k} + \exp({}^{I_{k}}\hat{\boldsymbol{\omega}}\delta \tau) + d \tau }_{\boldsymbol{\Xi}_1} + \cdot + {}^{I_{k}}\tilde{\mathbf{a}} + \notag + \\& + = + \Delta \hat{\mathbf{v}}_{k} \underbrace{- + \boldsymbol{\Xi}_3 {}^{I_{k}}\tilde{\boldsymbol{\omega}} + +\boldsymbol{\Xi}_1 {}^{I_{k}}\tilde{\mathbf{a}}}_{\Delta \tilde{\mathbf{v}}_k} + \\ + & = \Delta \hat{\mathbf{v}}_{k} + \Delta \tilde{{\mathbf{v}}}_{k} +\f} + +where \f$\delta \tau = t_\tau - t_k\f$ and +\f$\boldsymbol{\Xi}_i, i=1\ldots4\f$ are shown in the @ref analytical_integration_components section. +\f$\mathbf {J}_r(-{}^{I_{k}}\hat{\boldsymbol{\omega}}\Delta t_k)\f$ is the right Jacobian of \f${SO}(3)\f$ +[see @ref ov_core::Jr_so3() @cite Barfoot2017]. 
+In summary, we have the following linearized integration components (see @ref analytical_integration_components): + +\f{align*}{ + \Delta \mathbf{R}_k & = + \Delta \tilde{\mathbf{R}}_k \Delta \hat{\mathbf{R}}_k + \triangleq + \exp \left( + -\Delta \hat{\mathbf{R}}_k + \mathbf{J}_r (\Delta \boldsymbol{\theta}_k) + {}^{I_k}\tilde{\boldsymbol{\omega}}\Delta t_k + \right) + \Delta \hat{\mathbf{R}}_k + \\ + \Delta \mathbf{p}_k & = + \Delta \hat{\mathbf{p}}_k + \Delta \tilde{\mathbf{p}}_k + \triangleq + \Delta \hat{\mathbf{p}}_k -\boldsymbol{\Xi}_4 {}^{I_k}\tilde{\boldsymbol{\omega}} + + \boldsymbol{\Xi}_2 {}^{I_k}\tilde{\mathbf{a}} + \\ + \Delta \mathbf{v}_k & = + \Delta \hat{\mathbf{v}}_k + \Delta \tilde{\mathbf{v}}_k + \triangleq + \Delta \hat{\mathbf{v}}_k -\boldsymbol{\Xi}_3 {}^{I_k}\tilde{\boldsymbol{\omega}} + + \boldsymbol{\Xi}_1 {}^{I_k}\tilde{\mathbf{a}} +\f} + +where \f$\mathbf{J}_r\left(\Delta \boldsymbol{\theta}_k\right) \triangleq \mathbf{J}_r \left(-{}^{I_k}\hat{\boldsymbol{\omega}}_k\Delta t_k \right)\f$ and \f$\boldsymbol{\Xi}_3\f$ and \f$\boldsymbol{\Xi}_4\f$ are defined as: + +\f{align*}{ + \boldsymbol{\Xi}_3 & = + \int^{t_{k+1}}_{t_k} {}^{I_k}_{I_{\tau}}\mathbf{R} + \lfloor {}^{I_{\tau}}\mathbf{a} \rfloor + \mathbf{J}_r \left( {}^{I_k}\hat{\boldsymbol{\omega}}_k \delta \tau \right) \delta \tau + ~d{\tau} + \\ + \boldsymbol{\Xi}_4 & = + \int^{t_{k+1}}_{t_k} \int^{s}_{t_k} {}^{I_k}_{I_{\tau}}\mathbf{R} + \lfloor {}^{I_{\tau}}\mathbf{a} \rfloor + \mathbf{J}_r \left( {}^{I_k}\hat{\boldsymbol{\omega}}_k \delta \tau \right) \delta \tau + ~d{\tau}ds +\f} + + + + + +@section analytical_linearization_sys Linearized Error-state Evolution + +Hence, the linearized IMU system is: + +\f{align*}{ + \delta \boldsymbol{\theta}_{k+1} & \simeq + \Delta \hat{\mathbf{R}}_k \delta \boldsymbol{\theta}_k + + \Delta \hat{\mathbf{R}}_k\mathbf{J}_r \left( \Delta \boldsymbol{\theta}_k \right) \Delta t_k {}^{I_k}\tilde{\boldsymbol{\omega}} + \\ + {}^G\tilde{\mathbf{p}}_{I_{k+1}} 
& \simeq
+  {}^G\tilde{\mathbf{p}}_{I_{k}}+{}^G\tilde{\mathbf{v}}_k\Delta t_k
+  - {}^{I_{k}}_G\hat{\mathbf{R}}^\top
+  \lfloor \Delta \hat{\mathbf{p}}_k \rfloor
+  \delta \boldsymbol{\theta}_k
+  + {}^{I_{k}}_G\hat{\mathbf{R}}^\top \left( -\boldsymbol{\Xi}_4 {}^{I_k}\tilde{\boldsymbol{\omega}} + \boldsymbol{\Xi}_2 {}^{I_k}\tilde{\mathbf{a}} \right)
+  \\
+  {}^G\tilde{\mathbf{v}}_{I_{k+1}} & \simeq
+  {}^G\tilde{\mathbf{v}}_{I_{k}}
+  - {}^{I_{k}}_G\hat{\mathbf{R}}^\top
+  \lfloor \Delta \hat{\mathbf{v}}_k \rfloor
+  \delta \boldsymbol{\theta}_k
+  + {}^{I_{k}}_G\hat{\mathbf{R}}^\top \left( -\boldsymbol{\Xi}_3 {}^{I_k}\tilde{\boldsymbol{\omega}} + \boldsymbol{\Xi}_1 {}^{I_k}\tilde{\mathbf{a}} \right)
+\f}
+
+The overall linearized error state system is:
+
+\f{align*}{
+  \tilde{\mathbf{x}}_{I_{k+1}} & \simeq \boldsymbol{\Phi}_{I{(k+1,k)}}\tilde{\mathbf{x}}_{I_k} + \mathbf{G}_{Ik}\mathbf{n}_{dk}
+  \\
+  \boldsymbol{\Phi}_{I(k+1,k)} & =
+  \begin{bmatrix}
+  \boldsymbol{\Phi}_{nn} &
+  \boldsymbol{\Phi}_{wa} \mathbf{H}_b &
+  \boldsymbol{\Phi}_{wa} \mathbf{H}_{in} \\
+  \mathbf{0}_{6\times 9} &
+  \mathbf{I}_6 &
+  \mathbf{0}_{6\times 24} \\
+  \mathbf{0}_{24\times 9} & \mathbf{0}_{24\times 6} & \mathbf{I}_{24}
+  \end{bmatrix}
+  \\
+  \mathbf{G}_{Ik} & =
+  \begin{bmatrix}
+  \boldsymbol{\Phi}_{wa} \mathbf{H}_n & \mathbf{0}_{9\times 6} \\
+  \mathbf{0}_{6} & \mathbf{I}_6 \Delta t_k \\
+  \mathbf{0}_{24\times 6} & \mathbf{0}_{24\times 6}
+  \end{bmatrix}
+\f}
+
+where \f$\boldsymbol{\Phi}_{I(k+1,k)}\f$ and \f$\mathbf{G}_{Ik}\f$ are the state transition matrix and noise Jacobians for IMU dynamic model;
+\f$\mathbf{H}_{b}\f$, \f$\mathbf{H}_{in}\f$ and \f$\mathbf{H}_n\f$ are Jacobians related to bias, IMU intrinsics and noises, which can be found from previous derivations;
+\f$\mathbf{n}_{dk} = [\mathbf{n}^{\top}_{dg} ~ \mathbf{n}^{\top}_{da} ~ \mathbf{n}^{\top}_{dwg} ~ \mathbf{n}^{\top}_{dwa}]^\top\f$ are the discrete-time IMU noises;
+\f$\boldsymbol{\Phi}_{nn}\f$ and \f$\boldsymbol{\Phi}_{wa}\f$ can be 
computed as:
+
+\f{align*}{
+  \boldsymbol{\Phi}_{nn} & =
+  \begin{bmatrix}
+  \Delta \hat{\mathbf{R}}_k & \mathbf{0}_3 & \mathbf{0}_3 \\
+  -{}^{I_k}_G\hat{\mathbf{R}}^\top\lfloor \Delta \hat{\mathbf{p}}_k \rfloor & \mathbf{I}_3 & \mathbf{I}_3 \Delta t_k \\
+  -{}^{I_k}_G\hat{\mathbf{R}}^\top\lfloor \Delta \hat{\mathbf{v}}_k \rfloor & \mathbf{0}_3 & \mathbf{I}_3
+  \end{bmatrix}
+  \\
+  \boldsymbol{\Phi}_{wa} & =
+  \begin{bmatrix}
+  \Delta \hat{\mathbf{R}}_k\mathbf{J}_r(\Delta \boldsymbol{\theta}_k) \Delta t_k & \mathbf{0}_3 \\
+  -{}^{I_k}_G\hat{\mathbf{R}}^\top\boldsymbol{\Xi}_4 & {}^{I_k}_G\hat{\mathbf{R}}^\top\boldsymbol{\Xi}_2 \\
+  -{}^{I_k}_G\hat{\mathbf{R}}^\top\boldsymbol{\Xi}_3 & {}^{I_k}_G\hat{\mathbf{R}}^\top\boldsymbol{\Xi}_1
+  \end{bmatrix}
+\f}
+
+
+@m_class{m-note m-frame}
+
+@par How to apply FEJ to ensure consistency?
+When using First-Estimate Jacobians (see @ref fej for an overview) for the state transition matrix,
+we need to replace \f$\Delta \hat{\mathbf{R}}_k\f$, \f$\Delta \hat{\mathbf{p}}_k\f$ and \f$\Delta \hat{\mathbf{v}}_k\f$ as:
+\f{align*}{
+  \Delta \hat{\mathbf{R}}_k & =
+  {}^{I_{k+1}}_{G}\hat{\mathbf{R}}
+  {}^{I_k}_G\hat{\mathbf{R}}^{\top}
+  \\
+  \Delta \hat{\mathbf{p}}_k & =
+  {}^{I_k}_G\hat{\mathbf{R}}
+  (
+  {}^G\hat{\mathbf{p}}_{I_{k+1}} -
+  {}^{G}\hat{\mathbf{p}}_{I_k} - {}^G\hat{\mathbf{v}}_{I_k}\Delta t_k
+  + \frac{1}{2}{}^G\mathbf{g}\Delta t^2_k
+  )
+  \\
+  \Delta \hat{\mathbf{v}}_k
+  & =
+  {}^{I_k}_G\hat{\mathbf{R}}
+  (
+  {}^G\hat{\mathbf{v}}_{I_{k+1}} -
+  {}^{G}\hat{\mathbf{v}}_{I_k}
+  + {}^G\mathbf{g}\Delta t_k
+  )
+\f}
+This ensures the semi-group property of the state transition matrix by ensuring that the previous timestep is
+evaluated at the same linearization point (their estimate before update / their estimate when they were first initialized into the state). 
+ + + + +Note that \f$\mathbf{n}_{d*}\sim \mathcal{N}(\mathbf{0}, \frac{\sigma^2_{*}\mathbf{I}_3}{\Delta t_k})\f$ and hence the covariance for \f$\mathbf{n}_{dI}\f$ can be written as: + +\f{align*}{ + \mathbf{Q}_{dI} & = + \begin{bmatrix} + \frac{\sigma^2_g}{\Delta t_k} \mathbf{I}_3 & \mathbf{0}_3 & \mathbf{0}_3 & \mathbf{0}_3 \\ + \mathbf{0}_3 & \frac{\sigma^2_a}{\Delta t_k} \mathbf{I}_3 & \mathbf{0}_3 & \mathbf{0}_3 \\ + \mathbf{0}_3 & \mathbf{0}_3 & \frac{\sigma^2_{wg}}{\Delta t_k} \mathbf{I}_3 & \mathbf{0}_3 \\ + \mathbf{0}_3 & \mathbf{0}_3 & \mathbf{0}_3 & \frac{\sigma^2_{wa}}{\Delta t_k} \mathbf{I}_3 + \end{bmatrix} +\f} + +Finally, the covariance propagation can be derived as: + +\f{align*}{ + \mathbf{P}_{k+1} & = + \boldsymbol{\Phi}_{I(k+1,k)} \mathbf{P}_k + \boldsymbol{\Phi}^{\top}_{I(k+1,k)} + + \mathbf{G}_{Ik} + \mathbf{Q}_{dI} + \mathbf{G}^{\top}_{Ik} +\f} + + + + +@section analytical_integration_components Integration Component Definitions + +When deriving the components, we drop the subscripts for simplicity. +As we define angular velocity \f$\hat{\omega} = ||{{}^I\hat{\boldsymbol{\omega}}}||\f$, +rotation axis \f$\mathbf{k} = {{{}^I\hat{\boldsymbol{\omega}}}}/{||{{}^I\hat{\boldsymbol{\omega}}}||}\f$, +and the small interval we are integrating over as \f$\delta t\f$. 
+The first integration we need is:
+
+\f{align*}{
+  \boldsymbol{\Xi}_1 & =
+  \mathbf{I}_3 \delta t
+  + \frac{1 - \cos (\hat{\omega} \delta t)}{\hat{\omega}}
+  \lfloor \hat{\mathbf{k}} \rfloor
+  + \left(\delta t - \frac{\sin (\hat{\omega} \delta t)}{\hat{\omega}}\right) \lfloor \hat{\mathbf{k}} \rfloor^2
+\f}
+
+The second integration we need is:
+
+\f{align*}{
+  \boldsymbol{\Xi}_2 & =
+  \frac{1}{2} \delta t^2 \mathbf{I}_3
+  + \frac{\hat{\omega} \delta t - \sin (\hat{\omega} \delta t)}{\hat{\omega}^2}\lfloor \hat{\mathbf{k}} \rfloor
+  +
+  \left(
+  \frac{1}{2} \delta t^2 -
+  \frac{1 - \cos (\hat{\omega} \delta t)}{\hat{\omega}^2}
+  \right) \lfloor \hat{\mathbf{k}} \rfloor ^2
+\f}
+
+The third integration can be written as:
+
+\f{align*}{
+  \boldsymbol{\Xi}_3
+  &=
+  \frac{1}{2}\delta t^2
+  \lfloor \hat{\mathbf{a}} \rfloor
+  + \frac{\sin (\hat{\omega} \delta t) - \hat{\omega} \delta t }{\hat{\omega}^2}
+  \lfloor\hat{\mathbf{a}} \rfloor \lfloor \hat{\mathbf{k}} \rfloor
+  \notag \\
+  &
+  + \frac{\sin (\hat{\omega} \delta t) - \hat{\omega} \delta t \cos (\hat{\omega} \delta t) }{\hat{\omega}^2}
+  \lfloor \hat{\mathbf{k}} \rfloor\lfloor\hat{\mathbf{a}} \rfloor
+  \notag \\
+  &
+  + \left(
+  \frac{1}{2} \delta t^2 - \frac{1 - \cos (\hat{\omega} \delta t)}{\hat{\omega}^2}
+  \right)
+  \lfloor\hat{\mathbf{a}} \rfloor \lfloor \hat{\mathbf{k}} \rfloor ^2
+  \notag \\
+  &
+  +
+  \left(
+  \frac{1}{2} \delta t^2 + \frac{1 - \cos (\hat{\omega} \delta t) - \hat{\omega} \delta t \sin (\hat{\omega} \delta t) }{\hat{\omega}^2}
+  \right)
+  \lfloor \hat{\mathbf{k}} \rfloor ^2 \lfloor\hat{\mathbf{a}} \rfloor
+  \notag \\
+  &
+  +
+  \left(
+  \frac{1}{2} \delta t^2 + \frac{1 - \cos (\hat{\omega} \delta t) - \hat{\omega} \delta t \sin (\hat{\omega} \delta t) }{\hat{\omega}^2}
+  \right) \hat{\mathbf{k}}^{\top} \hat{\mathbf{a}} \lfloor \hat{\mathbf{k}} \rfloor
+  \notag \\
+  &
+  - \frac{ 3 \sin (\hat{\omega} \delta t) - 2 \hat{\omega} \delta t - \hat{\omega} \delta t \cos (\hat{\omega} \delta t) }{\hat{\omega}^2}
+  \hat{\mathbf{k}}^{\top} \hat{\mathbf{a}} \lfloor \hat{\mathbf{k}} \rfloor ^2
+\f}
+
+The fourth integration we need is:
+
+\f{align*}{
+  \boldsymbol{\Xi}_4
+  &
+  = \frac{1}{6}\delta t^3 \lfloor\hat{\mathbf{a}} \rfloor
+  + \frac{2(1 - \cos (\hat{\omega} \delta t)) - (\hat{\omega}^2 \delta t^2)}{2 \hat{\omega}^3}
+  \lfloor\hat{\mathbf{a}} \rfloor \lfloor \hat{\mathbf{k}} \rfloor
+  \nonumber \\ &
+  + \left(
+  \frac{2(1- \cos(\hat{\omega} \delta t)) - \hat{\omega} \delta t \sin (\hat{\omega} \delta t)}{\hat{\omega}^3}
+  \right)
+  \lfloor \hat{\mathbf{k}} \rfloor\lfloor\hat{\mathbf{a}} \rfloor
+  \nonumber \\ &
+  + \left(
+  \frac{\sin (\hat{\omega} \delta t) - \hat{\omega} \delta t}{\hat{\omega}^3}
+  + \frac{\delta t^3}{6}
+  \right)
+  \lfloor\hat{\mathbf{a}} \rfloor \lfloor \hat{\mathbf{k}} \rfloor^2
+  \nonumber \\ &
+  +
+  \frac{\hat{\omega} \delta t - 2 \sin(\hat{\omega} \delta t) + \frac{1}{6}(\hat{\omega} \delta t)^3 + \hat{\omega} \delta t \cos(\hat{\omega} \delta t)}{\hat{\omega}^3}
+  \lfloor \hat{\mathbf{k}} \rfloor^2\lfloor\hat{\mathbf{a}} \rfloor
+  \nonumber \\ &
+  +
+  \frac{\hat{\omega} \delta t - 2 \sin(\hat{\omega} \delta t) + \frac{1}{6}(\hat{\omega} \delta t)^3 + \hat{\omega} \delta t \cos(\hat{\omega} \delta t)}{\hat{\omega}^3}
+  \hat{\mathbf{k}}^{\top} \hat{\mathbf{a}} \lfloor \hat{\mathbf{k}} \rfloor
+  \nonumber \\ &
+  +
+  \frac{4 \cos(\hat{\omega} \delta t) - 4 + (\hat{\omega} \delta t)^2 + \hat{\omega} \delta t \sin(\hat{\omega} \delta t) }
+  {\hat{\omega}^3}
+  \hat{\mathbf{k}}^{\top} \hat{\mathbf{a}} \lfloor \hat{\mathbf{k}} \rfloor^2
+\f}
+
+When \f$\hat{\omega}\f$ is too small, in order to avoid numerical instability, we can compute the above integration
+identities as (see [L'Hôpital's rule](https://en.wikipedia.org/wiki/L%27H%C3%B4pital%27s_rule)):
+
+\f{align*}{
+  \lim\limits_{\hat{\omega} \rightarrow 0}
+  \boldsymbol{\Xi}_1
+  & =
+  \delta t \mathbf{I}_3
+  +
+  \delta t \sin (\hat{\omega} \delta t)
+  \lfloor \hat{\mathbf{k}} 
\rfloor + + + \delta t + \left( + 1 - \cos(\hat{\omega} \delta t) + \right) + \lfloor \hat{\mathbf{k}} \rfloor^2 + \\ + \lim\limits_{\hat{\omega} \rightarrow 0} + \boldsymbol{\Xi}_2 + & = + \frac{\delta t^2}{2} \mathbf{I}_3 + + \frac{\delta t^2}{2} \sin(\hat{\omega} \delta t) + \lfloor \hat{\mathbf{k}} \rfloor + + + \frac{\delta t^2}{2} + \left( + 1 - + \cos(\hat{\omega} \delta t) + \right) \lfloor \hat{\mathbf{k}} \rfloor ^2 + \\ + & = \frac{\delta t}{2} \lim\limits_{\hat{\omega} \rightarrow 0} + \boldsymbol{\Xi}_1 + \\ + \lim\limits_{\hat{\omega} \rightarrow 0} + \boldsymbol{\Xi}_3 + & = + \frac{\delta t^2}{2} \lfloor \hat{\mathbf{a}} \rfloor + + + \frac{\delta t^2 \sin (\hat{\omega} \delta t)}{2} + \left( + - \lfloor \hat{\mathbf{a}} \rfloor \lfloor \hat{\mathbf{k}} \rfloor + + \lfloor \hat{\mathbf{k}} \rfloor \lfloor \hat{\mathbf{a}} \rfloor + + \hat{\mathbf{k}}^{\top} \hat{\mathbf{a}} \lfloor \hat{\mathbf{k}} \rfloor^2 + \right) + \notag + \\ + & + \frac{\delta t^2}{2} (1 - \cos (\hat{\omega} \delta t)) + \left( + \lfloor \hat{\mathbf{a}} \rfloor \lfloor \hat{\mathbf{k}} \rfloor^2 + + \lfloor \hat{\mathbf{k}} \rfloor^2 \lfloor \hat{\mathbf{a}} \rfloor + + \hat{\mathbf{k}}^{\top} \hat{\mathbf{a}} \lfloor \hat{\mathbf{k}} \rfloor + \right) + \\ + \lim\limits_{\hat{\omega} \rightarrow 0} + \boldsymbol{\Xi}_4 + &= + \frac{\delta t^3}{6} \lfloor \hat{\mathbf{a}} \rfloor + + + \frac{\delta t^3 \sin (\hat{\omega} \delta t)}{6} + \left( + - \lfloor \hat{\mathbf{a}} \rfloor \lfloor \hat{\mathbf{k}} \rfloor + + \lfloor \hat{\mathbf{k}} \rfloor \lfloor \hat{\mathbf{a}} \rfloor + + \hat{\mathbf{k}}^{\top} \hat{\mathbf{a}} \lfloor \hat{\mathbf{k}} \rfloor^2 + \right) + \notag + \\ + & + \frac{\delta t^3}{6} (1 - \cos (\hat{\omega} \delta t)) + \left( + \lfloor \hat{\mathbf{a}} \rfloor \lfloor \hat{\mathbf{k}} \rfloor^2 + + \lfloor \hat{\mathbf{k}} \rfloor^2 \lfloor \hat{\mathbf{a}} \rfloor + + \hat{\mathbf{k}}^{\top} \hat{\mathbf{a}} \lfloor \hat{\mathbf{k}} \rfloor + 
\right)
+  \\
+  & = \frac{\delta t}{3} \lim\limits_{\hat{\omega} \rightarrow 0}
+  \boldsymbol{\Xi}_3
+\f}
+
+We currently do not implement a [RK4](https://en.wikipedia.org/wiki/Runge%E2%80%93Kutta_methods) version of the above integrations.
+This would likely improve performance if leveraged as it would allow for the measurement to evolve over the integration period.
+
+
+*/
\ No newline at end of file
diff --git a/docs/propagation-discrete.dox b/docs/propagation-discrete.dox
new file mode 100644
index 000000000..cec709818
--- /dev/null
+++ b/docs/propagation-discrete.dox
@@ -0,0 +1,326 @@
+/**
+
+@page propagation_discrete Discrete Propagation
+@tableofcontents
+
+The state definition and general propagation equations are covered in the @ref propagation page.
+Here we will cover a simplified discrete inertial model which does not include IMU intrinsic parameters.
+First we will cover how we can propagate our mean forward,
+after which we cover how to derive the linearized discrete error-state system.
+
+
+
+@section disc_quat_integration Zeroth Order Quaternion Integrator
+
+First we look at how to integrate our orientation equation forward in time.
+One could use the integration property of a \f$SO(3)\f$'s matrix exponential or as we will do here, derive it for a quaternion orientation. 
+The solution to the quaternion evolution has the following general form +(see [[Indirect Kalman Filter for 3D Attitude Estimation](http://mars.cs.umn.edu/tr/reports/Trawny05b.pdf)] @cite Trawny2005TR Section 1.6): + +\f{align*}{ + ^{I_{t}}_{G}\bar{q} = \boldsymbol{\Theta}(t,t_k) \text{}^{I_{k}}_{G}\bar{q} +\f} + +Differentiating and reordering the terms yields the governing equation for +\f$\boldsymbol{\Theta}(t,t_k)\f$ as + +\f{align*}{ + \boldsymbol{\Theta}(t,t_k) &= \text{}^{I_t}_G\bar{q} \text{ }^{I_k}_{G}\bar{q}^{-1}\\ + \Rightarrow \dot{\boldsymbol{\Theta}}(t,t_k) &= + \text{}^{I_t}_G\dot{\bar{q}} \text{ }^{I_k}_{G}\bar{q}^{-1}\\ + &= \frac{1}{2} \boldsymbol{\Omega}(\boldsymbol{\omega}(t)) \text{ }^{I_t}_{G}\bar{q} + \text{ }^{I_k}_{G}\bar{q}^{-1}\\ + &= \frac{1}{2} \boldsymbol{\Omega}(\boldsymbol{\omega}(t)) \boldsymbol{\Theta}(t,t_k) +\f} + +where we have defined: + +\f{align*}{ +\boldsymbol{\Omega}(\boldsymbol{\omega}(t)) &= +\begin{bmatrix} -\lfloor \boldsymbol{\omega}(t) \times \rfloor +&& \boldsymbol{\omega}(t) \\ +-\boldsymbol{\omega}(t)^\top && 0 \end{bmatrix} +\f} + +and \f$\boldsymbol{\Theta}(t_k,t_k) = \mathbf{I}_{4}\f$. 
+If we take \f$\boldsymbol{\omega}(t) = \boldsymbol{\omega}\f$ to be constant over the period
+\f$\Delta t_k = t_{k+1} - t_k\f$,
+then the above system is linear time-invariant (LTI), and
+\f$\boldsymbol{\Theta}\f$ can be solved as (see @cite Maybeck1982STOC):
+
+\f{align*}{
+  \boldsymbol{\Theta}(t_{k+1},t_k)
+  &= \exp\bigg(\frac{1}{2}\boldsymbol{\Omega}(\boldsymbol{\omega})\Delta t_k\bigg)\\
+  &= \cos\bigg(\frac{|\boldsymbol{\omega}|}{2} \Delta t_k \bigg) \cdot \mathbf{I}_4
+  + \frac{1}{|\boldsymbol{\omega}|}\sin\bigg(\frac{|\boldsymbol{\omega}|}{2} \Delta t_k \bigg)
+  \cdot \boldsymbol{\Omega}(\boldsymbol{\omega})\\
+&\simeq
+  \mathbf{I}_4 + \frac{\Delta t_k}{2}\boldsymbol{\Omega}(\boldsymbol{\omega})
+\f}
+
+where \f$\exp(\cdot)\f$ is the general matrix exponential which has a closed form solution, and
+the approximation assumes small \f$|\boldsymbol{\omega}|\f$.
+We can formulate the quaternion propagation from \f$t_k\f$ to \f$t_{k+1}\f$ using the estimated
+rotational velocity \f$\hat{\boldsymbol{\omega}}(t) = \hat{\boldsymbol{\omega}}\f$ as:
+
+\f{align*}{
+  \text{}^{I_{k+1}}_{G}\hat{\bar{q}}
+  = \exp\bigg(\frac{1}{2}\boldsymbol{\Omega}(\hat{\boldsymbol{\omega}})\Delta t_k\bigg)
+  \text{}^{I_{k}}_{G}\hat{\bar{q}}
+\f}
+
+
+
+@section disc_prop Discrete-time IMU Propagation
+
+We model the measurements as discrete-time over the integration period.
+To do this, the measurements can be assumed to be constant during the sampling period.
+We employ this assumption and approximate that the measurement at time \f$t_k\f$ remains
+the same until we get the next measurement at \f$t_{k+1}\f$.
+For the quaternion propagation, it is the same as continuous-time propagation
+with constant measurement assumption \f${\boldsymbol{\omega}}_{m}(t_k) = {\boldsymbol{\omega}}_{m,k}\f$.
+We use subscript \f$k\f$ to denote it is the measurement we get at time \f$t_k\f$. 
+Therefore the propagation of quaternion can be written as: + +\f{align*}{ + \text{}^{I_{k+1}}_{G}\hat{\bar{q}} + = \exp\bigg(\frac{1}{2}\boldsymbol{\Omega}\big({\boldsymbol{\omega}}_{m,k}-{\mathbf{b}}_{g,k} - \mathbf{n}_{g,k}\big)\Delta t_k\bigg) + \text{}^{I_{k}}_{G}\hat{\bar{q}} +\f} + +where \f$\Delta t_k = t_{k+1} - t_{k}\f$. +For the velocity and position propagation we have constant +\f$\mathbf{a}_{m}(t_k) = \mathbf{a}_{m,k}\f$ over \f$t \in [t_k, t_{k+1}]\f$. +We can therefore directly solve for the new states as: + +\f{align*}{ + ^G{\mathbf{p}}_{I_{k+1}} + &= \text{}^G{\mathbf{p}}_{I_k} + {}^G{\mathbf{v}}_{I_k} \Delta t_k + - \frac{1}{2}{}^G\mathbf{g}\Delta t_k^2 + + \frac{1}{2} \text{}^{I_k}_{G}{\mathbf{R}}^\top(\mathbf{a}_{m,k} - {\mathbf{b}}_{{a},k} - \mathbf{n}_{a,k})\Delta t_k^2 \\ + ^G{\mathbf{v}}_{k+1} &= \text{}^G{\mathbf{v}}_{I_k} - {}^G\mathbf{g}\Delta t_k + +\text{}^{I_k}_{G}{\mathbf{R}}^\top(\mathbf{a}_{m,k} - {\mathbf{b}}_{{a},k} - \mathbf{n}_{a,k})\Delta t_k +\f} + +The propagation of each bias is likewise the continuous system: + +\f{align*}{ + {\mathbf{b}}_{g,k+1} &= {\mathbf{b}}_{g,k} + \mathbf{n}_{wg,k} \\ + {\mathbf{b}}_{a,k+1} &= {\mathbf{b}}_{a,k} + \mathbf{n}_{wa,k} +\f} + +Note that the noises used here are the discrete ones which need to be converted from their continuous-time versions. +In particular, when the covariance matrix of the continuous-time measurement noises is given by +\f$\mathbf{Q}_c\f$ and \f$\mathbf{n}_c = [ \mathbf{n}_{g_c} ~ \mathbf{n}_{a_c} ~ \mathbf{n}_{wg_c} ~ \mathbf{n}_{wa_c} ]^\top\f$, +then the discrete-time noise covariance \f$\mathbf{Q}_d\f$ can be computed as +(see [[Indirect Kalman Filter for 3D Attitude Estimation](http://mars.cs.umn.edu/tr/reports/Trawny05b.pdf)] @cite Trawny2005TR Eq. 
(129) and (130)):
+
+\f{align*}{
+\sigma_{g} &= \frac{1}{\sqrt{\Delta t}}~ \sigma_{g_c} \\
+\sigma_{wg} &= \sqrt{\Delta t}~ \sigma_{wg_c} \\[1em]
+\mathbf{Q}_{meas} &=
+\begin{bmatrix}
+\frac{1}{\Delta t}~ \sigma_{g_c}^2~ \mathbf{I}_3 & \mathbf{0}_3 \\
+\mathbf{0}_3 & \frac{1}{\Delta t}~ \sigma_{a_c}^2~ \mathbf{I}_3
+\end{bmatrix} \\
+\mathbf{Q}_{bias} &=
+\begin{bmatrix}
+\Delta t~ \sigma_{wg_c}^2~ \mathbf{I}_3 & \mathbf{0}_3 \\
+\mathbf{0}_3 & \Delta t~ \sigma_{wa_c}^2~ \mathbf{I}_3
+\end{bmatrix}
+\f}
+
+where \f$\mathbf{n} = [ \mathbf{n}_{g} ~ \mathbf{n}_{a} ~ \mathbf{n}_{wg} ~ \mathbf{n}_{wa} ]^\top\f$ are the discrete
+IMU sensor noises which have been converted from their continuous representations.
+We define the stacked discrete measurement noise as follows:
+
+\f{align*}{
+\mathbf{Q}_{d} &=
+\begin{bmatrix}
+\mathbf{Q}_{meas} & \mathbf{0}_3 \\
+\mathbf{0}_3 & \mathbf{Q}_{bias}
+\end{bmatrix}
+\f}
+
+
+
+@section error_prop Discrete-time Error-state Propagation
+
+In order to propagate the covariance matrix, we should derive the linearized error-state propagation,
+i.e., computing the system Jacobian \f$\boldsymbol{\Phi}(t_{k+1},t_k)\f$ and noise Jacobian \f$\mathbf{G}_{k}\f$.
+The method of computing Jacobians is to "perturb" each variable in the system and see how the old error "perturbation" relates to the new error state. 
+That is, +\f$\boldsymbol{\Phi}(t_{k+1},t_k)\f$ and \f$\mathbf{G}_{k}\f$ can be found by perturbing each variable as: +\f{align*}{ +\tilde{\mathbf{x}}_I(t_{k+1}) = \boldsymbol{\Phi}(t_{k+1},t_k) \tilde{\mathbf{x}}_I(t_{k}) + \mathbf{G}_{k} \mathbf{n} +\f} +For the orientation error propagation, we start with the \f${SO}(3)\f$ perturbation using +\f${}^{I}_G \mathbf{R} \approx (\mathbf{I}_3 - \lfloor ^{I}_{G}\tilde{\boldsymbol{\theta}}\times\rfloor)^{I}_{G} \hat{\mathbf{R}}\f$ +(which can be shown to be exactly equivalent to the state's quaternion error, see @cite Trawny2005TR and @ref ov_type::JPLQuat): + +\f{align*}{ +{}^{I_{k+1}}_G \mathbf{R} &= \text{}^{I_{k+1}}_{I_{k}} \mathbf{R} \text{}^{I_{k}}_G \mathbf{R} \\ +(\mathbf{I}_3 - \lfloor ^{I_{k+1}}_{G}\tilde{\boldsymbol{\theta}}\times\rfloor)^{I_{k+1}}_{G} +\hat{\mathbf{R}} +&\approx \exp(-{}^{I_{k}}\hat{\boldsymbol{\omega}}\Delta t_k - {}^{I_{k}}\tilde{\boldsymbol{\omega}}\Delta t_k) +(\mathbf{I}_3 - \lfloor ^{I_{k}}_{G}\tilde{\boldsymbol{\theta}}\times\rfloor)^{I_{k}}_{G} +\hat{\mathbf{R}}\\ +&=\exp(-{}^{I_{k}}\hat{\boldsymbol{\omega}}\Delta t_k)\exp(-\mathbf{J}_r(-{}^{I_{k}}\hat{\boldsymbol{\omega}}\Delta t_k){}^{I_{k}}\tilde{\boldsymbol{\omega}}\Delta t_k) +(\mathbf{I}_3 - \lfloor ^{I_{k}}_{G}\tilde{\boldsymbol{\theta}}\times\rfloor)^{I_{k}}_{G} +\hat{\mathbf{R}}\\ +&=\text{}^{I_{k+1}}_{I_{k}} \hat{\mathbf{R}} + (\mathbf{I}_3 - \lfloor \mathbf J_r(-{}^{I_{k}}\hat{\boldsymbol{\omega}}\Delta t_k) + \tilde{\boldsymbol{\omega}}_k\Delta t_k \times\rfloor) +(\mathbf{I}_3 - \lfloor ^{I_k}_{G}\tilde{\boldsymbol{\theta}}\times\rfloor) +\text{}^{I_{k}}_G \hat{\mathbf{R}} +\f} + +where \f$\tilde{\boldsymbol{\omega}} = \boldsymbol{\omega} - \hat{\boldsymbol{\omega}} += -(\tilde{\mathbf{b}}_{{g}} + \mathbf{n}_g)\f$ handles both the perturbation to the bias and measurement noise. 
+\f$\mathbf {J}_r(\boldsymbol{\theta})\f$ is the right Jacobian of \f${SO}(3)\f$
+that maps the variation of rotation angle in the parameter vector space into the variation in the tangent vector space to the manifold
+[see @ref ov_core::Jr_so3() and @cite Barfoot2017].
+By neglecting the second order terms from above, we obtain the following orientation error propagation:
+
+\f{align*}{
+\text{}^{I_{k+1}}_{G}\tilde{\boldsymbol{\theta}} \approx
+\text{}^{I_{k+1}}_{I_{k}}\hat{\mathbf{R}} \text{}^{I_k}_{G}\tilde{\boldsymbol{\theta}}
+- \text{}^{I_{k+1}}_{I_{k}}\hat{\mathbf{R}}\mathbf J_r({}^{I_{k+1}}_{I_{k}}\hat{\boldsymbol{\theta}})
+\Delta t_k (\tilde{\mathbf{b}}_{{g},k} + \mathbf{n}_{{g},k})
+\f}
+
+where we have defined \f${}^{I_{k+1}}_{I_{k}}\hat{\boldsymbol{\theta}}=-{}^{I_{k}}\hat{\boldsymbol{\omega}}\Delta t_k\f$.
+This describes how the error of the next orientation is related to the previous, gyroscope bias, and noise.
+Now we can do error propagation of position and velocity using the same scheme:
+
+\f{align*}{
+^G\mathbf{p}_{I_{k+1}}
+  &= \text{}^G\mathbf{p}_{I_k} + \text{}^G\mathbf{v}_{I_k} \Delta t_k
+  - \frac{1}{2}{}^G\mathbf{g}\Delta t_k^2
+  + \frac{1}{2}\text{}^{I_k}_G\mathbf{R}^\top \mathbf{a}_{k}\Delta t_k^2\\
+
+^G\hat{\mathbf{p}}_{I_{k+1}} + \text{}^G\tilde{\mathbf{p}}_{I_{k+1}}
+  &\approx \text{}^G\hat{\mathbf{p}}_{I_k} + \text{}^G\tilde{\mathbf{p}}_{I_k}
+  + \text{}^G\hat{\mathbf{v}}_{I_k} \Delta t_k
+  + \text{}^G\tilde{\mathbf{v}}_{I_k} \Delta t_k
+  - \frac{1}{2}{}^G\mathbf{g}\Delta t_k^2\\
+  &\hspace{4cm} + \frac{1}{2} \text{}^{I_k}_{G}\hat{\mathbf{R}}^\top
+  (\mathbf{I}_3 + \lfloor ^{I_{k}}_{G}\tilde{\boldsymbol{\theta}}\times\rfloor)
+  (\hat{\mathbf{a}}_{k} + \tilde{\mathbf{a}}_{k})\Delta t_k^2\\
+\f}
+
+\f{align*}{
+^G\mathbf{v}_{k+1} &= \text{}^G\mathbf{v}_{I_k} - {}^G\mathbf{g}\Delta t_k
++\text{}^{I_k}_G\mathbf{R}^\top\mathbf{a}_{k}\Delta t_k\\
+
+^G\hat{\mathbf{v}}_{k+1} + ^G\tilde{\mathbf{v}}_{k+1} &\approx
+{}^G\hat{\mathbf{v}}_{I_k} 
+ {}^G\tilde{\mathbf{v}}_{I_k} +- {}^G\mathbf{g}\Delta t_k ++ \text{}^{I_k}_G\hat{\mathbf{R}}^\top +(\mathbf{I}_3 + \lfloor ^{I_{k}}_{G}\tilde{\boldsymbol{\theta}}\times\rfloor) +(\hat{\mathbf{a}}_{k} + \tilde{\mathbf{a}}_{k})\Delta t_k +\f} + +where \f$\tilde{\mathbf{a}} = \mathbf{a} - \hat{\mathbf{a}} + = - (\tilde{\mathbf{b}}_{{a}} + \mathbf{n}_{{a}})\f$. +By neglecting the second order error terms, we obtain the following position and velocity error propagation: + +\f{align*}{ +\text{}^G\tilde{\mathbf{p}}_{I_{k+1}} &= +\text{}^G\tilde{\mathbf{p}}_{I_k} ++ \text{}^G\tilde{\mathbf{v}}_{I_k} \Delta t_k +- \frac{1}{2}\text{}^{I_k}_{G}\hat{\mathbf{R}}^\top +\lfloor \hat{\mathbf{a}}_{k} \Delta t_k^2 \times\rfloor +^{I_{k}}_{G}\tilde{\boldsymbol{\theta}} +- \frac{1}{2} \text{}^{I_k}_{G}\hat{\mathbf{R}}^\top \Delta t_k^2 +(\tilde{\mathbf{b}}_{{a},k} + \mathbf{n}_{{a},k})\\ +^G\tilde{\mathbf{v}}_{k+1} &= +\text{}^G\tilde{\mathbf{v}}_{I_k} + - \text{}^{I_k}_G\hat{\mathbf{R}}^\top + \lfloor \hat{\mathbf{a}}_{k} \Delta t_k \times\rfloor + ^{I_{k}}_{G}\tilde{\boldsymbol{\theta}} + - \text{}^{I_k}_G\hat{\mathbf{R}}^\top \Delta t_k + (\tilde{\mathbf{b}}_{{a},k} + \mathbf{n}_{{a},k}) +\f} + +The propagation of the two random-walk biases are as follows: + +\f{align*}{ + \mathbf{b}_{\mathbf{g},k+1} &= \mathbf{b}_{{g},k} + \mathbf{n}_{wg} \\ + \hat{\mathbf{b}}_{{g},k+1} + \tilde{\mathbf{b}}_{{g},k+1} &= + \hat{\mathbf{b}}_{{g},k} + \tilde{\mathbf{b}}_{{g},k} + + \mathbf{n}_{wg} \\ + \tilde{\mathbf{b}}_{{g},k+1} &= + \tilde{\mathbf{b}}_{{g},k} + \mathbf{n}_{wg} +\f} +\f{align*}{ +\mathbf{b}_{\mathbf{a},k+1} &= \mathbf{b}_{\mathbf{a},k} + \mathbf{n}_{wa} \\ + \hat{\mathbf{b}}_{{a},k+1} + \tilde{\mathbf{b}}_{{a},k+1} &= + \hat{\mathbf{b}}_{{a},k} + \tilde{\mathbf{b}}_{{a},k} + + \mathbf{n}_{wa} \\ + \tilde{\mathbf{b}}_{{a},k+1} &= + \tilde{\mathbf{b}}_{{a},k} + \mathbf{n}_{wa} +\f} + +By collecting all the perturbation results, we can build \f$\boldsymbol{\Phi}(t_{k+1},t_k)\f$ and 
\f$\mathbf{G}_{k}\f$ matrices as: + + +\f{align*}{ +\boldsymbol{\Phi}(t_{k+1},t_k) &= +\begin{bmatrix} +\text{}^{I_{k+1}}_{I_{k}}\hat{\mathbf{R}} & \mathbf{0}_3 & \mathbf{0}_3 & +- \text{}^{I_{k+1}}_{I_{k}}\hat{\mathbf{R}}\mathbf J_r(\text{}^{I_{k+1}}_{I_{k}}\hat{\boldsymbol{\theta}}) +\Delta t_k & \mathbf{0}_3 \\ + +- \frac{1}{2}\text{}^{I_k}_{G}\hat{\mathbf{R}}^\top \lfloor \hat{\mathbf{a}}_{k} \Delta t_k^2 \times\rfloor +& \mathbf{I}_3 & \Delta t_k \mathbf{I}_3 & \mathbf{0}_3 & - \frac{1}{2} \text{}^{I_k}_{G}\hat{\mathbf{R}}^\top \Delta t_k^2 \\ + +- \text{}^{I_k}_G\hat{\mathbf{R}}^\top \lfloor \hat{\mathbf{a}}_{k} \Delta t_k \times\rfloor +& \mathbf{0}_3 & \mathbf{I}_3 & \mathbf{0}_3 & - \text{}^{I_k}_G\hat{\mathbf{R}}^\top \Delta t_k \\ + +\mathbf{0}_3 & \mathbf{0}_3 & \mathbf{0}_3 & \mathbf{I}_3 & \mathbf{0}_3 \\ +\mathbf{0}_3 & \mathbf{0}_3 & \mathbf{0}_3 & \mathbf{0}_3 & \mathbf{I}_3 +\end{bmatrix} +\f} + + + +\f{align*}{ +\mathbf{G}_{k} &= +\begin{bmatrix} +- \text{}^{I_{k+1}}_{I_{k}}\hat{\mathbf{R}}\mathbf J_r(\text{}^{I_{k+1}}_{I_{k}}\hat{\boldsymbol{\theta}}) +\Delta t_k & \mathbf{0}_3 +& \mathbf{0}_3 & \mathbf{0}_3 \\ + +\mathbf{0}_3 & - \frac{1}{2} \text{}^{I_k}_{G}\hat{\mathbf{R}}^\top \Delta t_k^2 +& \mathbf{0}_3 & \mathbf{0}_3 \\ + +\mathbf{0}_3 & - \text{}^{I_k}_G\hat{\mathbf{R}}^\top \Delta t_k +& \mathbf{0}_3 & \mathbf{0}_3 \\ + +\mathbf{0}_3 & \mathbf{0}_3 +& \mathbf{I}_3 & \mathbf{0}_3 \\ + +\mathbf{0}_3 & \mathbf{0}_3 +& \mathbf{0}_3 & \mathbf{I}_3 +\end{bmatrix} +\f} + + +@m_class{m-note m-frame} + +@par How to apply FEJ to ensure consistency? +When using First-Estimate Jacobians (see @ref fej for an overview) for the state transition matrix, +we need to ensure that we evaluate all state estimates of prior seen states at the same linearization point +they were previously evaluated at (their estimate before update / their estimate when they were first initialized into the state). 
+This ensures the semi-group property of the state transition matrix. + + +Now, with the computed \f$\boldsymbol{\Phi}(t_{k+1},t_k)\f$ and \f$\mathbf{G}_{k}\f$ matrices, +we can propagate the covariance from \f$t_k\f$ to \f$t_{k+1}\f$: +\f{align*}{ +\mathbf{P}_{k+1|k} = \boldsymbol{\Phi}(t_{k+1},t_k)\mathbf{P}_{k|k}\boldsymbol{\Phi}(t_{k+1},t_k)^\top + \mathbf{G}_k\mathbf{Q}_d\mathbf{G}_k^\top +\f} + + +*/ \ No newline at end of file diff --git a/docs/propagation.dox b/docs/propagation.dox index 1846a373c..0014e6f96 100644 --- a/docs/propagation.dox +++ b/docs/propagation.dox @@ -11,20 +11,60 @@ which provides measurements of the local rotational velocity (angular rate) \f$\ local translational acceleration \f$\mathbf{a}_m\f$: \f{align*}{ - \boldsymbol{\omega}_m(t) &= \boldsymbol{\omega}(t) + \mathbf{b}_{g}(t) + \mathbf{n}_{{g}}(t)\\ - \mathbf{a}_m(t) &= \mathbf{a}(t) + {}^I_G\mathbf{R}(t) {^G\mathbf{g}} + \mathbf{b}_{a}(t) + \mathbf{n}_{{a}}(t) + {}^I\boldsymbol{\omega}_m(t) &= {}^I\boldsymbol{\omega}(t) + \mathbf{b}_{g}(t) + \mathbf{n}_{{g}}(t)\\ + {}^I\mathbf{a}_m(t) &= {}^I\mathbf{a}(t) + {}^I_G\mathbf{R}(t) {^G\mathbf{g}} + \mathbf{b}_{a}(t) + \mathbf{n}_{{a}}(t) \f} -where \f$\boldsymbol{\omega}\f$ and \f$\mathbf{a}\f$ are the true rotational velocity and translational +where \f${}^I\boldsymbol{\omega}\f$ and \f${}^I\mathbf{a}\f$ are the true rotational velocity and translational acceleration in the IMU local frame \f$\{I\}\f$, \f$\mathbf{b}_{g}\f$ and \f$\mathbf{b}_{a}\f$ are the gyroscope and accelerometer biases, and \f$\mathbf{n}_{{g}}\f$ \f$\mathbf{n}_{{a}}\f$ are white Gaussian noise, \f${^G\mathbf{g}} = [ 0 ~~ 0 ~~ 9.81 ]^\top\f$ is the gravity expressed in the global frame \f$\{G\}\f$ (noting that the gravity is slightly different on different locations of the globe), -and \f$^I_G\mathbf{R}\f$ is the rotation matrix from global to IMU local frame. +and \f$^I_G\mathbf{R}\f$ is the rotation matrix from global to IMU local frame. 
+ +A more complete and complex model which also takes into account the intrinsic properties of the IMU can be considered. +[MEMS IMUs](https://en.wikipedia.org/wiki/Vibrating_structure_gyroscope#MEMS_gyroscopes) typically have non-perfect readings which would invalidate the above equations. +We can define additional sensor frames \f$\{w\}\f$ and \f$\{a\}\f$ which can be related back to the "ideal" inertial frame \f$\{I\}\f$ that we wish to estimate. +Thus we can define the following: + +\f{align*}{ +{}^w\boldsymbol{\omega}_{m}(t) & = + \mathbf{T}_{w} {}^w_I\mathbf{R} {}^I\boldsymbol{\omega}(t) + + \mathbf{T}_{g} {}^I\mathbf{a}(t) + \mathbf{b}_g(t) + \mathbf{n}_g(t) \\ +{}^a\mathbf{a}_{m}(t) & = \mathbf{T}_a {}^a_I\mathbf{R} + ({}^I\mathbf{a}(t) + {}^I_G\mathbf{R}(t) {^G\mathbf{g}}) + + \mathbf{b}_a(t) + \mathbf{n}_a(t) +\f} + +where \f$\mathbf{T}_{w}\f$ and \f$\mathbf{T}_a\f$ are invertible \f$3\times3\f$ matrices which represent the +scale imperfection and axis misalignment for \f$\{w\}\f$ and \f$\{a\}\f$, respectively. +\f${}^w_I\mathbf{R}\f$ and \f${}^a_I\mathbf{R}\f$ denote the rotation from the base "inertial" frame \f$\{I\}\f$ to +the gyroscope and acceleration sensor frames. +\f$\mathbf{T}_g\f$ denotes the gravity sensitivity, which represents the effects of gravity on the +gyroscope readings due to its inherent inertia. +Note that this does not take into account the translation between the +gyroscope and accelerometer frames, since it is negligible for most MEMS IMUs (see @cite Li2014ICRA @cite Schneider2019Sensor). + + + +@m_class{m-block m-warning} + +@par Calibration of Inertial Intrinsics +It is important to note that calibration of inertial intrinsics introduces many additional degrees of freedom into the state. +Without picking a confident prior distribution for these, the initial trajectory needs to ensure sufficient excitation to enable +calibration and avoid degeneracies. 
+As found in @cite Yang2020RSS @cite Yang2023TRO there are many trajectories (e.g. planar, 1-axis, constant meas, etc..) +which can hurt inertial intrinsic calibration. +In general we *do not* recommend calibration (but still do it offline!), unless you know your trajectory will +have good excitation (e.g. dynamic handheld). +The full background, summary, and analysis of different inertial models can be found in @cite Yang2020RSS @cite Yang2023TRO. -@section ins_state State Vector + + + +@section ins_state Inertial State Vector We define our INS state vector \f$\mathbf{x}_I\f$ at time \f$t\f$ as: @@ -34,8 +74,8 @@ We define our INS state vector \f$\mathbf{x}_I\f$ at time \f$t\f$ as: ^I_G\bar{q}(t) \\ ^G\mathbf{p}_I(t) \\ ^G\mathbf{v}_I(t)\\ -\mathbf{b}_{\mathbf{g}}(t) \\ -\mathbf{b}_{\mathbf{a}}(t) +\mathbf{b}_{{g}}(t) \\ +\mathbf{b}_{{a}}(t) \end{bmatrix} \f} @@ -46,7 +86,7 @@ We will often write time as a subscript of \f$I\f$ describing the state of IMU a for notation clarity (e.g., \f$^{I_t}_G\bar{q} = \text{}^I_G\bar{q}(t)\f$). In order to define the IMU error state, the standard additive error definition is employed for the position, velocity, and biases, -while we use the quaternion error state \f$\delta \bar{q}\f$ with a left quaternion multiplicative error +while we use the quaternion error state \f$\delta \bar{q}\f$ with a left quaternion multiplicative error \f$\otimes\f$: \f{align*}{ @@ -80,394 +120,103 @@ The total IMU error state thus is defined as the following 15x1 (not 16x1) vecto -@section imu_kinematic IMU Kinematics +@section imu_kinematic IMU Kinematic Equations The IMU state evolves over time as follows (see [Indirect Kalman Filter for 3D Attitude Estimation](http://mars.cs.umn.edu/tr/reports/Trawny05b.pdf) @cite Trawny2005TR). 
- -\f{align*}{ \newcommand{\comm}[1]{} - +\f{align*}{ ^I_G\dot{\bar{q}}(t) - -\comm{ - &= \lim_{\delta t \to 0} \frac{1}{\delta t} - (^{I_{t + \delta t}}_G\bar{q} - \text{}^{I_{t}}_G\bar{q})\\ - - &= \lim_{\delta t \to 0} \frac{1}{\delta t} - (^{I_{t + \delta t}}_{L_{t}}\bar{q} \otimes ^{I_{t}}_{G}\bar{q} - - \bar{q}_0 \otimes \text{}^{I_{t}}_G\bar{q})\\ - - &= \lim_{\delta t \to 0} \frac{1}{\delta t} - (^{I_{t + \delta t}}_{L_{t}}\bar{q} - \bar{q}_0 ) \otimes \text{}^{I_{t}}_{G}\bar{q}\\ - - &\approx \lim_{\delta t \to 0} \frac{1}{\delta t} - \Bigg (\begin{bmatrix} \frac{1}{2}\delta \boldsymbol{\theta}\\ 1 \end{bmatrix} - -\begin{bmatrix} \boldsymbol{0}\\ 1 \end{bmatrix} \Bigg ) - \otimes \text{}^{I_{t}}_{G}\bar{q} \\ - - &= \frac{1}{2} \begin{bmatrix} \boldsymbol{\omega}\\ 0 \end{bmatrix} - \otimes \text{}^{I_{t}}_{G}\bar{q}\\} - - &= \frac{1}{2} \begin{bmatrix} -\lfloor \boldsymbol{\omega}(t) \times \rfloor - && \boldsymbol{\omega}(t) \\ - -\boldsymbol{\omega}^\top(t) && 0 \end{bmatrix} \text{}^{I_{t}}_{G}\bar{q}\\ - - &=: \frac{1}{2} \boldsymbol{\Omega}(\boldsymbol{\omega}(t)) \text{}^{I_{t}}_{G}\bar{q}\\ - + &= \frac{1}{2} \begin{bmatrix} -\lfloor {}^{I}\boldsymbol{\omega}(t) \times \rfloor + && {}^{I}\boldsymbol{\omega}(t) \\ + -{}^{I}\boldsymbol{\omega}(t)^\top && 0 \end{bmatrix} \text{}^{I_{t}}_{G}\bar{q}\\ + &\triangleq \frac{1}{2} \boldsymbol{\Omega}({}^{I}\boldsymbol{\omega}(t)) \text{}^{I_{t}}_{G}\bar{q}\\ ^G\dot{\mathbf{p}}_I(t) &=\text{} ^G\mathbf{v}_I(t)\\ - - ^G\dot{\mathbf{v}}_I(t) &=\text{} ^{I_{t}}_G\mathbf{R}^\top\mathbf{a}(t) \\ - - \dot{\mathbf{b}}_{\mathbf{g}}(t) &= \mathbf{n}_{wg}\\ - - \dot{\mathbf{b}}_{\mathbf{a}}(t) &= \mathbf{n}_{wa} + ^G\dot{\mathbf{v}}_I(t) &=\text{} ^{I_{t}}_G\mathbf{R}^\top {}^{I}\mathbf{a}(t) \\ + \dot{\mathbf{b}}_{\mathbf{g}}(t) &= \mathbf{n}_{wg}(t) \\ + \dot{\mathbf{b}}_{\mathbf{a}}(t) &= \mathbf{n}_{wa}(t) \f} -where we have modeled the gyroscope and accelerometer biases as random walk +where we have modeled the 
gyroscope and accelerometer biases as random walk and thus their time derivatives are white Gaussian. -Note that the above kinematics have been defined in terms of the *true* acceleration and angular velocities. - +Note that the above kinematics have been defined in terms of the *true* acceleration and +angular velocities which can be computed as a function of the sensor measurements and state. -@section conti_prop Continuous-time IMU Propagation +@section conti_prop Continuous-time Inertial Integration - -Given the continuous-time measurements \f$\boldsymbol{\omega}_m(t)\f$ -and \f$\mathbf{a}_m(t)\f$ in the time interval \f$t \in [t_k,t_{k+1}]\f$, +Given the continuous-time \f${}^{I}\boldsymbol{\omega}(t)\f$ +and \f${}^{I}\mathbf{a}(t)\f$, in the time interval \f$t \in [t_k,t_{k+1}]\f$, and their estimates, i.e. after taking the expectation, -\f$\hat{\boldsymbol{\omega}}(t) = \boldsymbol{\omega}_m(t) - \hat{\boldsymbol{b}}_g(t)\f$ and -\f$\hat{\boldsymbol{a}}(t) = \boldsymbol{a}_m(t) - \hat{\boldsymbol{b}}_a(t) - ^I_G\hat{\mathbf{R}}(t){}^G\mathbf{g}\f$, +\f${}^{I}\hat{\boldsymbol{\omega}}(t)\f$ and \f${}^{I}\hat{\boldsymbol{a}}(t)\f$, we can define the solutions to the above IMU kinematics differential equation. 
-The solution to the quaternion evolution has the following general form: - -\f{align*}{ - ^{I_{t}}_{G}\bar{q} = \boldsymbol{\Theta}(t,t_k) \text{}^{I_{k}}_{G}\bar{q} -\f} - -Differentiating and reordering the terms yields the governing equation for -\f$\boldsymbol{\Theta}(t,t_k)\f$ as - -\f{align*}{ \newcommand{\comm}[1]{} - - \boldsymbol{\Theta}(t,t_k) &= \text{}^{I_t}_G\bar{q} \text{ }^{I_k}_{G}\bar{q}^{-1}\\ - - \Rightarrow \dot{\boldsymbol{\Theta}}(t,t_k) &= - \text{}^{I_t}_G\dot{\bar{q}} \text{ }^{I_k}_{G}\bar{q}^{-1}\\ - &= \frac{1}{2} \boldsymbol{\Omega}(\boldsymbol{\omega}(t)) \text{ }^{I_t}_{G}\bar{q} - \text{ }^{I_k}_{G}\bar{q}^{-1}\\ - &= \frac{1}{2} \boldsymbol{\Omega}(\boldsymbol{\omega}(t)) \boldsymbol{\Theta}(t,t_k) -\f} -with \f$\boldsymbol{\Theta}(t_k,t_k) = \mathbf{I}_{4}\f$. -If we take \f$\boldsymbol{\omega}(t) = \boldsymbol{\omega}\f$ to be constant over the the period -\f$\Delta t = t_{k+1} - t_k\f$, -then the above system is linear time-invarying (LTI), and -\f$\boldsymbol{\Theta}\f$ can be solved as (see [[Stochastic Models, Estimation, and Control]](https://books.google.com/books?id=L_YVMUJKNQUC) @cite Maybeck1982STOC): - - -\f{align*}{ - \boldsymbol{\Theta}(t_{k+1},t_k) - &= \exp\bigg(\frac{1}{2}\boldsymbol{\Omega}(\boldsymbol{\omega})\Delta t\bigg)\\ - &= \cos\bigg(\frac{|\boldsymbol{\omega}|}{2} \Delta t \bigg) \cdot \mathbf{I}_4 - + \frac{1}{|\boldsymbol{\omega}|}\sin\bigg(\frac{|\boldsymbol{\omega}|}{2} \Delta t \bigg) - \cdot \boldsymbol{\Omega}(\boldsymbol{\omega})\\ -&\simeq - \mathbf{I}_4 + \frac{\Delta t}{2}\boldsymbol{\Omega}(\boldsymbol{\omega}) -\f} - - -where the approximation assumes small \f$|\boldsymbol{\omega}|\f$. 
-We can formulate the quaternion propagation from \f$t_k\f$ to \f$t_{k+1}\f$ using the estimated -rotational velocity \f$\hat{\boldsymbol{\omega}}(t) = \hat{\boldsymbol{\omega}}\f$ as: - -\f{align*}{ - \text{}^{I_{k+1}}_{G}\hat{\bar{q}} - = \exp\bigg(\frac{1}{2}\boldsymbol{\Omega}(\hat{\boldsymbol{\omega}})\Delta t\bigg) - \text{}^{I_{k}}_{G}\hat{\bar{q}} -\f} - -Having defined the integration of the orientation, we can integrate the velocity and position over the measurement interval: - -\f{align*}{ - ^G\hat{\mathbf{v}}_{k+1} &= \text{}^G\hat{\mathbf{v}}_{I_k} + \int_{t_{k}}^{t_{k+1}} - {^G\hat{\mathbf{a}}(\tau)} d\tau \\ - - &= \text{}^G\hat{\mathbf{v}}_{I_k} - {}^G\mathbf{g}\Delta t+ \int_{t_{k}}^{t_{k+1}} - {^G_{I_{\tau}}\hat{\mathbf{R}}(\mathbf{a}_m(\tau) - \hat{\mathbf{b}}_{\mathbf{a}}(\tau)) d\tau}\\[1em] - - ^G\hat{\mathbf{p}}_{I_{k+1}} &= - \text{}^G\hat{\mathbf{p}}_{I_k} + \int_{t_{k}}^{t_{k+1}} - {^G\hat{\mathbf{v}}_I(\tau)} d\tau \\ - - &= \text{}^G\hat{\mathbf{p}}_{I_k} + \text{}^G\hat{\mathbf{v}}_{I_k} \Delta t - - \frac{1}{2}{}^G\mathbf{g}\Delta t^2 + - \int_{t_{k}}^{t_{k+1}} \int_{t_{k}}^{s} - {^G_{I_{\tau}}\hat{\mathbf{R}}(\mathbf{a}_m(\tau) - \hat{\mathbf{b}}_{\mathbf{a}}(\tau)) d\tau ds} -\f} - -Propagation of each bias \f$\hat{\mathbf{b}}_{\mathbf{g}}\f$ and \f$\hat{\mathbf{b}}_{\mathbf{a}}\f$ is given by: - -\f{align*}{ - \hat{\mathbf{b}}_{\mathbf{g},k+1} &= \hat{\mathbf{b}}_{\mathbf{g},k} + \int_{t_{k+1}}^{t_{k}} - {\hat{\mathbf{n}}_{wg}(\tau)} d\tau \\ - - &= \hat{\mathbf{b}}_{\mathbf{g},k} \\ - - \hat{\mathbf{b}}_{\mathbf{a},k+1} &= \hat{\mathbf{b}}_{\mathbf{a},k} + \int_{t_{k+1}}^{t_{k}} - {\hat{\mathbf{n}}_{wa}(\tau)} d\tau \\ - - &= \hat{\mathbf{b}}_{\mathbf{a},k} -\f} - -The biases will not evolve since our random walk noises \f$\hat{\mathbf{n}}_{wg}\f$ and \f$\hat{\mathbf{n}}_{wa}\f$ are zero-mean white Gaussian. 
-All of the above integrals could be analytically or numerically solved if one wishes to use the continuous-time measurement evolution model. - - - - - -@section disc_prop Discrete-time IMU Propagation - - -A simpler method is to model the measurements as discrete-time over the integration period. -To do this, the measurements can be assumed to be constant during the sampling period. -We employ this assumption and approximate that the measurement at time \f$t_k\f$ remains -the same until we get the next measurement at \f$t_{k+1}\f$. -For the quaternion propagation, it is the same as continuous-time propagation -with constant measurement assumption \f${\boldsymbol{\omega}}_{m}(t_k) = {\boldsymbol{\omega}}_{m,k}\f$. -We use subscript \f$k\f$ to denote it is the measurement we get at time \f$t_k\f$. -Therefore the propagation of quaternion can be written as: - -\f{align*}{ - \text{}^{I_{k+1}}_{G}\hat{\bar{q}} - = \exp\bigg(\frac{1}{2}\boldsymbol{\Omega}\big({\boldsymbol{\omega}}_{m,k}-\hat{\mathbf{b}}_{g,k}\big)\Delta t\bigg) - \text{}^{I_{k}}_{G}\hat{\bar{q}} -\f} - -For the velocity and position propagation we have constant -\f$\mathbf{a}_{m}(t_k) = \mathbf{a}_{m,k}\f$ over \f$t \in [t_k, t_{k+1}]\f$. 
-We can therefore directly solve for the new states as: \f{align*}{ - ^G\hat{\mathbf{v}}_{k+1} &= \text{}^G\hat{\mathbf{v}}_{I_k} - {}^G\mathbf{g}\Delta t - +\text{}^{I_k}_G\hat{\mathbf{R}}^\top(\mathbf{a}_{m,k} - \hat{\mathbf{b}}_{\mathbf{a},k})\Delta t\\ + {}^{I_{k+1}}_{G}\mathbf{R} & = + \exp \left(-\int^{t_{k+1}}_{t_k} {}^{I}\boldsymbol{\omega}(t_\tau) ~ d \tau \right) {}^{I_k}_{G}\mathbf{R} \\ + &\triangleq \Delta \mathbf{R}_k ~{}^{I_k}_{G}\mathbf{R} = {}^{I_{k+1}}_{I_k}\mathbf{R} ~{}^{I_k}_{G}\mathbf{R} \\ + {}^G\mathbf{p}_{I_{k+1}} & = + {}^G\mathbf{p}_{I_k} + {}^G\mathbf{v}_{I_k}\Delta t_k + + {}^{I_k}_{G}\mathbf{R}^\top + \int^{t_{k+1}}_{t_k} \int^{s}_{t_k} {}^{I_k}_{I_\tau}\mathbf{R} {}^{I} \mathbf{a}(t_\tau) ~ d \tau d s + - \frac{1}{2}{}^G\mathbf{g}\Delta t^2_k \\ + &\triangleq + {}^G\mathbf{p}_{I_k} + {}^G\mathbf{v}_{I_k}\Delta t_k + + {}^{I_k}_{G}\mathbf{R}^\top \Delta \mathbf{p}_{k} + - \frac{1}{2}{}^G\mathbf{g}\Delta t^2_k \\ + {}^G\mathbf{v}_{I_{k+1}} & = {}^G\mathbf{v}_{I_k} + + {}^{I_k}_{G}\mathbf{R}^\top + \int^{t_{k+1}}_{t_k} + {}^{I_k}_{I_\tau}\mathbf{R} {}^{I} \mathbf{a}(t_\tau) ~ d \tau + - {}^G\mathbf{g}\Delta t_k \\ + &\triangleq {}^G\mathbf{v}_{I_k} + + {}^{I_k}_{G}\mathbf{R}^\top \Delta \mathbf{v}_{k} + - {}^G\mathbf{g}\Delta t_k \\ + \mathbf{b}_{g_{k+1}} & = \mathbf{b}_{g_{k}} + \int^{t_{k+1}}_{t_k} \mathbf{n}_{wg}(t_\tau) ~ d \tau \\ + \mathbf{b}_{a_{k+1}} & = \mathbf{b}_{a_{k}} + \int^{t_{k+1}}_{t_k} \mathbf{n}_{wa}(t_\tau) ~ d \tau +\f} + +where \f$\Delta t_k = t_{k+1} - t_{k}\f$, +\f${}^{I_k}_{I_\tau}\mathbf{R} = \exp(\int^{t_\tau}_{t_k} {}^{I}\boldsymbol{\omega}(t_u) ~ d u ) \f$, +\f$\exp(\cdot)\f$ is the \f$SO(3)\f$ matrix exponential @cite Chirikjian2011, and vectors are evaluated at +their subscript timesteps (e.g. \f${}^G\mathbf{v}_{I_k}={}^G\mathbf{v}_{I}(t_k)\f$). +The biases are corrupted by random walk noises \f${\mathbf{n}}_{wg}\f$ and \f${\mathbf{n}}_{wa}\f$ that are zero-mean white Gaussians. 
+We have the following integration components: + +\f{align*}{ + \Delta \mathbf{R}_k & \triangleq + {}^{I_{k+1}}_{I_k}\mathbf{R} + = \exp + \left(-\int^{t_{k+1}}_{t_{k}} {}^{I}\boldsymbol{\omega}(t_\tau) ~ d \tau\right) + \\ + \Delta \mathbf{p}_{k} & \triangleq + \int^{t_{k+1}}_{t_{k}} \int^{s}_{t_{k}} {}^{I_k}_{I_\tau}\mathbf{R} {}^{I} \mathbf{a}(t_\tau) ~ d \tau d s + \\ + \Delta \mathbf{v}_{k} & \triangleq + \int^{t_{k+1}}_{t_{k}} + {}^{I_k}_{I_\tau}\mathbf{R} {}^{I} \mathbf{a}(t_\tau) ~ d \tau +\f} + + + +@section prop_integration Mean and Uncertainty Integration + +Having defined the general equations for state evolution, we can now look into how to actually perform these integrations. +The following two documents detail two different ways to both evolve the state forward and its covariance. +The first is a simple discrete state evolution model without any IMU intrinsics and can be helpful to understand the general flow of derivation. +The second is the full continuous-time integration (with constant measurement assumption) of the mean and state covariance. + +- @subpage propagation_discrete --- Simplified discrete mean and covariance propagation derivations +- @subpage propagation_analytical --- Analytical mean and covariance propagation with IMU intrinsics derivations + +For general background on how to formulate the evolution of states over time we can make a few recommendations. +For classical linear systems we can recommend Maybeck's @cite Maybeck1982STOC Volume 1 and Bar-Shalom et al. @cite Bar2001 discussion of estimator consistency. +For background on Kalman filtering and propagation with similar notation to ours we can recommend Farrell's @cite Farrell2008 aided navigation book. +Finally, for the inertial problem formulation and equations Chatfield's @cite Chatfield1997 fundamental remains foundational. 
- ^G\hat{\mathbf{p}}_{I_{k+1}} - &= \text{}^G\hat{\mathbf{p}}_{I_k} + {}^G\hat{\mathbf{v}}_{I_k} \Delta t - - \frac{1}{2}{}^G\mathbf{g}\Delta t^2 - + \frac{1}{2} \text{}^{I_k}_{G}\hat{\mathbf{R}}^\top(\mathbf{a}_{m,k} - \hat{\mathbf{b}}_{\mathbf{a},k})\Delta t^2 -\f} - -The propagation of each bias is likewise the continuous system: - -\f{align*}{ - \hat{\mathbf{b}}_{\mathbf{g},k+1} &= \hat{\mathbf{b}}_{\mathbf{g},k}\\ - \hat{\mathbf{b}}_{\mathbf{a},k+1} &= \hat{\mathbf{b}}_{\mathbf{a},k} -\f} - - - - - - - -@section error_prop Discrete-time Error-state Propagation - -In order to propagate the covariance matrix, we should derive the error-state propagation, -i.e., computing the system Jacobian \f$\boldsymbol{\Phi}(t_{k+1},t_k)\f$ and noise Jacobian \f$\mathbf{G}_{k}\f$. -In particular, when the covariance matrix of the continuous-time measurement noises is given by -\f$\mathbf{Q}_c\f$, then the discrete-time noise covariance \f$\mathbf{Q}_d\f$ can be computed as -(see [[Indirect Kalman Filter for 3D Attitude Estimation](http://mars.cs.umn.edu/tr/reports/Trawny05b.pdf)] @cite Trawny2005TR Eq. (129) and (130)): - -\f{align*}{ -\sigma_{g} &= \frac{1}{\sqrt{\Delta t}}~ \sigma_{g_c} \\ -\sigma_{bg} &= \sqrt{\Delta t}~ \sigma_{bg_c} \\[1em] -\mathbf{Q}_{meas} &= -\begin{bmatrix} -\frac{1}{\Delta t}~ \sigma_{g_c}^2~ \mathbf{I}_3 & \mathbf{0}_3 \\ -\mathbf{0}_3 & \frac{1}{\Delta t}~ \sigma_{a_c}^2~ \mathbf{I}_3 -\end{bmatrix} \\ -\mathbf{Q}_{bias} &= -\begin{bmatrix} -\Delta t~ \sigma_{bg_c}^2~ \mathbf{I}_3 & \mathbf{0}_3 \\ -\mathbf{0}_3 & \Delta t~ \sigma_{ba_c}^2~ \mathbf{I}_3 -\end{bmatrix} -\f} - -where \f$\mathbf{n} = [ \mathbf{n}_g ~ \mathbf{n}_a ~ \mathbf{n}_{bg} ~ \mathbf{n}_{ba} ]^\top\f$ are the discrete -IMU sensor noises which have been converted from their continuous representations. 
-We define the stacked discrete measurement noise as follows: - -\f{align*}{ -\mathbf{Q}_{d} &= -\begin{bmatrix} -\mathbf{Q}_{meas} & \mathbf{0}_3 \\ -\mathbf{0}_3 & \mathbf{Q}_{bias} -\end{bmatrix} -\f} - -The method of computing Jacobians is to "perturb" each variable in the system and see how the old error "perturbation" relates to the new error state. -That is, -\f$\boldsymbol{\Phi}(t_{k+1},t_k)\f$ and \f$\mathbf{G}_{k}\f$ can be found by perturbing each variable as: -\f{align*}{ -\tilde{\mathbf{x}}_I(t_{k+1}) = \boldsymbol{\Phi}(t_{k+1},t_k) \tilde{\mathbf{x}}_I(t_{k}) + \mathbf{G}_{k} \mathbf{n} -\f} -For the orientation error propagation, we start with the \f$\mathbf{SO}(3)\f$ perturbation using -\f${}^{I}_G \mathbf{R} \approx (\mathbf{I}_3 - \lfloor ^{I}_{G}\tilde{\boldsymbol{\theta}}\times\rfloor)^{I}_{G} \hat{\mathbf{R}}\f$: -\f{align*}{ -{}^{I_{k+1}}_G \mathbf{R} &= \text{}^{I_{k+1}}_{I_{k}} \mathbf{R} \text{}^{I_{k}}_G \mathbf{R} \\ -(\mathbf{I}_3 - \lfloor ^{I_{k+1}}_{G}\tilde{\boldsymbol{\theta}}\times\rfloor)^{I_{k+1}}_{G} -\hat{\mathbf{R}} -&\approx \textrm{exp}(-{}^{I_{k}}\hat{\boldsymbol{\omega}}\Delta t - {}^{I_{k}}\tilde{\boldsymbol{\omega}}\Delta t) -(\mathbf{I}_3 - \lfloor ^{I_{k}}_{G}\tilde{\boldsymbol{\theta}}\times\rfloor)^{I_{k}}_{G} -\hat{\mathbf{R}}\\ -&=\textrm{exp}(-{}^{I_{k}}\hat{\boldsymbol{\omega}}\Delta t)\textrm{exp}(-\mathbf{J}_r(-{}^{I_{k}}\hat{\boldsymbol{\omega}}\Delta t){}^{I_{k}}\tilde{\boldsymbol{\omega}}\Delta t) -(\mathbf{I}_3 - \lfloor ^{I_{k}}_{G}\tilde{\boldsymbol{\theta}}\times\rfloor)^{I_{k}}_{G} -\hat{\mathbf{R}}\\ -&=\text{}^{I_{k+1}}_{I_{k}} \hat{\mathbf{R}} - (\mathbf{I}_3 - \lfloor \mathbf J_r(-{}^{I_{k}}\hat{\boldsymbol{\omega}}\Delta t) - \tilde{\boldsymbol{\omega}}_k\Delta t \times\rfloor) -(\mathbf{I}_3 - \lfloor ^{I_k}_{G}\tilde{\boldsymbol{\theta}}\times\rfloor) -\text{}^{I_{k}}_G \hat{\mathbf{R}} -\f} - -where \f$\tilde{\boldsymbol{\omega}} = \boldsymbol{\omega} - \hat{\boldsymbol{\omega}} -= 
-(\tilde{\mathbf{b}}_{\mathbf{g}} + \mathbf{n}_g)\f$ handles both the perturbation to the bias and measurement noise. -\f$\mathbf {J}_r(\boldsymbol{\theta})\f$ is the right Jacobian of \f$\mathbf{SO}(3)\f$ -that maps the variation of rotation angle in the parameter vector space into the variation in the tangent vector space to the manifold -[see @ref ov_core::Jr_so3()]. -By neglecting the second order terms from above, we obtain the following orientation error propagation: - -\f{align*} -\text{}^{I_{k+1}}_{G}\tilde{\boldsymbol{\theta}} \approx -\text{}^{I_{k+1}}_{I_{k}}\hat{\mathbf{R}} \text{}^{I_k}_{G}\tilde{\boldsymbol{\theta}} -- \text{}^{I_{k+1}}_{I_{k}}\hat{\mathbf{R}}\mathbf J_r(\text{}^{I_{k+1}}_{I_{k}}\hat{\boldsymbol{\theta}}) -\Delta t (\tilde{\mathbf{b}}_{\mathbf{g},k} + \mathbf{n}_{\mathbf{g},k}) -\f} - -Now we can do error propagation of position and velocity using the same scheme: - -\f{align*}{ -^G\mathbf{p}_{I_{k+1}} - &= \text{}^G\mathbf{p}_{I_k} + \text{}^G\mathbf{v}_{I_k} \Delta t - - \frac{1}{2}{}^G\mathbf{g}\Delta t^2 - + \frac{1}{2}\text{}^{I_k}_G\mathbf{R}^\top \mathbf{a}_{k}\Delta t^2\\ - -^G\hat{\mathbf{p}}_{I_{k+1}} + \text{}^G\tilde{\mathbf{p}}_{I_{k+1}} - &\approx \text{}^G\hat{\mathbf{p}}_{I_k} + \text{}^G\tilde{\mathbf{p}}_{I_k} - + \text{}^G\hat{\mathbf{v}}_{I_k} \Delta t - + \text{}^G\tilde{\mathbf{v}}_{I_k} \Delta t - - \frac{1}{2}{}^G\mathbf{g}\Delta t^2\\ - &\hspace{4cm} + \frac{1}{2} \text{}^{I_k}_{G}\hat{\mathbf{R}}^\top - (\mathbf{I}_3 + \lfloor ^{I_{k}}_{G}\tilde{\boldsymbol{\theta}}\times\rfloor) - (\hat{\mathbf{a}}_{k} + \tilde{\mathbf{a}}_{k})\Delta t^2\\ -\\ -^G\mathbf{v}_{k+1} &= \text{}^G\mathbf{v}_{I_k} - {}^G\mathbf{g}\Delta t -+\text{}^{I_k}_G\mathbf{R}^\top\mathbf{a}_{k}\Delta t\\ - -^G\hat{\mathbf{v}}_{k+1} + ^G\tilde{\mathbf{v}}_{k+1} &\approx -{}^G\hat{\mathbf{v}}_{I_k} + {}^G\tilde{\mathbf{v}}_{I_k} -- {}^G\mathbf{g}\Delta t -+ \text{}^{I_k}_G\hat{\mathbf{R}}^\top -(\mathbf{I}_3 + \lfloor 
^{I_{k}}_{G}\tilde{\boldsymbol{\theta}}\times\rfloor) -(\hat{\mathbf{a}}_{k} + \tilde{\mathbf{a}}_{k})\Delta t -\f} - -where \f$\tilde{\mathbf{a}} = \mathbf{a} - \hat{\mathbf{a}} - = - (\tilde{\mathbf{b}}_{\mathbf{a}} + \mathbf{n}_{\mathbf{a}})\f$. -By neglecting the second order error terms, we obtain the following position and velocity error propagation: - -\f{align*}{ -\text{}^G\tilde{\mathbf{p}}_{I_{k+1}} &= -\text{}^G\tilde{\mathbf{p}}_{I_k} -+ \Delta t \text{}^G\tilde{\mathbf{v}}_{I_k} -- \frac{1}{2}\text{}^{I_k}_{G}\hat{\mathbf{R}}^\top -\lfloor \hat{\mathbf{a}}_{k} \Delta t^2 \times\rfloor -^{I_{k}}_{G}\tilde{\boldsymbol{\theta}} -- \frac{1}{2} \text{}^{I_k}_{G}\hat{\mathbf{R}}^\top \Delta t^2 -(\tilde{\mathbf{b}}_{\mathbf{a},k} + \mathbf{n}_{\mathbf{a},k})\\ -^G\tilde{\mathbf{v}}_{k+1} &= -\text{}^G\tilde{\mathbf{v}}_{I_k} - - \text{}^{I_k}_G\hat{\mathbf{R}}^\top - \lfloor \hat{\mathbf{a}}_{k} \Delta t \times\rfloor - ^{I_{k}}_{G}\tilde{\boldsymbol{\theta}} - - \text{}^{I_k}_G\hat{\mathbf{R}}^\top \Delta t - (\tilde{\mathbf{b}}_{\mathbf{a},k} + \mathbf{n}_{\mathbf{a},k}) -\f} - -The propagation of the two random-walk biases are as follows: - -\f{align*}{ - \mathbf{b}_{\mathbf{g},k+1} &= \mathbf{b}_{\mathbf{g},k} + \mathbf{n}_{wg} \\ - \hat{\mathbf{b}}_{\mathbf{g},k+1} + \tilde{\mathbf{b}}_{\mathbf{g},k+1} &= - \hat{\mathbf{b}}_{\mathbf{g},k} + \tilde{\mathbf{b}}_{\mathbf{g},k} - + \mathbf{n}_{wg} \\ - \tilde{\mathbf{b}}_{\mathbf{g},k+1} &= - \tilde{\mathbf{b}}_{\mathbf{g},k} + \mathbf{n}_{wg} \\[1em] - -\mathbf{b}_{\mathbf{a},k+1} &= \mathbf{b}_{\mathbf{a},k} + \mathbf{n}_{wa} \\ - \hat{\mathbf{b}}_{\mathbf{a},k+1} + \tilde{\mathbf{b}}_{\mathbf{a},k+1} &= - \hat{\mathbf{b}}_{\mathbf{a},k} + \tilde{\mathbf{b}}_{\mathbf{a},k} - + \mathbf{n}_{wa} \\ - \tilde{\mathbf{b}}_{\mathbf{a},k+1} &= - \tilde{\mathbf{b}}_{\mathbf{a},k} + \mathbf{n}_{wa} -\f} - -By collecting all the perturbation results, we can build \f$\boldsymbol{\Phi}(t_{k+1},t_k)\f$ and 
\f$\mathbf{G}_{k}\f$ matrices as: - - -\f{align*}{ -\boldsymbol{\Phi}(t_{k+1},t_k) &= -\begin{bmatrix} -\text{}^{I_{k+1}}_{I_{k}}\hat{\mathbf{R}} & \mathbf{0}_3 & \mathbf{0}_3 & -- \text{}^{I_{k+1}}_{I_{k}}\hat{\mathbf{R}}\mathbf J_r(\text{}^{I_{k+1}}_{I_{k}}\hat{\boldsymbol{\theta}}) -\Delta t & \mathbf{0}_3 \\ - -- \frac{1}{2}\text{}^{I_k}_{G}\hat{\mathbf{R}}^\top \lfloor \hat{\mathbf{a}}_{k} \Delta t^2 \times\rfloor -& \mathbf{I}_3 & \Delta t \mathbf{I}_3 & \mathbf{0}_3 & - \frac{1}{2} \text{}^{I_k}_{G}\hat{\mathbf{R}}^\top \Delta t^2 \\ - -- \text{}^{I_k}_G\hat{\mathbf{R}}^\top \lfloor \hat{\mathbf{a}}_{k} \Delta t \times\rfloor -& \mathbf{0}_3 & \mathbf{I}_3 & \mathbf{0}_3 & - \text{}^{I_k}_G\hat{\mathbf{R}}^\top \Delta t \\ - -\mathbf{0}_3 & \mathbf{0}_3 & \mathbf{0}_3 & \mathbf{I}_3 & \mathbf{0}_3 \\ -\mathbf{0}_3 & \mathbf{0}_3 & \mathbf{0}_3 & \mathbf{0}_3 & \mathbf{I}_3 -\end{bmatrix} -\f} - - - -\f{align*}{ -\mathbf{G}_{k} &= -\begin{bmatrix} -- \text{}^{I_{k+1}}_{I_{k}}\hat{\mathbf{R}}\mathbf J_r(\text{}^{I_{k+1}}_{I_{k}}\hat{\boldsymbol{\theta}}) -\Delta t & \mathbf{0}_3 -& \mathbf{0}_3 & \mathbf{0}_3 \\ - -\mathbf{0}_3 & - \frac{1}{2} \text{}^{I_k}_{G}\hat{\mathbf{R}}^\top \Delta t^2 -& \mathbf{0}_3 & \mathbf{0}_3 \\ - -\mathbf{0}_3 & - \text{}^{I_k}_G\hat{\mathbf{R}}^\top \Delta t -& \mathbf{0}_3 & \mathbf{0}_3 \\ - -\mathbf{0}_3 & \mathbf{0}_3 -& \mathbf{I}_3 & \mathbf{0}_3 \\ - -\mathbf{0}_3 & \mathbf{0}_3 -& \mathbf{0}_3 & \mathbf{I}_3 -\end{bmatrix} -\f} - - - - -Now, with the computed \f$\boldsymbol{\Phi}(t_{k+1},t_k)\f$ and \f$\mathbf{G}_{k}\f$ matrices, -we can propagate the covariance from \f$t_k\f$ to \f$t_{k+1}\f$: -\f{align*}{ -\mathbf{P}_{k+1|k} = \boldsymbol{\Phi}(t_{k+1},t_k)\mathbf{P}_{k|k}\boldsymbol{\Phi}(t_{k+1},t_k)^\top + \mathbf{G}_k\mathbf{Q}_d\mathbf{G}_k^\top -\f} */ \ No newline at end of file diff --git a/docs/update-delay.dox b/docs/update-delay.dox index f978f47ae..478352b3f 100644 --- a/docs/update-delay.dox +++ 
b/docs/update-delay.dox @@ -4,6 +4,9 @@ @page update-delay Delayed Feature Initialization We describe a method of delayed initialization of a 3D point feature as in [Visual-Inertial Odometry on Resource-Constrained Systems](https://escholarship.org/content/qt4nn0j264/qt4nn0j264.pdf) @cite Li2014THESIS. +It is important to note that this is just an example of how to leverage delayed initialization which is very pertinent to visual-inertial state estimation. +In practice, as long as sufficient measurements are available to constrain the new state (e.g. ENU-to-VIO transform, sensor calibration, object pose, etc..), the below delayed initialization procedure can be applied directly. + Specifically, given a set of measurements involving the state \f$\mathbf{x}\f$ and a new feature \f$\mathbf{f}\f$, we want to optimally and efficiently initialize the feature. \f{align*}{ @@ -85,6 +88,9 @@ For example when initializing a new feature to the end of the state, the augment Note that this process does not update the estimate for \f$\mathbf{x}\f$. However, after initialization, we can then use the second system, \f$\mathbf{r}_2\f$, \f$\mathbf{H}_{x2}\f$, and \f$\mathbf{n}_2\f$ to update our new state through a standard EKF update (see @ref linear-meas section). +We note that if there are exactly enough measurements to initialize the state, then no update occurs. +This makes sense, as the system is not yet overconstrained, thus only the to-be-initialized state and its uncertainty can be found (it does not yet benefit the navigation state estimates). +Some additional notes on performing delayed initialization can be found in [this](https://pgeneva.com/downloads/notes/2022_notes_delay_init.pdf) technical report. 
*/ \ No newline at end of file diff --git a/docs/update-featinit.dox b/docs/update-featinit.dox index 93232d125..e717496df 100644 --- a/docs/update-featinit.dox +++ b/docs/update-featinit.dox @@ -264,8 +264,8 @@ Therefore, we can have the least-squares formulated and Jacobians: \frac{\partial \mathbf{h}(u_A, v_A, \rho_A)}{\partial {h}(u_A, v_A, \rho_A)} & = \begin{bmatrix} - 1/h(\cdots)(1) & 0 & -h(\cdots)(1)/h(\cdots)(3)^2 \\ - 0 & 1/h(\cdots)(2) & -h(\cdots)(2)/h(\cdots)(3)^2 + 1/h(\cdots)(3) & 0 & -h(\cdots)(1)/h(\cdots)(3)^2 \\ + 0 & 1/h(\cdots)(3) & -h(\cdots)(2)/h(\cdots)(3)^2 \end{bmatrix} \\ \frac{\partial {h}(u_A, v_A, \rho_A)}{\partial [u_A, v_A, \rho_A]} & = diff --git a/ov_core/package.xml b/ov_core/package.xml index 76192b66f..f4a0e7d8a 100644 --- a/ov_core/package.xml +++ b/ov_core/package.xml @@ -3,7 +3,7 @@ ov_core - 2.6.3 + 2.7.0 Core algorithms for visual-inertial navigation algorithms. diff --git a/ov_core/src/cam/CamBase.h b/ov_core/src/cam/CamBase.h index 60788f903..da1cbcf73 100644 --- a/ov_core/src/cam/CamBase.h +++ b/ov_core/src/cam/CamBase.h @@ -34,10 +34,7 @@ namespace ov_core { * * This is the base class for all our camera models. * All these models are pinhole cameras, thus just have standard reprojection logic. - * - * See each base class for detailed examples on each model: - * - @ref ov_core::CamEqui - * - @ref ov_core::CamRadtan + * See each derived class for detailed examples of each model. */ class CamBase { diff --git a/ov_core/src/dummy.cpp b/ov_core/src/dummy.cpp index 52d76b678..a8d379636 100644 --- a/ov_core/src/dummy.cpp +++ b/ov_core/src/dummy.cpp @@ -68,6 +68,11 @@ namespace ov_core {} * }; * @endcode * + * When deriving Jacobians, it is important to ensure that the error state used matches the type. + * Each type implements an update function, and thus directly defines the error state it has. + * A type with non-trivial error states is the @ref ov_type::JPLQuat which has equivalent quaternion and SO(3) error.
+ * For rotations and on-manifold representations, [State Estimation for Robotics](http://asrl.utias.utoronto.ca/~tdb/bib/barfoot_ser17.pdf) + * by Timothy D. Barfoot @cite Barfoot2017 covers a nice range of examples. * */ namespace ov_type {} diff --git a/ov_core/src/track/TrackKLT.cpp b/ov_core/src/track/TrackKLT.cpp index 21cc0325c..d2fe388f1 100644 --- a/ov_core/src/track/TrackKLT.cpp +++ b/ov_core/src/track/TrackKLT.cpp @@ -457,7 +457,7 @@ void TrackKLT::perform_detection_monocular(const std::vector &img0pyr, if (x - min_px_dist >= 0 && x + min_px_dist < img0pyr.at(0).cols && y - min_px_dist >= 0 && y + min_px_dist < img0pyr.at(0).rows) { cv::Point pt1(x - min_px_dist, y - min_px_dist); cv::Point pt2(x + min_px_dist, y + min_px_dist); - cv::rectangle(mask0_updated, pt1, pt2, cv::Scalar(255)); + cv::rectangle(mask0_updated, pt1, pt2, cv::Scalar(255), -1); } it0++; it1++; @@ -593,7 +593,7 @@ void TrackKLT::perform_detection_stereo(const std::vector &img0pyr, con if (x - min_px_dist >= 0 && x + min_px_dist < img0pyr.at(0).cols && y - min_px_dist >= 0 && y + min_px_dist < img0pyr.at(0).rows) { cv::Point pt1(x - min_px_dist, y - min_px_dist); cv::Point pt2(x + min_px_dist, y + min_px_dist); - cv::rectangle(mask0_updated, pt1, pt2, cv::Scalar(255)); + cv::rectangle(mask0_updated, pt1, pt2, cv::Scalar(255), -1); } it0++; it1++; @@ -662,8 +662,8 @@ void TrackKLT::perform_detection_stereo(const std::vector &img0pyr, con if (!pts0_new.empty()) { // Do our KLT tracking from the left to the right frame of reference - // Note: we have a pretty big window size here since our projection might be bad - // Note: but this might cause failure in cases of repeated textures (eg. checkerboard) + // NOTE: we have a pretty big window size here since our projection might be bad + // NOTE: but this might cause failure in cases of repeated textures (eg. 
checkerboard) std::vector mask; // perform_matching(img0pyr, img1pyr, kpts0_new, kpts1_new, cam_id_left, cam_id_right, mask); std::vector error; @@ -674,22 +674,18 @@ void TrackKLT::perform_detection_stereo(const std::vector &img0pyr, con // Loop through and record only ones that are valid for (size_t i = 0; i < pts0_new.size(); i++) { - // Check that it is in bounds - if ((int)pts0_new.at(i).x < 0 || (int)pts0_new.at(i).x >= img0pyr.at(0).cols || (int)pts0_new.at(i).y < 0 || - (int)pts0_new.at(i).y >= img0pyr.at(0).rows) { - continue; - } - if ((int)pts1_new.at(i).x < 0 || (int)pts1_new.at(i).x >= img1pyr.at(0).cols || (int)pts1_new.at(i).y < 0 || - (int)pts1_new.at(i).y >= img1pyr.at(0).rows) { - continue; - } + // Check to see if the feature is out of bounds (oob) in either image + bool oob_left = ((int)pts0_new.at(i).x < 0 || (int)pts0_new.at(i).x >= img0pyr.at(0).cols || (int)pts0_new.at(i).y < 0 || + (int)pts0_new.at(i).y >= img0pyr.at(0).rows); + bool oob_right = ((int)pts1_new.at(i).x < 0 || (int)pts1_new.at(i).x >= img1pyr.at(0).cols || (int)pts1_new.at(i).y < 0 || + (int)pts1_new.at(i).y >= img1pyr.at(0).rows); // Check to see if it there is already a feature in the right image at this location // 1) If this is not already in the right image, then we should treat it as a stereo // 2) Otherwise we will treat this as just a monocular track of the feature // TODO: we should check to see if we can combine this new feature and the one in the right // TODO: seems if reject features which overlay with right features already we have very poor tracking perf - if (mask[i] == 1) { + if (!oob_left && !oob_right && mask[i] == 1) { // update the uv coordinates kpts0_new.at(i).pt = pts0_new.at(i); kpts1_new.at(i).pt = pts1_new.at(i); @@ -700,7 +696,7 @@ void TrackKLT::perform_detection_stereo(const std::vector &img0pyr, con size_t temp = ++currid; ids0.push_back(temp); ids1.push_back(temp); - } else { + } else if (!oob_left) { // update the uv coordinates 
kpts0_new.at(i).pt = pts0_new.at(i); // append the new uv coordinate @@ -722,6 +718,7 @@ void TrackKLT::perform_detection_stereo(const std::vector &img0pyr, con float size_y1 = (float)img1pyr.at(0).rows / (float)grid_y; cv::Size size_grid1(grid_x, grid_y); // width x height cv::Mat grid_2d_grid1 = cv::Mat::zeros(size_grid1, CV_8UC1); + cv::Mat mask1_updated = mask0.clone(); it0 = pts1.begin(); it1 = ids1.begin(); while (it0 != pts1.end()) { @@ -751,17 +748,15 @@ void TrackKLT::perform_detection_stereo(const std::vector &img0pyr, con it1 = ids1.erase(it1); continue; } - // Check if this is a stereo point - bool is_stereo = (std::find(ids0.begin(), ids0.end(), *it1) != ids0.end()); // Check if this keypoint is near another point // NOTE: if it is *not* a stereo point, then we will not delete the feature // NOTE: this means we might have a mono and stereo feature near each other, but that is ok - if (grid_2d_close1.at(y_grid, x_grid) > 127 && !is_stereo) { + bool is_stereo = (std::find(ids0.begin(), ids0.end(), *it1) != ids0.end()); + if (grid_2d_close1.at(y_close, x_close) > 127 && !is_stereo) { it0 = pts1.erase(it0); it1 = ids1.erase(it1); continue; } - // Now check if it is in a mask area or not // NOTE: mask has max value of 255 (white) if it should be if (mask1.at(y, x) > 127) { @@ -770,10 +765,16 @@ void TrackKLT::perform_detection_stereo(const std::vector &img0pyr, con continue; } // Else we are good, move forward to the next point - grid_2d_close1.at(y_grid, x_grid) = 255; + grid_2d_close1.at(y_close, x_close) = 255; if (grid_2d_grid1.at(y_grid, x_grid) < 255) { grid_2d_grid1.at(y_grid, x_grid) += 1; } + // Append this to the local mask of the image + if (x - min_px_dist >= 0 && x + min_px_dist < img1pyr.at(0).cols && y - min_px_dist >= 0 && y + min_px_dist < img1pyr.at(0).rows) { + cv::Point pt1(x - min_px_dist, y - min_px_dist); + cv::Point pt2(x + min_px_dist, y + min_px_dist); + cv::rectangle(mask1_updated, pt1, pt2, cv::Scalar(255), -1); + } it0++; it1++; 
} @@ -786,7 +787,7 @@ void TrackKLT::perform_detection_stereo(const std::vector &img0pyr, con // This is old extraction code that would extract from the whole image // This can be slow as this will recompute extractions for grid areas that we have max features already // std::vector pts1_ext; - // Grider_FAST::perform_griding(img1pyr.at(0), mask1, pts1_ext, num_features, grid_x, grid_y, threshold, true); + // Grider_FAST::perform_griding(img1pyr.at(0), mask1_updated, pts1_ext, num_features, grid_x, grid_y, threshold, true); // We also check a downsampled mask such that we don't extract in areas where it is all masked! cv::Mat mask1_grid; @@ -804,7 +805,7 @@ void TrackKLT::perform_detection_stereo(const std::vector &img0pyr, con } } std::vector pts1_ext; - Grider_GRID::perform_griding(img1pyr.at(0), mask1, valid_locs, pts1_ext, num_features, grid_x, grid_y, threshold, true); + Grider_GRID::perform_griding(img1pyr.at(0), mask1_updated, valid_locs, pts1_ext, num_features, grid_x, grid_y, threshold, true); // Now, reject features that are close a current feature for (auto &kpt : pts1_ext) { diff --git a/ov_core/src/types/JPLQuat.h b/ov_core/src/types/JPLQuat.h index 43e546e63..48a547b1b 100644 --- a/ov_core/src/types/JPLQuat.h +++ b/ov_core/src/types/JPLQuat.h @@ -35,6 +35,59 @@ namespace ov_type { * We recommend that people new quaternions check out the following resources: * - http://mars.cs.umn.edu/tr/reports/Trawny05b.pdf * - ftp://naif.jpl.nasa.gov/pub/naif/misc/Quaternion_White_Paper/Quaternions_White_Paper.pdf + * + * + * We need to take special care to handle edge cases when converting to and from other rotation formats. + * All equations are based on the following tech report @cite Trawny2005TR : + * + * > Trawny, Nikolas, and Stergios I. Roumeliotis. "Indirect Kalman filter for 3D attitude estimation." + * > University of Minnesota, Dept. of Comp. Sci. & Eng., Tech. Rep 2 (2005): 2005. 
+ * > http://mars.cs.umn.edu/tr/reports/Trawny05b.pdf + * + * @section jplquat_define JPL Quaternion Definition + * + * We define the quaternion as the following linear combination: + * @f[ + * \bar{q} = q_4+q_1\mathbf{i}+q_2\mathbf{j}+q_3\mathbf{k} + * @f] + * Where i,j,k are defined as the following: + * @f[ + * \mathbf{i}^2=-1~,~\mathbf{j}^2=-1~,~\mathbf{k}^2=-1 + * @f] + * @f[ + * -\mathbf{i}\mathbf{j}=\mathbf{j}\mathbf{i}=\mathbf{k} + * ~,~ + * -\mathbf{j}\mathbf{k}=\mathbf{k}\mathbf{j}=\mathbf{i} + * ~,~ + * -\mathbf{k}\mathbf{i}=\mathbf{i}\mathbf{k}=\mathbf{j} + * @f] + * As noted in @cite Trawny2005TR this does not correspond to the Hamilton notation, and follows the "JPL Proposed Standard Conventions". + * The q_4 quantity is the "scalar" portion of the quaternion, while q_1, q_2, q_3 are part of the "vector" portion. + * We split the 4x1 vector into the following convention: + * @f[ + * \bar{q} = \begin{bmatrix}q_1\\q_2\\q_3\\q_4\end{bmatrix} = \begin{bmatrix}\mathbf{q}\\q_4\end{bmatrix} + * @f] + * It is also important to note that the quaternion is constrained to the unit circle: + * @f[ + * |\bar{q}| = \sqrt{\bar{q}^\top\bar{q}} = \sqrt{|\mathbf{q}|^2+q_4^2} = 1 + * @f] + * + * + * @section jplquat_errorstate Error State Definition + * + * It is important to note that one can prove that the left-multiplicative quaternion error is equivalent to the SO(3) error. + * If one wishes to use the right-hand error, this would need to be implemented as a different type then this class! + * This is noted in Eq. (71) in @cite Trawny2005TR . 
+ * Specifically we have the following: + * \f{align*}{ + * {}^{I}_G\bar{q} &\simeq \begin{bmatrix} \frac{1}{2} \delta \boldsymbol{\theta} \\ 1 \end{bmatrix} \otimes {}^{I}_G\hat{\bar{q}} + * \f} + * which is the same as: + * \f{align*}{ + * {}^{I}_G \mathbf{R} &\simeq \exp(-\delta \boldsymbol{\theta}) {}^{I}_G \hat{\mathbf{R}} \\ + * &\simeq (\mathbf{I} - \lfloor \delta \boldsymbol{\theta} \rfloor) {}^{I}_G \hat{\mathbf{R}} \\ + * \f} + * */ class JPLQuat : public Type { @@ -53,7 +106,7 @@ class JPLQuat : public Type { * quaternion with a quaternion built from a small axis-angle perturbation. * * @f[ - * \bar{q}=norm\Big(\begin{bmatrix} 0.5*\mathbf{\theta_{dx}} \\ 1 \end{bmatrix}\Big) \hat{\bar{q}} + * \bar{q}=norm\Big(\begin{bmatrix} \frac{1}{2} \delta \boldsymbol{\theta}_{dx} \\ 1 \end{bmatrix}\Big) \otimes \hat{\bar{q}} * @f] * * @param dx Axis-angle representation of the perturbing quaternion @@ -73,13 +126,13 @@ class JPLQuat : public Type { /** * @brief Sets the value of the estimate and recomputes the internal rotation matrix - * @param new_value New value for the quaternion estimate + * @param new_value New value for the quaternion estimate (JPL quat as x,y,z,w) */ void set_value(const Eigen::MatrixXd &new_value) override { set_value_internal(new_value); } /** * @brief Sets the fej value and recomputes the fej rotation matrix - * @param new_value New value for the quaternion estimate + * @param new_value New value for the quaternion estimate (JPL quat as x,y,z,w) */ void set_fej(const Eigen::MatrixXd &new_value) override { set_fej_internal(new_value); } diff --git a/ov_core/src/types/Vec.h b/ov_core/src/types/Vec.h index a9bc41903..2725182b0 100644 --- a/ov_core/src/types/Vec.h +++ b/ov_core/src/types/Vec.h @@ -45,6 +45,11 @@ class Vec : public Type { /** * @brief Implements the update operation through standard vector addition + * + * \f{align*}{ + * \mathbf{v} &= \hat{\mathbf{v}} + \tilde{\mathbf{v}}_{dx} + * \f} + * * @param dx Additive error state 
correction */ void update(const Eigen::VectorXd &dx) override { diff --git a/ov_core/src/utils/opencv_lambda_body.h b/ov_core/src/utils/opencv_lambda_body.h index a4c85bb5e..1c4cbff8e 100644 --- a/ov_core/src/utils/opencv_lambda_body.h +++ b/ov_core/src/utils/opencv_lambda_body.h @@ -27,7 +27,7 @@ namespace ov_core { -/** +/* * @brief Helper class to do OpenCV parallelization * * This is a utility class required to build with older version of opencv diff --git a/ov_core/src/utils/opencv_yaml_parse.h b/ov_core/src/utils/opencv_yaml_parse.h index fd2d3921f..3ee981483 100644 --- a/ov_core/src/utils/opencv_yaml_parse.h +++ b/ov_core/src/utils/opencv_yaml_parse.h @@ -43,13 +43,17 @@ namespace ov_core { * @brief Helper class to do OpenCV yaml parsing from both file and ROS. * * The logic is as follows: - * - Given a path to the main config file we will load it into our cv::FileStorage object. + * - Given a path to the main config file we will load it into our + * [cv::FileStorage](https://docs.opencv.org/4.x/da/d56/classcv_1_1FileStorage.html) object. * - From there the user can request for different parameters of different types from the config. * - If we have ROS, then we will also check to see if the user has overridden any config files via ROS. * - The ROS parameters always take priority over the ones in our config. * * NOTE: There are no "nested" yaml parameters. They are all under the "root" of the yaml file!!! - * NOTE: The camera and imu have nested, but those are handled externally.... + * This limits things quite a bit, but simplified the below implementation. + * + * NOTE: The camera and imu have nested, but those are handled externally. + * They first read the "imu0" or "cam1" level, after-which all values are at the same level. 
*/ class YamlParser { public: diff --git a/ov_core/src/utils/quat_ops.h b/ov_core/src/utils/quat_ops.h index 1662943e5..6b1e7c337 100644 --- a/ov_core/src/utils/quat_ops.h +++ b/ov_core/src/utils/quat_ops.h @@ -26,7 +26,8 @@ * @section Summary * This file contains the common utility functions for operating on JPL quaternions. * We need to take special care to handle edge cases when converting to and from other rotation formats. - * All equations are based on the following tech report: + * All equations are based on the following tech report @cite Trawny2005TR : + * * > Trawny, Nikolas, and Stergios I. Roumeliotis. "Indirect Kalman filter for 3D attitude estimation." * > University of Minnesota, Dept. of Comp. Sci. & Eng., Tech. Rep 2 (2005): 2005. * > http://mars.cs.umn.edu/tr/reports/Trawny05b.pdf @@ -48,13 +49,13 @@ * ~,~ * -\mathbf{k}\mathbf{i}=\mathbf{i}\mathbf{k}=\mathbf{j} * @f] - * As noted in [Trawny2005] this does not correspond to the Hamilton notation, and follows the "JPL Proposed Standard Conventions". + * As noted in @cite Trawny2005TR this does not correspond to the Hamilton notation, and follows the "JPL Proposed Standard Conventions". * The q_4 quantity is the "scalar" portion of the quaternion, while q_1,q_2,q_3 are part of the "vector" portion. * We split the 4x1 vector into the following convention: * @f[ * \bar{q} = \begin{bmatrix}q_1\\q_2\\q_3\\q_4\end{bmatrix} = \begin{bmatrix}\mathbf{q}\\q_4\end{bmatrix} * @f] - * It is also important to note that the quaternion is constrainted to the unit circle: + * It is also important to note that the quaternion is constrained to the unit circle: * @f[ * |\bar{q}| = \sqrt{\bar{q}^\top\bar{q}} = \sqrt{|\mathbf{q}|^2+q_4^2} = 1 * @f] @@ -465,7 +466,18 @@ inline Eigen::Matrix Inv(Eigen::Matrix q) { * * See equation (48) of trawny tech report [Indirect Kalman Filter for 3D Attitude * Estimation](http://mars.cs.umn.edu/tr/reports/Trawny05b.pdf). 
+ * This matrix is derived in Section 1.5 of the report by finding the Quaternion Time Derivative. + * + * \f{align*}{ + * \boldsymbol{\Omega}(\boldsymbol{\omega}) &= + * \begin{bmatrix} + * -\lfloor{\boldsymbol{\omega}} \rfloor & \boldsymbol{\omega} \\ + * -\boldsymbol{\omega}^\top & 0 + * \end{bmatrix} + * \f} + * + * @param w Angular velocity + * @return The matrix \f$\boldsymbol{\Omega}\f$ + */ inline Eigen::Matrix Omega(Eigen::Matrix w) { Eigen::Matrix mat; @@ -492,10 +504,10 @@ inline Eigen::Matrix quatnorm(Eigen::Matrix q_t) { * @brief Computes left Jacobian of SO(3) * * The left Jacobian of SO(3) is defined equation (7.77b) in [State Estimation for - * Robotics](http://asrl.utias.utoronto.ca/~tdb/bib/barfoot_ser17.pdf) by Timothy D. Barfoot. Specifically it is the following (with - * \f$\theta=|\boldsymbol\theta|\f$ and \f$\mathbf a=\boldsymbol\theta/|\boldsymbol\theta|\f$): \f{align*}{ J_l(\boldsymbol\theta) = - * \frac{\sin\theta}{\theta}\mathbf I + \Big(1-\frac{\sin\theta}{\theta}\Big)\mathbf a \mathbf a^\top + \frac{1-\cos\theta}{\theta}\lfloor - * \mathbf a \times\rfloor \f} + * Robotics](http://asrl.utias.utoronto.ca/~tdb/bib/barfoot_ser17.pdf) by Timothy D. Barfoot @cite Barfoot2017. Specifically it is the + * following (with \f$\theta=|\boldsymbol\theta|\f$ and \f$\mathbf a=\boldsymbol\theta/|\boldsymbol\theta|\f$): \f{align*}{ + * J_l(\boldsymbol\theta) = \frac{\sin\theta}{\theta}\mathbf I + \Big(1-\frac{\sin\theta}{\theta}\Big)\mathbf a \mathbf a^\top + + * \frac{1-\cos\theta}{\theta}\lfloor \mathbf a \times\rfloor \f} * * @param w axis-angle * @return The left Jacobian of SO(3) @@ -516,8 +528,8 @@ inline Eigen::Matrix Jl_so3(const Eigen::Matrix &w) * @brief Computes right Jacobian of SO(3) * * The right Jacobian of SO(3) is related to the left by Jl(-w)=Jr(w). - * See equation (7.87) in [State Estimation for Robotics](http://asrl.utias.utoronto.ca/~tdb/bib/barfoot_ser17.pdf) by Timothy D. Barfoot. 
- * See @ref Jl_so3() for the definition of the left Jacobian of SO(3). + * See equation (7.87) in [State Estimation for Robotics](http://asrl.utias.utoronto.ca/~tdb/bib/barfoot_ser17.pdf) by Timothy D. Barfoot + * @cite Barfoot2017. See @ref Jl_so3() for the definition of the left Jacobian of SO(3). * * @param w axis-angle * @return The right Jacobian of SO(3) @@ -531,7 +543,7 @@ inline Eigen::Matrix Jr_so3(const Eigen::Matrix &w) * If you are interested in how to compute Jacobians checkout this report: * http://mars.cs.umn.edu/tr/reports/Trawny05c.pdf * - * @param rot Rotation matrix + * @param rot SO(3) rotation matrix * @return roll, pitch, yaw values (in that order) */ inline Eigen::Matrix rot2rpy(const Eigen::Matrix &rot) { @@ -549,7 +561,18 @@ inline Eigen::Matrix rot2rpy(const Eigen::Matrix &ro /** * @brief Construct rotation matrix from given roll + * + * \f{align*}{ + * \mathbf{R}_x(t) &= + * \begin{bmatrix} + * 1 & 0 & 0 \\ + * 0 & \cos(t) & -\sin(t) \\ + * 0 & \sin(t) & \cos(t) + * \end{bmatrix} + * \f} + * * @param t roll angle + * @return SO(3) rotation matrix */ inline Eigen::Matrix rot_x(double t) { Eigen::Matrix r; @@ -561,7 +584,18 @@ inline Eigen::Matrix rot_x(double t) { /** * @brief Construct rotation matrix from given pitch + * + * \f{align*}{ + * \mathbf{R}_y(t) &= + * \begin{bmatrix} + * \cos(t) & 0 & \sin(t) \\ + * 0 & 1 & 0 \\ + * -\sin(t) & 0 & \cos(t) + * \end{bmatrix} + * \f} + * * @param t pitch angle + * @return SO(3) rotation matrix */ inline Eigen::Matrix rot_y(double t) { Eigen::Matrix r; @@ -573,7 +607,18 @@ inline Eigen::Matrix rot_y(double t) { /** * @brief Construct rotation matrix from given yaw + * + * \f{align*}{ + * \mathbf{R}_z(t) &= + * \begin{bmatrix} + * \cos(t) & -\sin(t) & 0 \\ + * \sin(t) & \cos(t) & 0 \\ + * 0 & 0 & 1 + * \end{bmatrix} + * \f} + * * @param t yaw angle + * @return SO(3) rotation matrix */ inline Eigen::Matrix rot_z(double t) { Eigen::Matrix r; diff --git a/ov_data/package.xml b/ov_data/package.xml 
index 0ec169cd4..dc249cab5 100644 --- a/ov_data/package.xml +++ b/ov_data/package.xml @@ -3,7 +3,7 @@ ov_data - 2.6.3 + 2.7.0 Data for the OpenVINS project, mostly just groundtruth files... diff --git a/ov_eval/package.xml b/ov_eval/package.xml index 4155ea3bc..f63af370a 100644 --- a/ov_eval/package.xml +++ b/ov_eval/package.xml @@ -3,7 +3,7 @@ ov_eval - 2.6.3 + 2.7.0 Evaluation methods and scripts for visual-inertial odometry systems. diff --git a/ov_eval/src/calc/ResultSimulation.cpp b/ov_eval/src/calc/ResultSimulation.cpp index aec3490d7..c4d6ece03 100644 --- a/ov_eval/src/calc/ResultSimulation.cpp +++ b/ov_eval/src/calc/ResultSimulation.cpp @@ -126,6 +126,7 @@ void ResultSimulation::plot_state(bool doplotting, double max_time) { matplotlibcpp::subplot(3, 1, 3); matplotlibcpp::ylabel("z-error (deg)"); matplotlibcpp::xlabel("dataset time (s)"); + matplotlibcpp::tight_layout(); matplotlibcpp::show(false); //===================================================== @@ -142,6 +143,7 @@ void ResultSimulation::plot_state(bool doplotting, double max_time) { matplotlibcpp::subplot(3, 1, 3); matplotlibcpp::ylabel("z-error (m)"); matplotlibcpp::xlabel("dataset time (s)"); + matplotlibcpp::tight_layout(); matplotlibcpp::show(false); //===================================================== @@ -158,6 +160,7 @@ void ResultSimulation::plot_state(bool doplotting, double max_time) { matplotlibcpp::subplot(3, 1, 3); matplotlibcpp::ylabel("z-error (m/s)"); matplotlibcpp::xlabel("dataset time (s)"); + matplotlibcpp::tight_layout(); matplotlibcpp::show(false); //===================================================== @@ -174,6 +177,7 @@ void ResultSimulation::plot_state(bool doplotting, double max_time) { matplotlibcpp::subplot(3, 1, 3); matplotlibcpp::ylabel("z-error (rad/s)"); matplotlibcpp::xlabel("dataset time (s)"); + matplotlibcpp::tight_layout(); matplotlibcpp::show(false); //===================================================== @@ -190,6 +194,7 @@ void 
ResultSimulation::plot_state(bool doplotting, double max_time) { matplotlibcpp::subplot(3, 1, 3); matplotlibcpp::ylabel("z-error (m/s^2)"); matplotlibcpp::xlabel("dataset time (s)"); + matplotlibcpp::tight_layout(); matplotlibcpp::show(false); //===================================================== @@ -232,7 +237,6 @@ void ResultSimulation::plot_timeoff(bool doplotting, double max_time) { return; #else - //===================================================== // Plot this figure matplotlibcpp::figure_size(800, 250); @@ -267,8 +271,8 @@ void ResultSimulation::plot_timeoff(bool doplotting, double max_time) { matplotlibcpp::title("Camera IMU Time Offset Error"); matplotlibcpp::ylabel("error (sec)"); matplotlibcpp::xlabel("dataset time (s)"); + matplotlibcpp::tight_layout(); matplotlibcpp::show(false); - //===================================================== #endif } @@ -336,7 +340,6 @@ void ResultSimulation::plot_cam_instrinsics(bool doplotting, double max_time) { std::vector colors = {"blue", "red", "black", "green", "cyan", "magenta"}; assert(error_cam_k.size() <= colors.size()); - //===================================================== // Plot this figure matplotlibcpp::figure_size(800, 600); for (int n = 0; n < (int)est_state.at(0)(18); n++) { @@ -356,10 +359,9 @@ void ResultSimulation::plot_cam_instrinsics(bool doplotting, double max_time) { matplotlibcpp::subplot(4, 1, 4); matplotlibcpp::ylabel("cy (px)"); matplotlibcpp::xlabel("dataset time (s)"); + matplotlibcpp::tight_layout(); matplotlibcpp::show(false); - //===================================================== - //===================================================== // Plot this figure matplotlibcpp::figure_size(800, 600); for (int n = 0; n < (int)est_state.at(0)(18); n++) { @@ -379,8 +381,8 @@ void ResultSimulation::plot_cam_instrinsics(bool doplotting, double max_time) { matplotlibcpp::subplot(4, 1, 4); matplotlibcpp::ylabel("d4"); matplotlibcpp::xlabel("dataset time (s)"); + 
matplotlibcpp::tight_layout(); matplotlibcpp::show(false); - //===================================================== #endif } @@ -454,7 +456,6 @@ void ResultSimulation::plot_cam_extrinsics(bool doplotting, double max_time) { std::vector colors = {"blue", "red", "black", "green", "cyan", "magenta"}; assert(error_cam_ori.size() <= colors.size()); - //===================================================== // Plot this figure matplotlibcpp::figure_size(800, 500); for (int n = 0; n < (int)est_state.at(0)(18); n++) { @@ -472,10 +473,9 @@ void ResultSimulation::plot_cam_extrinsics(bool doplotting, double max_time) { matplotlibcpp::subplot(3, 1, 3); matplotlibcpp::ylabel("z-error (deg)"); matplotlibcpp::xlabel("dataset time (s)"); + matplotlibcpp::tight_layout(); matplotlibcpp::show(false); - //===================================================== - //===================================================== // Plot this figure matplotlibcpp::figure_size(800, 500); for (int n = 0; n < (int)est_state.at(0)(18); n++) { @@ -493,8 +493,241 @@ void ResultSimulation::plot_cam_extrinsics(bool doplotting, double max_time) { matplotlibcpp::subplot(3, 1, 3); matplotlibcpp::ylabel("z-error (m)"); matplotlibcpp::xlabel("dataset time (s)"); + matplotlibcpp::tight_layout(); + matplotlibcpp::show(false); + +#endif +} + +void ResultSimulation::plot_imu_intrinsics(bool doplotting, double max_time) { + + // Check that we have cameras + if ((int)est_state.at(0)(18) < 1) { + PRINT_ERROR(YELLOW "You need at least one camera to run estimator and plot results...\n" RESET); + return; + } + + // get the parameters id + int num_cam = (int)est_state.at(0)(18); + int imu_model = (int)est_state.at(0)(1 + 16 + 1 + 1 + 15 * num_cam); + int dw_id = 1 + 16 + 1 + 1 + 15 * num_cam + 1; + int dw_cov_id = 1 + 15 + 1 + 1 + 14 * num_cam + 1; + int da_id = dw_id + 6; + int da_cov_id = dw_cov_id + 6; + int tg_id = da_id + 6; + int tg_cov_id = da_cov_id + 6; + int wtoI_id = tg_id + 9; + int wtoI_cov_id = tg_cov_id + 9; 
+ int atoI_id = wtoI_id + 4; + int atoI_cov_id = wtoI_cov_id + 3; + + // IMU intrinsics statistic storage + std::vector error_dw, error_da, error_tg, error_wtoI, error_atoI; + for (int j = 0; j < 6; j++) { + error_dw.emplace_back(Statistics()); + error_da.emplace_back(Statistics()); + } + for (int j = 0; j < 9; j++) { + error_tg.emplace_back(Statistics()); + } + for (int j = 0; j < 3; j++) { + error_wtoI.emplace_back(Statistics()); + error_atoI.emplace_back(Statistics()); + } + + // Loop through and calculate error + double start_time = est_state.at(0)(0); + for (size_t i = 0; i < est_state.size(); i++) { + + // Exit if we have reached our max time + if ((est_state.at(i)(0) - start_time) > max_time) + break; + + // Assert our times are the same + assert(est_state.at(i)(0) == gt_state.at(i)(0)); + + // If we are not calibrating then don't plot it! + if (state_cov.at(i)(dw_cov_id) == 0.0) { + PRINT_WARNING(YELLOW "IMU intrinsics not calibrated online, so will not plot...\n" RESET); + return; + } + + // Loop through IMU parameters and calculate error + for (int j = 0; j < 6; j++) { + error_dw.at(j).timestamps.push_back(est_state.at(i)(0)); + error_dw.at(j).values.push_back(est_state.at(i)(dw_id + j) - gt_state.at(i)(dw_id + j)); + error_dw.at(j).values_bound.push_back(3 * state_cov.at(i)(dw_cov_id + j)); + error_da.at(j).timestamps.push_back(est_state.at(i)(0)); + error_da.at(j).values.push_back(est_state.at(i)(da_id + j) - gt_state.at(i)(da_id + j)); + error_da.at(j).values_bound.push_back(3 * state_cov.at(i)(da_cov_id + j)); + } + for (int j = 0; j < 9; j++) { + error_tg.at(j).timestamps.push_back(est_state.at(i)(0)); + error_tg.at(j).values.push_back(est_state.at(i)(tg_id + j) - gt_state.at(i)(tg_id + j)); + error_tg.at(j).values_bound.push_back(3 * state_cov.at(i)(tg_cov_id + j)); + } + + // Calculate orientation errors + Eigen::Matrix3d e_R_wtoI = ov_core::quat_2_Rot(gt_state.at(i).block<4, 1>(wtoI_id, 0)) * + ov_core::quat_2_Rot(est_state.at(i).block<4, 
1>(wtoI_id, 0)).transpose(); + Eigen::Vector3d ori_wtoI = -180.0 / M_PI * ov_core::log_so3(e_R_wtoI); + Eigen::Matrix3d e_R_atoI = ov_core::quat_2_Rot(gt_state.at(i).block<4, 1>(atoI_id, 0)) * + ov_core::quat_2_Rot(est_state.at(i).block<4, 1>(atoI_id, 0)).transpose(); + Eigen::Vector3d ori_atoI = -180.0 / M_PI * ov_core::log_so3(e_R_atoI); + for (int j = 0; j < 3; j++) { + error_wtoI.at(j).timestamps.push_back(est_state.at(i)(0)); + error_wtoI.at(j).values.push_back(ori_wtoI(j)); + error_wtoI.at(j).values_bound.push_back(3 * 180.0 / M_PI * state_cov.at(i)(wtoI_cov_id + j)); + error_atoI.at(j).timestamps.push_back(est_state.at(i)(0)); + error_atoI.at(j).values.push_back(ori_atoI(j)); + error_atoI.at(j).values_bound.push_back(3 * 180.0 / M_PI * state_cov.at(i)(atoI_cov_id + j)); + } + } + + // return if we don't want to plot + if (!doplotting) + return; + +#ifndef HAVE_PYTHONLIBS + PRINT_ERROR(RED "Matplotlib not loaded, so will not plot, just returning..\n" RESET); + return; +#else + + // Plot line colors + std::vector colors = {"blue", "red", "black", "green", "cyan", "magenta"}; + std::string estcolor = ((int)est_state.at(0)(18) == 1) ? "blue" : colors.at(0); + std::string stdcolor = ((int)est_state.at(0)(18) == 1) ? 
"red" : colors.at(1); + + // Plot this figure + matplotlibcpp::figure_size(1000, 500); + plot_6errors(error_dw[0], error_dw[3], error_dw[1], error_dw[4], error_dw[2], error_dw[5], colors.at(0), stdcolor); + + // Update the title and axis labels + matplotlibcpp::subplot(3, 2, 1); + matplotlibcpp::title("IMU Dw Error (1:3)"); + matplotlibcpp::ylabel("dw_1"); + matplotlibcpp::subplot(3, 2, 2); + matplotlibcpp::title("IMU Dw Error (4:6)"); + matplotlibcpp::ylabel("dw_4"); + matplotlibcpp::subplot(3, 2, 3); + matplotlibcpp::ylabel("dw_2"); + matplotlibcpp::subplot(3, 2, 4); + matplotlibcpp::ylabel("dw_5"); + matplotlibcpp::subplot(3, 2, 5); + matplotlibcpp::ylabel("dw_3"); + matplotlibcpp::xlabel("dataset time (s)"); + matplotlibcpp::subplot(3, 2, 6); + matplotlibcpp::ylabel("dw_6"); + matplotlibcpp::xlabel("dataset time (s)"); + matplotlibcpp::tight_layout(); matplotlibcpp::show(false); + + //===================================================== //===================================================== + // Plot this figure + matplotlibcpp::figure_size(1000, 500); + plot_6errors(error_da[0], error_da[3], error_da[1], error_da[4], error_da[2], error_da[5], colors.at(0), stdcolor); + + // Update the title and axis labels + matplotlibcpp::subplot(3, 2, 1); + matplotlibcpp::title("IMU Da Error (1:3)"); + matplotlibcpp::ylabel("da_1"); + matplotlibcpp::subplot(3, 2, 2); + matplotlibcpp::title("IMU Da Error (4:6)"); + matplotlibcpp::ylabel("da_4"); + matplotlibcpp::subplot(3, 2, 3); + matplotlibcpp::ylabel("da_2"); + matplotlibcpp::subplot(3, 2, 4); + matplotlibcpp::ylabel("da_5"); + matplotlibcpp::subplot(3, 2, 5); + matplotlibcpp::ylabel("da_3"); + matplotlibcpp::xlabel("dataset time (s)"); + matplotlibcpp::subplot(3, 2, 6); + matplotlibcpp::ylabel("da_6"); + matplotlibcpp::xlabel("dataset time (s)"); + matplotlibcpp::tight_layout(); + matplotlibcpp::show(false); + + //===================================================== + 
//===================================================== + + // Plot this figure + // NOTE: display is row-based not column-based + matplotlibcpp::figure_size(1400, 500); + plot_9errors(error_tg[0], error_tg[3], error_tg[6], error_tg[1], error_tg[4], error_tg[7], error_tg[2], error_tg[5], error_tg[8], + colors.at(0), stdcolor); + + // Update the title and axis labels + matplotlibcpp::subplot(3, 3, 1); + matplotlibcpp::title("IMU Tg Error (1:3)"); + matplotlibcpp::ylabel("tg_1"); + matplotlibcpp::subplot(3, 3, 2); + matplotlibcpp::title("IMU Tg Error (4:6)"); + matplotlibcpp::ylabel("tg_4"); + matplotlibcpp::subplot(3, 3, 3); + matplotlibcpp::title("IMU Tg Error (7:9)"); + matplotlibcpp::ylabel("tg_7"); + matplotlibcpp::subplot(3, 3, 4); + matplotlibcpp::ylabel("tg_2"); + matplotlibcpp::subplot(3, 3, 5); + matplotlibcpp::ylabel("tg_5"); + matplotlibcpp::subplot(3, 3, 6); + matplotlibcpp::ylabel("tg_8"); + matplotlibcpp::subplot(3, 3, 7); + matplotlibcpp::ylabel("tg_3"); + matplotlibcpp::xlabel("dataset time (s)"); + matplotlibcpp::subplot(3, 3, 8); + matplotlibcpp::ylabel("tg_6"); + matplotlibcpp::xlabel("dataset time (s)"); + matplotlibcpp::subplot(3, 3, 9); + matplotlibcpp::ylabel("tg_9"); + matplotlibcpp::xlabel("dataset time (s)"); + matplotlibcpp::tight_layout(); + matplotlibcpp::show(false); + + //===================================================== + //===================================================== + + // Finally plot + if (imu_model == 0) { + + // Kalibr model + // Plot this figure + matplotlibcpp::figure_size(800, 500); + plot_3errors(error_wtoI[0], error_wtoI[1], error_wtoI[2], colors.at(0), stdcolor); + + // Update the title and axis labels + matplotlibcpp::subplot(3, 1, 1); + matplotlibcpp::title("IMU R_GYROtoIMU Error"); + matplotlibcpp::ylabel("x-error (deg)"); + matplotlibcpp::subplot(3, 1, 2); + matplotlibcpp::ylabel("y-error (deg)"); + matplotlibcpp::subplot(3, 1, 3); + matplotlibcpp::ylabel("z-error (deg)"); + matplotlibcpp::xlabel("dataset 
time (s)"); + matplotlibcpp::tight_layout(); + matplotlibcpp::show(false); + + } else { + + // RPNG model + // Plot this figure + matplotlibcpp::figure_size(800, 500); + plot_3errors(error_atoI[0], error_atoI[1], error_atoI[2], colors.at(0), stdcolor); + + // Update the title and axis labels + matplotlibcpp::subplot(3, 1, 1); + matplotlibcpp::title("IMU R_ACCtoIMU Error"); + matplotlibcpp::ylabel("x-error (deg)"); + matplotlibcpp::subplot(3, 1, 2); + matplotlibcpp::ylabel("y-error (deg)"); + matplotlibcpp::subplot(3, 1, 3); + matplotlibcpp::ylabel("z-error (deg)"); + matplotlibcpp::xlabel("dataset time (s)"); + matplotlibcpp::tight_layout(); + matplotlibcpp::show(false); + } + #endif } diff --git a/ov_eval/src/calc/ResultSimulation.h b/ov_eval/src/calc/ResultSimulation.h index f42047d0e..b033365a8 100644 --- a/ov_eval/src/calc/ResultSimulation.h +++ b/ov_eval/src/calc/ResultSimulation.h @@ -95,6 +95,13 @@ class ResultSimulation { */ void plot_cam_extrinsics(bool doplotting, double max_time = INFINITY); + /** + * @brief Will plot the imu intrinsic errors + * @param doplotting True if you want to display the plots + * @param max_time Max number of second we want to plot + */ + void plot_imu_intrinsics(bool doplotting, double max_time = INFINITY); + protected: // Trajectory data (loaded from file and timestamp intersected) std::vector est_state, gt_state; @@ -266,6 +273,316 @@ class ResultSimulation { } matplotlibcpp::xlim(0.0, endtime4 - starttime4); } + /** + * @brief Plots six different statistic values and sigma bounds + * @param s1 Error one + * @param s2 Error two + * @param s3 Error three + * @param s4 Error four + * @param s5 Error five + * @param s6 Error six + * @param color_err MATLAB color string for error line (blue, red, etc.) + * @param color_std MATLAB color string for deviation (blue, red, etc.) 
+ */ + void plot_6errors(ov_eval::Statistics s1, ov_eval::Statistics s2, ov_eval::Statistics s3, ov_eval::Statistics s4, ov_eval::Statistics s5, + ov_eval::Statistics s6, std::string color_err, std::string color_std) { + + // Zero our time arrays + double starttime1 = (s1.timestamps.empty()) ? 0 : s1.timestamps.at(0); + double endtime1 = (s1.timestamps.empty()) ? 0 : s1.timestamps.at(s1.timestamps.size() - 1); + for (size_t i = 0; i < s1.timestamps.size(); i++) { + s1.timestamps.at(i) -= starttime1; + } + double starttime2 = (s2.timestamps.empty()) ? 0 : s2.timestamps.at(0); + double endtime2 = (s2.timestamps.empty()) ? 0 : s2.timestamps.at(s2.timestamps.size() - 1); + for (size_t i = 0; i < s2.timestamps.size(); i++) { + s2.timestamps.at(i) -= starttime2; + } + double starttime3 = (s3.timestamps.empty()) ? 0 : s3.timestamps.at(0); + double endtime3 = (s3.timestamps.empty()) ? 0 : s3.timestamps.at(s3.timestamps.size() - 1); + for (size_t i = 0; i < s3.timestamps.size(); i++) { + s3.timestamps.at(i) -= starttime3; + } + double starttime4 = (s4.timestamps.empty()) ? 0 : s4.timestamps.at(0); + double endtime4 = (s4.timestamps.empty()) ? 0 : s4.timestamps.at(s4.timestamps.size() - 1); + for (size_t i = 0; i < s4.timestamps.size(); i++) { + s4.timestamps.at(i) -= starttime4; + } + double starttime5 = (s5.timestamps.empty()) ? 0 : s5.timestamps.at(0); + double endtime5 = (s5.timestamps.empty()) ? 0 : s5.timestamps.at(s5.timestamps.size() - 1); + for (size_t i = 0; i < s5.timestamps.size(); i++) { + s5.timestamps.at(i) -= starttime5; + } + double starttime6 = (s6.timestamps.empty()) ? 0 : s6.timestamps.at(0); + double endtime6 = (s6.timestamps.empty()) ? 
0 : s6.timestamps.at(s6.timestamps.size() - 1); + for (size_t i = 0; i < s6.timestamps.size(); i++) { + s6.timestamps.at(i) -= starttime6; + } + + // Parameters that define the line styles + std::map params_value, params_bound; + // params_value.insert({"label","error"}); + params_value.insert({"linestyle", "-"}); + params_value.insert({"color", color_err}); + // params_bound.insert({"label","3 sigma bound"}); + params_bound.insert({"linestyle", "--"}); + params_bound.insert({"color", color_std}); + + // Plot our error value + matplotlibcpp::subplot(3, 2, 1); + matplotlibcpp::plot(s1.timestamps, s1.values, params_value); + if (!s1.values_bound.empty()) { + matplotlibcpp::plot(s1.timestamps, s1.values_bound, params_bound); + for (size_t i = 0; i < s1.timestamps.size(); i++) { + s1.values_bound.at(i) *= -1; + } + matplotlibcpp::plot(s1.timestamps, s1.values_bound, params_bound); + } + matplotlibcpp::xlim(0.0, endtime1 - starttime1); + + // Plot our error value + matplotlibcpp::subplot(3, 2, 2); + matplotlibcpp::plot(s2.timestamps, s2.values, params_value); + if (!s2.values_bound.empty()) { + matplotlibcpp::plot(s2.timestamps, s2.values_bound, params_bound); + for (size_t i = 0; i < s2.timestamps.size(); i++) { + s2.values_bound.at(i) *= -1; + } + matplotlibcpp::plot(s2.timestamps, s2.values_bound, params_bound); + } + matplotlibcpp::xlim(0.0, endtime2 - starttime2); + + // Plot our error value + matplotlibcpp::subplot(3, 2, 3); + matplotlibcpp::plot(s3.timestamps, s3.values, params_value); + if (!s3.values_bound.empty()) { + matplotlibcpp::plot(s3.timestamps, s3.values_bound, params_bound); + for (size_t i = 0; i < s3.timestamps.size(); i++) { + s3.values_bound.at(i) *= -1; + } + matplotlibcpp::plot(s3.timestamps, s3.values_bound, params_bound); + } + matplotlibcpp::xlim(0.0, endtime3 - starttime3); + + // Plot our error value + matplotlibcpp::subplot(3, 2, 4); + matplotlibcpp::plot(s4.timestamps, s4.values, params_value); + if (!s4.values_bound.empty()) { + 
matplotlibcpp::plot(s4.timestamps, s4.values_bound, params_bound); + for (size_t i = 0; i < s4.timestamps.size(); i++) { + s4.values_bound.at(i) *= -1; + } + matplotlibcpp::plot(s4.timestamps, s4.values_bound, params_bound); + } + matplotlibcpp::xlim(0.0, endtime4 - starttime4); + + // Plot our error value + matplotlibcpp::subplot(3, 2, 5); + matplotlibcpp::plot(s5.timestamps, s5.values, params_value); + if (!s5.values_bound.empty()) { + matplotlibcpp::plot(s5.timestamps, s5.values_bound, params_bound); + for (size_t i = 0; i < s5.timestamps.size(); i++) { + s5.values_bound.at(i) *= -1; + } + matplotlibcpp::plot(s5.timestamps, s5.values_bound, params_bound); + } + matplotlibcpp::xlim(0.0, endtime5 - starttime5); + + // Plot our error value + matplotlibcpp::subplot(3, 2, 6); + matplotlibcpp::plot(s6.timestamps, s6.values, params_value); + if (!s6.values_bound.empty()) { + matplotlibcpp::plot(s6.timestamps, s6.values_bound, params_bound); + for (size_t i = 0; i < s6.timestamps.size(); i++) { + s6.values_bound.at(i) *= -1; + } + matplotlibcpp::plot(s6.timestamps, s6.values_bound, params_bound); + } + matplotlibcpp::xlim(0.0, endtime6 - starttime6); + } + + /** + * @brief Plots nine different statistic values and sigma bounds + * @param s1 Error one + * @param s2 Error two + * @param s3 Error three + * @param s4 Error four + * @param s5 Error five + * @param s6 Error six + * @param s7 Error seven + * @param s8 Error eight + * @param s9 Error nine + * @param color_err MATLAB color string for error line (blue, red, etc.) + * @param color_std MATLAB color string for deviation (blue, red, etc.) + */ + void plot_9errors(ov_eval::Statistics s1, ov_eval::Statistics s2, ov_eval::Statistics s3, ov_eval::Statistics s4, ov_eval::Statistics s5, + ov_eval::Statistics s6, ov_eval::Statistics s7, ov_eval::Statistics s8, ov_eval::Statistics s9, std::string color_err, + std::string color_std) { + + // Zero our time arrays + double starttime1 = (s1.timestamps.empty()) ?
0 : s1.timestamps.at(0); + double endtime1 = (s1.timestamps.empty()) ? 0 : s1.timestamps.at(s1.timestamps.size() - 1); + for (size_t i = 0; i < s1.timestamps.size(); i++) { + s1.timestamps.at(i) -= starttime1; + } + double starttime2 = (s2.timestamps.empty()) ? 0 : s2.timestamps.at(0); + double endtime2 = (s2.timestamps.empty()) ? 0 : s2.timestamps.at(s2.timestamps.size() - 1); + for (size_t i = 0; i < s2.timestamps.size(); i++) { + s2.timestamps.at(i) -= starttime2; + } + double starttime3 = (s3.timestamps.empty()) ? 0 : s3.timestamps.at(0); + double endtime3 = (s3.timestamps.empty()) ? 0 : s3.timestamps.at(s3.timestamps.size() - 1); + for (size_t i = 0; i < s3.timestamps.size(); i++) { + s3.timestamps.at(i) -= starttime3; + } + double starttime4 = (s4.timestamps.empty()) ? 0 : s4.timestamps.at(0); + double endtime4 = (s4.timestamps.empty()) ? 0 : s4.timestamps.at(s4.timestamps.size() - 1); + for (size_t i = 0; i < s4.timestamps.size(); i++) { + s4.timestamps.at(i) -= starttime4; + } + double starttime5 = (s5.timestamps.empty()) ? 0 : s5.timestamps.at(0); + double endtime5 = (s5.timestamps.empty()) ? 0 : s5.timestamps.at(s5.timestamps.size() - 1); + for (size_t i = 0; i < s5.timestamps.size(); i++) { + s5.timestamps.at(i) -= starttime5; + } + double starttime6 = (s6.timestamps.empty()) ? 0 : s6.timestamps.at(0); + double endtime6 = (s6.timestamps.empty()) ? 0 : s6.timestamps.at(s6.timestamps.size() - 1); + for (size_t i = 0; i < s6.timestamps.size(); i++) { + s6.timestamps.at(i) -= starttime6; + } + double starttime7 = (s7.timestamps.empty()) ? 0 : s7.timestamps.at(0); + double endtime7 = (s7.timestamps.empty()) ? 0 : s7.timestamps.at(s7.timestamps.size() - 1); + for (size_t i = 0; i < s7.timestamps.size(); i++) { + s7.timestamps.at(i) -= starttime7; + } + double starttime8 = (s8.timestamps.empty()) ? 0 : s8.timestamps.at(0); + double endtime8 = (s8.timestamps.empty()) ? 
0 : s8.timestamps.at(s8.timestamps.size() - 1); + for (size_t i = 0; i < s8.timestamps.size(); i++) { + s8.timestamps.at(i) -= starttime8; + } + double starttime9 = (s9.timestamps.empty()) ? 0 : s9.timestamps.at(0); + double endtime9 = (s9.timestamps.empty()) ? 0 : s9.timestamps.at(s9.timestamps.size() - 1); + for (size_t i = 0; i < s9.timestamps.size(); i++) { + s9.timestamps.at(i) -= starttime9; + } + + // Parameters that define the line styles + std::map params_value, params_bound; + // params_value.insert({"label","error"}); + params_value.insert({"linestyle", "-"}); + params_value.insert({"color", color_err}); + // params_bound.insert({"label","3 sigma bound"}); + params_bound.insert({"linestyle", "--"}); + params_bound.insert({"color", color_std}); + + // Plot our error value + matplotlibcpp::subplot(3, 3, 1); + matplotlibcpp::plot(s1.timestamps, s1.values, params_value); + if (!s1.values_bound.empty()) { + matplotlibcpp::plot(s1.timestamps, s1.values_bound, params_bound); + for (size_t i = 0; i < s1.timestamps.size(); i++) { + s1.values_bound.at(i) *= -1; + } + matplotlibcpp::plot(s1.timestamps, s1.values_bound, params_bound); + } + matplotlibcpp::xlim(0.0, endtime1 - starttime1); + + // Plot our error value + matplotlibcpp::subplot(3, 3, 2); + matplotlibcpp::plot(s2.timestamps, s2.values, params_value); + if (!s2.values_bound.empty()) { + matplotlibcpp::plot(s2.timestamps, s2.values_bound, params_bound); + for (size_t i = 0; i < s2.timestamps.size(); i++) { + s2.values_bound.at(i) *= -1; + } + matplotlibcpp::plot(s2.timestamps, s2.values_bound, params_bound); + } + matplotlibcpp::xlim(0.0, endtime2 - starttime2); + + // Plot our error value + matplotlibcpp::subplot(3, 3, 3); + matplotlibcpp::plot(s3.timestamps, s3.values, params_value); + if (!s3.values_bound.empty()) { + matplotlibcpp::plot(s3.timestamps, s3.values_bound, params_bound); + for (size_t i = 0; i < s3.timestamps.size(); i++) { + s3.values_bound.at(i) *= -1; + } + 
matplotlibcpp::plot(s3.timestamps, s3.values_bound, params_bound); + } + matplotlibcpp::xlim(0.0, endtime3 - starttime3); + + // Plot our error value + matplotlibcpp::subplot(3, 3, 4); + matplotlibcpp::plot(s4.timestamps, s4.values, params_value); + if (!s4.values_bound.empty()) { + matplotlibcpp::plot(s4.timestamps, s4.values_bound, params_bound); + for (size_t i = 0; i < s4.timestamps.size(); i++) { + s4.values_bound.at(i) *= -1; + } + matplotlibcpp::plot(s4.timestamps, s4.values_bound, params_bound); + } + matplotlibcpp::xlim(0.0, endtime4 - starttime4); + + // Plot our error value + matplotlibcpp::subplot(3, 3, 5); + matplotlibcpp::plot(s5.timestamps, s5.values, params_value); + if (!s5.values_bound.empty()) { + matplotlibcpp::plot(s5.timestamps, s5.values_bound, params_bound); + for (size_t i = 0; i < s5.timestamps.size(); i++) { + s5.values_bound.at(i) *= -1; + } + matplotlibcpp::plot(s5.timestamps, s5.values_bound, params_bound); + } + matplotlibcpp::xlim(0.0, endtime5 - starttime5); + + // Plot our error value + matplotlibcpp::subplot(3, 3, 6); + matplotlibcpp::plot(s6.timestamps, s6.values, params_value); + if (!s6.values_bound.empty()) { + matplotlibcpp::plot(s6.timestamps, s6.values_bound, params_bound); + for (size_t i = 0; i < s6.timestamps.size(); i++) { + s6.values_bound.at(i) *= -1; + } + matplotlibcpp::plot(s6.timestamps, s6.values_bound, params_bound); + } + matplotlibcpp::xlim(0.0, endtime6 - starttime6); + + // Plot our error value + matplotlibcpp::subplot(3, 3, 7); + matplotlibcpp::plot(s7.timestamps, s7.values, params_value); + if (!s7.values_bound.empty()) { + matplotlibcpp::plot(s7.timestamps, s7.values_bound, params_bound); + for (size_t i = 0; i < s7.timestamps.size(); i++) { + s7.values_bound.at(i) *= -1; + } + matplotlibcpp::plot(s7.timestamps, s7.values_bound, params_bound); + } + matplotlibcpp::xlim(0.0, endtime7 - starttime7); + + // Plot our error value + matplotlibcpp::subplot(3, 3, 8); + matplotlibcpp::plot(s8.timestamps, 
s8.values, params_value); + if (!s8.values_bound.empty()) { + matplotlibcpp::plot(s8.timestamps, s8.values_bound, params_bound); + for (size_t i = 0; i < s8.timestamps.size(); i++) { + s8.values_bound.at(i) *= -1; + } + matplotlibcpp::plot(s8.timestamps, s8.values_bound, params_bound); + } + matplotlibcpp::xlim(0.0, endtime8 - starttime8); + + // Plot our error value + matplotlibcpp::subplot(3, 3, 9); + matplotlibcpp::plot(s9.timestamps, s9.values, params_value); + if (!s9.values_bound.empty()) { + matplotlibcpp::plot(s9.timestamps, s9.values_bound, params_bound); + for (size_t i = 0; i < s9.timestamps.size(); i++) { + s9.values_bound.at(i) *= -1; + } + matplotlibcpp::plot(s9.timestamps, s9.values_bound, params_bound); + } + matplotlibcpp::xlim(0.0, endtime9 - starttime9); + } #endif }; diff --git a/ov_eval/src/calc/ResultTrajectory.cpp b/ov_eval/src/calc/ResultTrajectory.cpp index 33a13b695..851d346ee 100644 --- a/ov_eval/src/calc/ResultTrajectory.cpp +++ b/ov_eval/src/calc/ResultTrajectory.cpp @@ -34,6 +34,13 @@ ResultTrajectory::ResultTrajectory(std::string path_est, std::string path_gt, st // std::string base_filename2 = path_gt.substr(path_gt.find_last_of("/\\") + 1); // PRINT_DEBUG("[TRAJ]: loaded %d poses from %s\n",(int)est_times.size(),base_filename1.c_str()); // PRINT_DEBUG("[TRAJ]: loaded %d poses from %s\n",(int)gt_times.size(),base_filename2.c_str()); + double len_gt = ov_eval::Loader::get_total_length(gt_poses); + double len_est = ov_eval::Loader::get_total_length(est_poses); + double ratio = len_est / len_gt; + if (ratio > 1.1 || ratio < 0.9) { + PRINT_WARNING(YELLOW "[TRAJ]: Trajectory is a bad ratio of %.2f length (est %.2f, gt %.2f)\n", ratio, len_est, len_gt); + PRINT_WARNING(YELLOW "[TRAJ]: %s\n", path_est.c_str()); + } // Intersect timestamps AlignUtils::perform_association(0, 0.02, est_times, gt_times, est_poses, gt_poses, est_covori, est_covpos, gt_covori, gt_covpos); diff --git a/ov_eval/src/error_comparison.cpp 
b/ov_eval/src/error_comparison.cpp index b6d967b16..9cda7627b 100644 --- a/ov_eval/src/error_comparison.cpp +++ b/ov_eval/src/error_comparison.cpp @@ -91,12 +91,14 @@ int main(int argc, char **argv) { // ATE summery information std::map>> algo_ate; + std::map>> algo_ate_2d; for (const auto &p : path_algorithms) { std::vector> temp; for (size_t i = 0; i < path_groundtruths.size(); i++) { temp.push_back({ov_eval::Statistics(), ov_eval::Statistics()}); } algo_ate.insert({p.filename().string(), temp}); + algo_ate_2d.insert({p.filename().string(), temp}); } // Relative pose error segment lengths @@ -104,7 +106,7 @@ int main(int argc, char **argv) { // std::vector segments = {7.0, 14.0, 21.0, 28.0, 35.0}; // std::vector segments = {10.0, 25.0, 50.0, 75.0, 120.0}; // std::vector segments = {5.0, 15.0, 30.0, 45.0, 60.0}; - // std::vector segments = {40.0, 60.0, 80.0, 100.0, 120.0}; + // std::vector segments = {40.0, 80.0, 120.0, 160.0, 200.0, 240.0}; // The overall RPE error calculation for each algorithm type std::map>> algo_rpe; @@ -150,8 +152,8 @@ int main(int argc, char **argv) { path_groundtruths.at(j).stem().c_str()); // Errors for this specific dataset (i.e. 
our averages over the total runs) - ov_eval::Statistics ate_dataset_ori; - ov_eval::Statistics ate_dataset_pos; + ov_eval::Statistics ate_dataset_ori, ate_dataset_pos; + ov_eval::Statistics ate_2d_dataset_ori, ate_2d_dataset_pos; std::map> rpe_dataset; for (const auto &len : segments) { rpe_dataset.insert({len, {ov_eval::Statistics(), ov_eval::Statistics()}}); @@ -181,6 +183,12 @@ int main(int argc, char **argv) { ate_dataset_ori.values.push_back(error_ori.rmse); ate_dataset_pos.values.push_back(error_pos.rmse); + // Calculate ATE 2D error for this dataset + ov_eval::Statistics error_ori_2d, error_pos_2d; + traj.calculate_ate_2d(error_ori_2d, error_pos_2d); + ate_2d_dataset_ori.values.push_back(error_ori_2d.rmse); + ate_2d_dataset_pos.values.push_back(error_pos_2d.rmse); + // Calculate RPE error for this dataset std::map> error_rpe; traj.calculate_rpe(segments, error_rpe); @@ -199,12 +207,17 @@ int main(int argc, char **argv) { // Compute our mean ATE score ate_dataset_ori.calculate(); ate_dataset_pos.calculate(); + ate_2d_dataset_ori.calculate(); + ate_2d_dataset_pos.calculate(); // Print stats for this specific dataset std::string prefix = (ate_dataset_ori.mean > 10 || ate_dataset_pos.mean > 10) ? 
RED : ""; PRINT_DEBUG("%s\tATE: mean_ori = %.3f | mean_pos = %.3f (%d runs)\n" RESET, prefix.c_str(), ate_dataset_ori.mean, ate_dataset_pos.mean, (int)ate_dataset_pos.values.size()); PRINT_DEBUG("\tATE: std_ori = %.3f | std_pos = %.3f\n", ate_dataset_ori.std, ate_dataset_pos.std); + PRINT_DEBUG("\tATE 2D: mean_ori = %.3f | mean_pos = %.3f (%d runs)\n", ate_2d_dataset_ori.mean, ate_2d_dataset_pos.mean, + (int)ate_2d_dataset_ori.values.size()); + PRINT_DEBUG("\tATE 2D: std_ori = %.5f | std_pos = %.5f\n", ate_2d_dataset_ori.std, ate_2d_dataset_pos.std); for (auto &seg : rpe_dataset) { seg.second.first.calculate(); seg.second.second.calculate(); @@ -219,6 +232,8 @@ int main(int argc, char **argv) { std::string algo = path_algorithms.at(i).filename().string(); algo_ate.at(algo).at(j).first = ate_dataset_ori; algo_ate.at(algo).at(j).second = ate_dataset_pos; + algo_ate_2d.at(algo).at(j).first = ate_2d_dataset_ori; + algo_ate_2d.at(algo).at(j).second = ate_2d_dataset_pos; // Update the global RPE error stats for (const auto &elm : rpe_dataset) { @@ -272,6 +287,39 @@ int main(int argc, char **argv) { } PRINT_INFO("============================================\n"); + // Finally print the 2D ATE for all the runs + PRINT_INFO("============================================\n"); + PRINT_INFO("ATE 2D LATEX TABLE\n"); + PRINT_INFO("============================================\n"); + for (size_t i = 0; i < path_groundtruths.size(); i++) { + std::string gtname = path_groundtruths.at(i).stem().string(); + boost::replace_all(gtname, "_", "\\_"); + PRINT_INFO(" & \\textbf{%s}", gtname.c_str()); + } + PRINT_INFO(" & \\textbf{Average} \\\\\\hline\n"); + for (auto &algo : algo_ate_2d) { + std::string algoname = algo.first; + boost::replace_all(algoname, "_", "\\_"); + PRINT_INFO(algoname.c_str()); + double sum_ori = 0.0; + double sum_pos = 0.0; + int sum_ct = 0; + for (auto &seg : algo.second) { + if (seg.first.values.empty() || seg.second.values.empty()) { + PRINT_INFO(" & - / -"); + } else
{ + seg.first.calculate(); + seg.second.calculate(); + PRINT_INFO(" & %.3f / %.3f", seg.first.mean, seg.second.mean); + sum_ori += seg.first.mean; + sum_pos += seg.second.mean; + sum_ct++; + } + } + PRINT_INFO(" & %.3f / %.3f \\\\\n", sum_ori / sum_ct, sum_pos / sum_ct); + } + PRINT_INFO("============================================\n"); + // Finally print the RPE for all the runs PRINT_INFO("============================================\n"); PRINT_INFO("RPE LATEX TABLE\n"); @@ -385,6 +433,7 @@ int main(int argc, char **argv) { matplotlibcpp::title("Relative Position Error"); matplotlibcpp::ylabel("translational error (m)"); matplotlibcpp::xlabel("sub-segment lengths (m)"); + matplotlibcpp::tight_layout(); matplotlibcpp::show(true); #endif diff --git a/ov_eval/src/error_simulation.cpp b/ov_eval/src/error_simulation.cpp index 6591b24aa..73483a620 100644 --- a/ov_eval/src/error_simulation.cpp +++ b/ov_eval/src/error_simulation.cpp @@ -63,6 +63,10 @@ int main(int argc, char **argv) { PRINT_INFO("Plotting camera extrinsics...\n"); traj.plot_cam_extrinsics(true, 60); + // Plot IMU intrinsics + PRINT_INFO("Plotting IMU intrinsics...\n"); + traj.plot_imu_intrinsics(true, 60); + #ifdef HAVE_PYTHONLIBS matplotlibcpp::show(true); #endif diff --git a/ov_eval/src/error_singlerun.cpp b/ov_eval/src/error_singlerun.cpp index e0ddfecb5..625453883 100644 --- a/ov_eval/src/error_singlerun.cpp +++ b/ov_eval/src/error_singlerun.cpp @@ -172,6 +172,7 @@ int main(int argc, char **argv) { matplotlibcpp::title("Relative Orientation Error"); matplotlibcpp::ylabel("orientation error (deg)"); matplotlibcpp::xlabel("sub-segment lengths (m)"); + matplotlibcpp::tight_layout(); matplotlibcpp::show(false); // Plot this figure @@ -189,6 +190,7 @@ int main(int argc, char **argv) { matplotlibcpp::title("Relative Position Error"); matplotlibcpp::ylabel("translation error (m)"); matplotlibcpp::xlabel("sub-segment lengths (m)"); + matplotlibcpp::tight_layout(); matplotlibcpp::show(false); #endif diff 
--git a/ov_eval/src/format_converter.cpp b/ov_eval/src/format_converter.cpp index b69be4980..4cf9c8bd2 100644 --- a/ov_eval/src/format_converter.cpp +++ b/ov_eval/src/format_converter.cpp @@ -134,8 +134,8 @@ int main(int argc, char **argv) { // Ensure we have a path if (argc < 2) { PRINT_ERROR(RED "ERROR: Please specify a file to convert\n" RESET); - PRINT_ERROR(RED "ERROR: ./format_convert \n" RESET); + PRINT_ERROR(RED "ERROR: ./format_converter \n" RESET); std::exit(EXIT_FAILURE); } diff --git a/ov_eval/src/plot_trajectories.cpp b/ov_eval/src/plot_trajectories.cpp index 7cde1e28b..b3cb25716 100644 --- a/ov_eval/src/plot_trajectories.cpp +++ b/ov_eval/src/plot_trajectories.cpp @@ -177,6 +177,7 @@ int main(int argc, char **argv) { // Display to the user matplotlibcpp::xlabel("x-axis (m)"); matplotlibcpp::ylabel("y-axis (m)"); + matplotlibcpp::tight_layout(); matplotlibcpp::show(false); // Plot this figure @@ -201,6 +202,7 @@ int main(int argc, char **argv) { matplotlibcpp::ylabel("z-axis (m)"); matplotlibcpp::xlim(0.0, endtime - starttime); matplotlibcpp::legend(); + matplotlibcpp::tight_layout(); matplotlibcpp::show(true); #endif diff --git a/ov_init/package.xml b/ov_init/package.xml index ad8ccb483..8a39028cb 100644 --- a/ov_init/package.xml +++ b/ov_init/package.xml @@ -3,7 +3,7 @@ ov_init - 2.6.3 + 2.7.0 Initialization package which handles static and dynamic initialization. 
diff --git a/ov_init/src/dynamic/DynamicInitializer.cpp b/ov_init/src/dynamic/DynamicInitializer.cpp index dc8192fa8..dc48a11c9 100644 --- a/ov_init/src/dynamic/DynamicInitializer.cpp +++ b/ov_init/src/dynamic/DynamicInitializer.cpp @@ -69,7 +69,7 @@ bool DynamicInitializer::initialize(double ×tamp, Eigen::MatrixXd &covarian have_old_imu_readings = true; it_imu = imu_data->erase(it_imu); } - if (_db->get_internal_data().size() < 0.95 * params.init_max_features) { + if (_db->get_internal_data().size() < 0.75 * params.init_max_features) { PRINT_WARNING(RED "[init-d]: only %zu valid features of required (%.0f thresh)!!\n" RESET, _db->get_internal_data().size(), 0.95 * params.init_max_features); return false; @@ -224,6 +224,7 @@ bool DynamicInitializer::initialize(double ×tamp, Eigen::MatrixXd &covarian // We will recover position of feature, velocity, gravity // Based on Equation (14) in the following paper: // https://ieeexplore.ieee.org/abstract/document/6386235 + // State ordering is: [features, velocity, gravity] // Feature size of 1 will use the first ever bearing of the feature as true (depth only..) const bool use_single_depth = false; int size_feature = (use_single_depth) ? 1 : 3; @@ -306,6 +307,7 @@ bool DynamicInitializer::initialize(double ×tamp, Eigen::MatrixXd &covarian } // Loop through each feature observation and append it! 
+ // State ordering is: [features, velocity, gravity] Eigen::MatrixXd A = Eigen::MatrixXd::Zero(num_measurements, system_size); Eigen::VectorXd b = Eigen::VectorXd::Zero(num_measurements); PRINT_DEBUG("[init-d]: system of %d measurement x %d states created (%d features, %s)\n", num_measurements, system_size, num_features, @@ -358,35 +360,23 @@ bool DynamicInitializer::initialize(double ×tamp, Eigen::MatrixXd &covarian // = R_C0toCi * R_ItoC * (p_FinI0 - v_I0inI0 * dt - 0.5 * grav_inI0 * dt^2 - alpha) + p_IinC Eigen::MatrixXd H_proj = Eigen::MatrixXd::Zero(2, 3); H_proj << 1, 0, -uv_norm(0), 0, 1, -uv_norm(1); - Eigen::MatrixXd H = Eigen::MatrixXd::Zero(2, system_size); + Eigen::MatrixXd Y = H_proj * R_ItoC * R_I0toIk; + Eigen::MatrixXd H_i = Eigen::MatrixXd::Zero(2, system_size); + Eigen::MatrixXd b_i = Y * alpha_I0toIk - H_proj * p_IinC; if (size_feature == 1) { assert(false); - // H.block(0, size_feature * map_features.at(feat.first), 2, 1) = H_proj * R_ItoC * R_I0toIk * features_bearings.at(feat->featid); + // Substitute in p_FinI0 = z*bearing_inC0_rotI0 - R_ItoC^T*p_IinC + // H_i.block(0, size_feature * A_index_features.at(feat.first), 2, 1) = Y * features_bearings.at(feat.first); + // b_i += Y * R_ItoC.transpose() * p_IinC; } else { - H.block(0, size_feature * A_index_features.at(feat.first), 2, 3) = H_proj * R_ItoC * R_I0toIk; // feat + H_i.block(0, size_feature * A_index_features.at(feat.first), 2, 3) = Y; // feat } - H.block(0, size_feature * num_features + 0, 2, 3) = -H_proj * R_ItoC * R_I0toIk * DT; // vel - H.block(0, size_feature * num_features + 3, 2, 3) = 0.5 * H_proj * R_ItoC * R_I0toIk * DT * DT; // grav - Eigen::MatrixXd b_i = H_proj * (R_ItoC * R_I0toIk * alpha_I0toIk - p_IinC); - - // Uncertainty of the measurement is the feature pixel noise and and preintegration - // TODO: We should propagate the raw pixel uncertainty to the normalized and the preintegration error to the measurement - // TODO: Fix this logic since this uses a unique H_proj 
matrix structure... - // Eigen::MatrixXd R_sqrtinv = 1e4 * Eigen::MatrixXd::Identity(2, 2); - // if (map_camera_cpi_I0toIi.find(time) != map_camera_cpi_I0toIi.end() && map_camera_cpi_I0toIi.at(time) != nullptr) { - // Eigen::MatrixXd H_cpi = H_proj * R_ItoC * R_I0toIk; - // Eigen::MatrixXd P_alpha = map_camera_cpi_I0toIi.at(time)->P_meas.block(12, 12, 3, 3); - // Eigen::MatrixXd R = H_cpi * P_alpha * H_cpi.transpose(); - // R_sqrtinv = R.llt().matrixL(); - // R_sqrtinv = R_sqrtinv.colPivHouseholderQr().solve(Eigen::MatrixXd::Identity(R.rows(), R.rows())); - // } - // Eigen::MatrixXd H_dz_dzn, H_dz_dzeta; - // params.camera_intrinsics.at(cam_id)->compute_distort_jacobian(uv_norm, H_dz_dzn, H_dz_dzeta); - // Eigen::MatrixXd R_pixel = std::pow(params.sigma_pix, 2) * H_dz_dzn * H_dz_dzn.transpose(); + H_i.block(0, size_feature * num_features + 0, 2, 3) = -DT * Y; // vel + H_i.block(0, size_feature * num_features + 3, 2, 3) = 0.5 * DT * DT * Y; // grav // Else lets append this to our system! 
- A.block(index_meas, 0, 2, A.cols()) = H; // R_sqrtinv * H; - b.block(index_meas, 0, 2, 1) = b_i; // R_sqrtinv * b_i; + A.block(index_meas, 0, 2, A.cols()) = H_i; + b.block(index_meas, 0, 2, 1) = b_i; index_meas += 2; } } @@ -471,7 +461,7 @@ bool DynamicInitializer::initialize(double ×tamp, Eigen::MatrixXd &covarian Eigen::MatrixXd D_lambdaI_inv = (D - lambda_min * I_dd).llt().solve(I_dd); Eigen::VectorXd state_grav = D_lambdaI_inv * d; - // Overwrite our state + // Overwrite our state: [features, velocity, gravity] Eigen::VectorXd state_feat_vel = -A1A1_inv * A1.transpose() * A2 * state_grav + A1A1_inv * A1.transpose() * b; Eigen::MatrixXd x_hat = Eigen::MatrixXd::Zero(system_size, 1); x_hat.block(0, 0, size_feature * num_features + 3, 1) = state_feat_vel; @@ -533,15 +523,17 @@ bool DynamicInitializer::initialize(double ×tamp, Eigen::MatrixXd &covarian Eigen::Vector3d p_FinI0; if (size_feature == 1) { assert(false); - // p_FinI0 = x_hat(size_feature * map_features.at(feat.first) + 6, 0) * features_bearings.at(feat->featid); + // double depth = x_hat(size_feature * A_index_features.at(feat.first), 0); + // p_FinI0 = depth * features_bearings.at(feat.first) - R_ItoC.transpose() * p_IinC; } else { - p_FinI0 = x_hat.block(size_feature * A_index_features.at(feat.first) + 6, 0, 3, 1); + p_FinI0 = x_hat.block(size_feature * A_index_features.at(feat.first), 0, 3, 1); } bool is_behind = false; for (auto const &camtime : feat.second->timestamps) { size_t cam_id = camtime.first; Eigen::Vector4d q_ItoC = params.camera_extrinsics.at(cam_id).block(0, 0, 4, 1); - Eigen::Vector3d p_FinC0 = quat_2_Rot(q_ItoC) * p_FinI0; + Eigen::Vector3d p_IinC = params.camera_extrinsics.at(cam_id).block(4, 0, 3, 1); + Eigen::Vector3d p_FinC0 = quat_2_Rot(q_ItoC) * p_FinI0 + p_IinC; if (p_FinC0(2) < 0) { is_behind = true; } @@ -602,6 +594,28 @@ bool DynamicInitializer::initialize(double ×tamp, Eigen::MatrixXd &covarian std::map map_calib_cam; std::vector ceres_vars_calib_cam_intrinsics; + // 
Helper lambda that will free any memory we have allocated + auto free_state_memory = [&]() { + for (auto const &ptr : ceres_vars_ori) + delete[] ptr; + for (auto const &ptr : ceres_vars_pos) + delete[] ptr; + for (auto const &ptr : ceres_vars_vel) + delete[] ptr; + for (auto const &ptr : ceres_vars_bias_g) + delete[] ptr; + for (auto const &ptr : ceres_vars_bias_a) + delete[] ptr; + for (auto const &ptr : ceres_vars_feat) + delete[] ptr; + for (auto const &ptr : ceres_vars_calib_cam2imu_ori) + delete[] ptr; + for (auto const &ptr : ceres_vars_calib_cam2imu_pos) + delete[] ptr; + for (auto const &ptr : ceres_vars_calib_cam_intrinsics) + delete[] ptr; + }; + // Set the optimization settings // NOTE: We use dense schur since after eliminating features we have a dense problem // NOTE: http://ceres-solver.org/solving_faqs.html#solving @@ -887,6 +901,7 @@ bool DynamicInitializer::initialize(double ×tamp, Eigen::MatrixXd &covarian timestamp = newest_cam_time; if (params.init_dyn_mle_max_iter != 0 && summary.termination_type != ceres::CONVERGENCE) { PRINT_WARNING(YELLOW "[init-d]: opt failed: %s!\n" RESET, summary.message.c_str()); + free_state_memory(); return false; } PRINT_DEBUG("[init-d]: %s\n", summary.message.c_str()); @@ -990,13 +1005,14 @@ bool DynamicInitializer::initialize(double ×tamp, Eigen::MatrixXd &covarian ceres::Covariance::Options options_cov; options_cov.null_space_rank = (!params.init_dyn_mle_opt_calib) * ((int)map_calib_cam2imu.size() * (6 + 8)); options_cov.min_reciprocal_condition_number = params.init_dyn_min_rec_cond; - options_cov.algorithm_type = ceres::CovarianceAlgorithmType::DENSE_SVD; // SPARSE_QR, DENSE_SVD - options_cov.apply_loss_function = false; + // options_cov.algorithm_type = ceres::CovarianceAlgorithmType::DENSE_SVD; + options_cov.apply_loss_function = true; // Better consistency if we use this options_cov.num_threads = params.init_dyn_mle_max_threads; ceres::Covariance problem_cov(options_cov); bool success = 
problem_cov.Compute(covariance_blocks, &problem); if (!success) { PRINT_WARNING(YELLOW "[init-d]: covariance recovery failed...\n" RESET); + free_state_memory(); return false; } @@ -1086,5 +1102,6 @@ bool DynamicInitializer::initialize(double ×tamp, Eigen::MatrixXd &covarian PRINT_DEBUG("[TIME]: %.4f sec for ceres opt\n", (rT6 - rT5).total_microseconds() * 1e-6); PRINT_DEBUG("[TIME]: %.4f sec for ceres covariance\n", (rT7 - rT6).total_microseconds() * 1e-6); PRINT_DEBUG("[TIME]: %.4f sec total for initialization\n", (rT7 - rT1).total_microseconds() * 1e-6); + free_state_memory(); return true; } diff --git a/ov_init/src/init/InertialInitializer.cpp b/ov_init/src/init/InertialInitializer.cpp index 04e51071c..1ee82175c 100644 --- a/ov_init/src/init/InertialInitializer.cpp +++ b/ov_init/src/init/InertialInitializer.cpp @@ -132,7 +132,7 @@ bool InertialInitializer::initialize(double ×tamp, Eigen::MatrixXd &covaria if (((has_jerk && wait_for_jerk) || (is_still && !wait_for_jerk)) && params.init_imu_thresh > 0.0) { PRINT_DEBUG(GREEN "[init]: USING STATIC INITIALIZER METHOD!\n" RESET); return init_static->initialize(timestamp, covariance, order, t_imu, wait_for_jerk); - } else if (params.init_dyn_use) { + } else if (params.init_dyn_use && !is_still) { PRINT_DEBUG(GREEN "[init]: USING DYNAMIC INITIALIZER METHOD!\n" RESET); std::map> _clones_IMU; std::unordered_map> _features_SLAM; diff --git a/ov_init/src/test_dynamic_init.cpp b/ov_init/src/test_dynamic_init.cpp index 87196e2b3..9b20ced93 100644 --- a/ov_init/src/test_dynamic_init.cpp +++ b/ov_init/src/test_dynamic_init.cpp @@ -311,13 +311,13 @@ int main(int argc, char **argv) { sensor_msgs::PointCloud2 cloud; cloud.header.frame_id = "global"; cloud.header.stamp = ros::Time::now(); - cloud.width = 3 * _features_SLAM.size(); + cloud.width = _features_SLAM.size(); cloud.height = 1; cloud.is_bigendian = false; cloud.is_dense = false; // there may be invalid points sensor_msgs::PointCloud2Modifier modifier(cloud); 
modifier.setPointCloud2FieldsByString(1, "xyz"); - modifier.resize(3 * _features_SLAM.size()); + modifier.resize(_features_SLAM.size()); sensor_msgs::PointCloud2Iterator out_x(cloud, "x"); sensor_msgs::PointCloud2Iterator out_y(cloud, "y"); sensor_msgs::PointCloud2Iterator out_z(cloud, "z"); diff --git a/ov_init/src/test_dynamic_mle.cpp b/ov_init/src/test_dynamic_mle.cpp index 42212a426..4cdea0937 100644 --- a/ov_init/src/test_dynamic_mle.cpp +++ b/ov_init/src/test_dynamic_mle.cpp @@ -614,13 +614,13 @@ int main(int argc, char **argv) { sensor_msgs::PointCloud2 cloud; cloud.header.frame_id = "global"; cloud.header.stamp = ros::Time::now(); - cloud.width = 3 * map_features.size(); + cloud.width = map_features.size(); cloud.height = 1; cloud.is_bigendian = false; cloud.is_dense = false; // there may be invalid points sensor_msgs::PointCloud2Modifier modifier(cloud); modifier.setPointCloud2FieldsByString(1, "xyz"); - modifier.resize(3 * map_features.size()); + modifier.resize(map_features.size()); sensor_msgs::PointCloud2Iterator out_x(cloud, "x"); sensor_msgs::PointCloud2Iterator out_y(cloud, "y"); sensor_msgs::PointCloud2Iterator out_z(cloud, "z"); diff --git a/ov_msckf/launch/serial.launch b/ov_msckf/launch/serial.launch index b4dd35fcc..0ba496e34 100644 --- a/ov_msckf/launch/serial.launch +++ b/ov_msckf/launch/serial.launch @@ -2,14 +2,14 @@ - + - + @@ -24,7 +24,6 @@ - diff --git a/ov_msckf/launch/simulation.launch b/ov_msckf/launch/simulation.launch index 753785ff3..e03109b3c 100644 --- a/ov_msckf/launch/simulation.launch +++ b/ov_msckf/launch/simulation.launch @@ -10,7 +10,7 @@ - + @@ -24,27 +24,26 @@ - - + - - + + + + - + - + - - @@ -78,17 +77,19 @@ - - - - - - - - - - - + + + + + + + + + + + + + diff --git a/ov_msckf/launch/subscribe.launch b/ov_msckf/launch/subscribe.launch index beacbcc43..c7dd243c3 100644 --- a/ov_msckf/launch/subscribe.launch +++ b/ov_msckf/launch/subscribe.launch @@ -26,7 +26,6 @@ - diff --git a/ov_msckf/package.xml b/ov_msckf/package.xml 
index 09d0412b7..3386aba83 100644 --- a/ov_msckf/package.xml +++ b/ov_msckf/package.xml @@ -3,7 +3,7 @@ ov_msckf - 2.6.3 + 2.7.0 Implementation of a type-based error-state Kalman filter. diff --git a/ov_msckf/scripts/run_ros_eth.sh b/ov_msckf/scripts/run_ros_eth.sh index 91cd27878..7c0e5c150 100755 --- a/ov_msckf/scripts/run_ros_eth.sh +++ b/ov_msckf/scripts/run_ros_eth.sh @@ -39,18 +39,18 @@ bagstarttimes=( "0" "0" "0" - "40" # 40 - "35" # 35 - "5" # 10 - "10" # 17 - "5" # 18 + "40" + "35" + "5" + "10" + "5" # stereo can fail if starts while still due to bad left-right KLT.... ) # location to save log files into save_path1="/home/patrick/github/pubs_data/pgeneva/2023_openvins_reproduce/exp_euroc/algorithms" save_path2="/home/patrick/github/pubs_data/pgeneva/2023_openvins_reproduce/exp_euroc/timings" -bag_path="/home/patrick/datasets/euroc_mav/" -ov_ver="2.6.3" +bag_path="/media/patrick/Windows/datasets/euroc_mav/" +ov_ver="2.7" #============================================================= diff --git a/ov_msckf/scripts/run_ros_kaist.sh b/ov_msckf/scripts/run_ros_kaist.sh index bb92f00a5..bccace9a1 100755 --- a/ov_msckf/scripts/run_ros_kaist.sh +++ b/ov_msckf/scripts/run_ros_kaist.sh @@ -18,7 +18,7 @@ modes=( # dataset locations bagnames=( "urban28" - "urban32" +# "urban32" # "urban34" # too strong of sun... 
"urban38" "urban39" @@ -37,8 +37,8 @@ bagstarttimes=( # location to save log files into save_path1="/home/patrick/github/pubs_data/pgeneva/2023_openvins_reproduce/exp_kaist/algorithms" save_path2="/home/patrick/github/pubs_data/pgeneva/2023_openvins_reproduce/exp_kaist/timings" -bag_path="/home/patrick/datasets/euroc_mav/" -ov_ver="2.6.3" +bag_path="/media/patrick/Windows/datasets/kaist/" +ov_ver="2.7" #============================================================= diff --git a/ov_msckf/scripts/run_ros_tumvi.sh b/ov_msckf/scripts/run_ros_tumvi.sh index 76e15bf5f..15aace19f 100755 --- a/ov_msckf/scripts/run_ros_tumvi.sh +++ b/ov_msckf/scripts/run_ros_tumvi.sh @@ -39,8 +39,8 @@ bagstarttimes=( # location to save log files into save_path1="/home/patrick/github/pubs_data/pgeneva/2023_openvins_reproduce/exp_tumvi/algorithms" save_path2="/home/patrick/github/pubs_data/pgeneva/2023_openvins_reproduce/exp_tumvi/timings" -bag_path="/home/patrick/datasets/tum_vi/" -ov_ver="2.6.3" +bag_path="/media/patrick/Windows/datasets/tum_vi/" +ov_ver="2.7" #============================================================= diff --git a/ov_msckf/scripts/run_ros_uzhfpv.sh b/ov_msckf/scripts/run_ros_uzhfpv.sh index 1b08f4abb..392a547cb 100755 --- a/ov_msckf/scripts/run_ros_uzhfpv.sh +++ b/ov_msckf/scripts/run_ros_uzhfpv.sh @@ -82,8 +82,8 @@ bagstarttimes=( # location to save log files into save_path1="/home/patrick/github/pubs_data/pgeneva/2023_openvins_reproduce/exp_uzhfpv/algorithms" save_path2="/home/patrick/github/pubs_data/pgeneva/2023_openvins_reproduce/exp_uzhfpv/timings" -bag_path="/home/patrick/datasets/euroc_mav/" -ov_ver="2.6.3" +bag_path="/media/patrick/RPNG FLASH 3/" +ov_ver="2.7" #============================================================= diff --git a/ov_msckf/scripts/run_sim_calib.sh b/ov_msckf/scripts/run_sim_calib.sh index 40a7d72ce..c1837ea66 100755 --- a/ov_msckf/scripts/run_sim_calib.sh +++ b/ov_msckf/scripts/run_sim_calib.sh @@ -10,10 +10,10 @@ source 
${SCRIPT_DIR}/../../../../devel/setup.bash # datasets datasets=( - "udel_gore" +# "udel_gore" # "udel_arl" # "udel_gore_zupt" -# "tum_corridor1_512_16_okvis" + "tum_corridor1_512_16_okvis" ) # If we want to calibrate parameters diff --git a/ov_msckf/scripts/run_sim_cams.sh b/ov_msckf/scripts/run_sim_cams.sh index 132d02dc4..12f18e753 100755 --- a/ov_msckf/scripts/run_sim_cams.sh +++ b/ov_msckf/scripts/run_sim_cams.sh @@ -10,10 +10,10 @@ source ${SCRIPT_DIR}/../../../../devel/setup.bash # datasets datasets=( - "udel_gore" +# "udel_gore" # "udel_arl" # "udel_gore_zupt" -# "tum_corridor1_512_16_okvis" + "tum_corridor1_512_16_okvis" ) # what modes to use diff --git a/ov_msckf/scripts/run_sim_featrep.sh b/ov_msckf/scripts/run_sim_featrep.sh index b97c81533..3edaa5c0d 100755 --- a/ov_msckf/scripts/run_sim_featrep.sh +++ b/ov_msckf/scripts/run_sim_featrep.sh @@ -10,10 +10,10 @@ source ${SCRIPT_DIR}/../../../../devel/setup.bash # datasets datasets=( - "udel_gore" +# "udel_gore" # "udel_arl" # "udel_gore_zupt" -# "tum_corridor1_512_16_okvis" + "tum_corridor1_512_16_okvis" ) # estimator configurations diff --git a/ov_msckf/src/core/VioManager.cpp b/ov_msckf/src/core/VioManager.cpp index 0fc67b2f0..6a0d7b914 100644 --- a/ov_msckf/src/core/VioManager.cpp +++ b/ov_msckf/src/core/VioManager.cpp @@ -69,6 +69,18 @@ VioManager::VioManager(VioManagerOptions ¶ms_) : thread_init_running(false), // Create the state!! 
state = std::make_shared(params.state_options); + // Set the IMU intrinsics + state->_calib_imu_dw->set_value(params.vec_dw); + state->_calib_imu_dw->set_fej(params.vec_dw); + state->_calib_imu_da->set_value(params.vec_da); + state->_calib_imu_da->set_fej(params.vec_da); + state->_calib_imu_tg->set_value(params.vec_tg); + state->_calib_imu_tg->set_fej(params.vec_tg); + state->_calib_imu_GYROtoIMU->set_value(params.q_GYROtoIMU); + state->_calib_imu_GYROtoIMU->set_fej(params.q_GYROtoIMU); + state->_calib_imu_ACCtoIMU->set_value(params.q_ACCtoIMU); + state->_calib_imu_ACCtoIMU->set_fej(params.q_ACCtoIMU); + // Timeoffset from camera to IMU Eigen::VectorXd temp_camimu_dt; temp_camimu_dt.resize(1); @@ -667,4 +679,36 @@ void VioManager::do_feature_propagate_update(const ov_core::CameraData &message) calib->quat()(3), calib->pos()(0), calib->pos()(1), calib->pos()(2)); } } + + // Debug for imu intrinsics + if (state->_options.do_calib_imu_intrinsics && state->_options.imu_model == StateOptions::ImuModel::KALIBR) { + PRINT_INFO("q_GYROtoI = %.3f,%.3f,%.3f,%.3f\n", state->_calib_imu_GYROtoIMU->value()(0), state->_calib_imu_GYROtoIMU->value()(1), + state->_calib_imu_GYROtoIMU->value()(2), state->_calib_imu_GYROtoIMU->value()(3)); + } + if (state->_options.do_calib_imu_intrinsics && state->_options.imu_model == StateOptions::ImuModel::RPNG) { + PRINT_INFO("q_ACCtoI = %.3f,%.3f,%.3f,%.3f\n", state->_calib_imu_ACCtoIMU->value()(0), state->_calib_imu_ACCtoIMU->value()(1), + state->_calib_imu_ACCtoIMU->value()(2), state->_calib_imu_ACCtoIMU->value()(3)); + } + if (state->_options.do_calib_imu_intrinsics && state->_options.imu_model == StateOptions::ImuModel::KALIBR) { + PRINT_INFO("Dw = | %.4f,%.4f,%.4f | %.4f,%.4f | %.4f |\n", state->_calib_imu_dw->value()(0), state->_calib_imu_dw->value()(1), + state->_calib_imu_dw->value()(2), state->_calib_imu_dw->value()(3), state->_calib_imu_dw->value()(4), + state->_calib_imu_dw->value()(5)); + PRINT_INFO("Da = | %.4f,%.4f,%.4f | %.4f,%.4f 
| %.4f |\n", state->_calib_imu_da->value()(0), state->_calib_imu_da->value()(1), + state->_calib_imu_da->value()(2), state->_calib_imu_da->value()(3), state->_calib_imu_da->value()(4), + state->_calib_imu_da->value()(5)); + } + if (state->_options.do_calib_imu_intrinsics && state->_options.imu_model == StateOptions::ImuModel::RPNG) { + PRINT_INFO("Dw = | %.4f | %.4f,%.4f | %.4f,%.4f,%.4f |\n", state->_calib_imu_dw->value()(0), state->_calib_imu_dw->value()(1), + state->_calib_imu_dw->value()(2), state->_calib_imu_dw->value()(3), state->_calib_imu_dw->value()(4), + state->_calib_imu_dw->value()(5)); + PRINT_INFO("Da = | %.4f | %.4f,%.4f | %.4f,%.4f,%.4f |\n", state->_calib_imu_da->value()(0), state->_calib_imu_da->value()(1), + state->_calib_imu_da->value()(2), state->_calib_imu_da->value()(3), state->_calib_imu_da->value()(4), + state->_calib_imu_da->value()(5)); + } + if (state->_options.do_calib_imu_intrinsics && state->_options.do_calib_imu_g_sensitivity) { + PRINT_INFO("Tg = | %.4f,%.4f,%.4f | %.4f,%.4f,%.4f | %.4f,%.4f,%.4f |\n", state->_calib_imu_tg->value()(0), + state->_calib_imu_tg->value()(1), state->_calib_imu_tg->value()(2), state->_calib_imu_tg->value()(3), + state->_calib_imu_tg->value()(4), state->_calib_imu_tg->value()(5), state->_calib_imu_tg->value()(6), + state->_calib_imu_tg->value()(7), state->_calib_imu_tg->value()(8)); + } } diff --git a/ov_msckf/src/core/VioManager.h b/ov_msckf/src/core/VioManager.h index 56f85a1e2..3fd708901 100644 --- a/ov_msckf/src/core/VioManager.h +++ b/ov_msckf/src/core/VioManager.h @@ -96,7 +96,7 @@ class VioManager { void initialize_with_gt(Eigen::Matrix imustate); /// If we are initialized or not - bool initialized() { return is_initialized_vio; } + bool initialized() { return is_initialized_vio && timelastupdate != -1; } /// Timestamp that the system was initialized at double initialized_time() { return startup_time; } diff --git a/ov_msckf/src/core/VioManagerOptions.h b/ov_msckf/src/core/VioManagerOptions.h index 
1c3f39b69..0df7d159c 100644 --- a/ov_msckf/src/core/VioManagerOptions.h +++ b/ov_msckf/src/core/VioManagerOptions.h @@ -61,9 +61,11 @@ struct VioManagerOptions { */ void print_and_load(const std::shared_ptr &parser = nullptr) { print_and_load_estimator(parser); + print_and_load_trackers(parser); print_and_load_noise(parser); + + // needs to be called last print_and_load_state(parser); - print_and_load_trackers(parser); } // ESTIMATOR =============================== @@ -130,7 +132,7 @@ struct VioManagerOptions { // NOISE / CHI2 ============================ - /// IMU noise (gyroscope and accelerometer) + /// Continuous-time IMU noise (gyroscope and accelerometer) NoiseManager imu_noises; /// Update options for MSCKF features (pixel noise and chi2 multiplier) @@ -158,10 +160,6 @@ struct VioManagerOptions { parser->parse_external("relative_config_imu", "imu0", "gyroscope_random_walk", imu_noises.sigma_wb); parser->parse_external("relative_config_imu", "imu0", "accelerometer_noise_density", imu_noises.sigma_a); parser->parse_external("relative_config_imu", "imu0", "accelerometer_random_walk", imu_noises.sigma_ab); - imu_noises.sigma_w_2 = std::pow(imu_noises.sigma_w, 2); - imu_noises.sigma_wb_2 = std::pow(imu_noises.sigma_wb, 2); - imu_noises.sigma_a_2 = std::pow(imu_noises.sigma_a, 2); - imu_noises.sigma_ab_2 = std::pow(imu_noises.sigma_ab, 2); } imu_noises.print(); if (parser != nullptr) { @@ -191,6 +189,21 @@ struct VioManagerOptions { /// Gravity magnitude in the global frame (i.e. 
should be 9.81 typically) double gravity_mag = 9.81; + /// Gyroscope IMU intrinsics (scale imperfection and axis misalignment, column-wise, inverse) + Eigen::Matrix vec_dw; + + /// Accelerometer IMU intrinsics (scale imperfection and axis misalignment, column-wise, inverse) + Eigen::Matrix vec_da; + + /// Gyroscope gravity sensitivity (scale imperfection and axis misalignment, column-wise) + Eigen::Matrix vec_tg; + + /// Rotation from gyroscope frame to the "IMU" accelerometer frame + Eigen::Matrix q_ACCtoIMU; + + /// Rotation from accelerometer to the "IMU" gyroscope frame frame + Eigen::Matrix q_GYROtoIMU; + /// Time offset between camera and IMU. double calib_camimu_dt = 0.0; @@ -215,8 +228,6 @@ struct VioManagerOptions { void print_and_load_state(const std::shared_ptr &parser = nullptr) { if (parser != nullptr) { parser->parse_config("gravity_mag", gravity_mag); - parser->parse_config("max_cameras", state_options.num_cameras); // might be redundant - parser->parse_config("downsample_cameras", downsample_cameras); // might be redundant for (int i = 0; i < state_options.num_cameras; i++) { // Time offset (use the first one) @@ -247,7 +258,6 @@ struct VioManagerOptions { parser->parse_external("relative_config_imucam", "cam" + std::to_string(i), "resolution", matrix_wh); matrix_wh.at(0) /= (downsample_cameras) ? 2.0 : 1.0; matrix_wh.at(1) /= (downsample_cameras) ? 
2.0 : 1.0; - std::pair wh(matrix_wh.at(0), matrix_wh.at(1)); // Extrinsics Eigen::Matrix4d T_CtoI = Eigen::Matrix4d::Identity(); @@ -280,9 +290,65 @@ struct VioManagerOptions { PRINT_ERROR(RED "\t- mask%d - %s\n" RESET, i, total_mask_path.c_str()); std::exit(EXIT_FAILURE); } - masks.insert({i, cv::imread(total_mask_path, cv::IMREAD_GRAYSCALE)}); + cv::Mat mask = cv::imread(total_mask_path, cv::IMREAD_GRAYSCALE); + masks.insert({i, mask}); + if (mask.cols != camera_intrinsics.at(i)->w() || mask.rows != camera_intrinsics.at(i)->h()) { + PRINT_ERROR(RED "VioManager(): mask size does not match camera!\n" RESET); + PRINT_ERROR(RED "\t- mask%d - %s\n" RESET, i, total_mask_path.c_str()); + PRINT_ERROR(RED "\t- mask%d - %d x %d\n" RESET, mask.cols, mask.rows); + PRINT_ERROR(RED "\t- cam%d - %d x %d\n" RESET, camera_intrinsics.at(i)->w(), camera_intrinsics.at(i)->h()); + std::exit(EXIT_FAILURE); + } } } + + // IMU intrinsics + Eigen::Matrix3d Tw = Eigen::Matrix3d::Identity(); + parser->parse_external("relative_config_imu", "imu0", "Tw", Tw); + Eigen::Matrix3d Ta = Eigen::Matrix3d::Identity(); + parser->parse_external("relative_config_imu", "imu0", "Ta", Ta); + Eigen::Matrix3d R_IMUtoACC = Eigen::Matrix3d::Identity(); + parser->parse_external("relative_config_imu", "imu0", "R_IMUtoACC", R_IMUtoACC); + Eigen::Matrix3d R_IMUtoGYRO = Eigen::Matrix3d::Identity(); + parser->parse_external("relative_config_imu", "imu0", "R_IMUtoGYRO", R_IMUtoGYRO); + Eigen::Matrix3d Tg = Eigen::Matrix3d::Zero(); + parser->parse_external("relative_config_imu", "imu0", "Tg", Tg); + + // Generate the parameters we need + // TODO: error here if this returns a NaN value (i.e. 
invalid matrix specified) + Eigen::Matrix3d Dw = Tw.colPivHouseholderQr().solve(Eigen::Matrix3d::Identity()); + Eigen::Matrix3d Da = Ta.colPivHouseholderQr().solve(Eigen::Matrix3d::Identity()); + Eigen::Matrix3d R_ACCtoIMU = R_IMUtoACC.transpose(); + Eigen::Matrix3d R_GYROtoIMU = R_IMUtoGYRO.transpose(); + if (std::isnan(Tw.norm()) || std::isnan(Dw.norm())) { + std::stringstream ss; + ss << "gyroscope has bad intrinsic values!" << std::endl; + ss << "Tw - " << std::endl << Tw << std::endl << std::endl; + ss << "Dw - " << std::endl << Dw << std::endl << std::endl; + PRINT_DEBUG(RED "" RESET, ss.str().c_str()); + std::exit(EXIT_FAILURE); + } + if (std::isnan(Ta.norm()) || std::isnan(Da.norm())) { + std::stringstream ss; + ss << "accelerometer has bad intrinsic values!" << std::endl; + ss << "Ta - " << std::endl << Ta << std::endl << std::endl; + ss << "Da - " << std::endl << Da << std::endl << std::endl; + PRINT_DEBUG(RED "" RESET, ss.str().c_str()); + std::exit(EXIT_FAILURE); + } + + // kalibr model: lower triangular of the matrix and R_GYROtoI + // rpng model: upper triangular of the matrix and R_ACCtoI + if (state_options.imu_model == StateOptions::ImuModel::KALIBR) { + vec_dw << Dw.block<3, 1>(0, 0), Dw.block<2, 1>(1, 1), Dw(2, 2); + vec_da << Da.block<3, 1>(0, 0), Da.block<2, 1>(1, 1), Da(2, 2); + } else { + vec_dw << Dw(0, 0), Dw.block<2, 1>(0, 1), Dw.block<3, 1>(0, 2); + vec_da << Da(0, 0), Da.block<2, 1>(0, 1), Da.block<3, 1>(0, 2); + } + vec_tg << Tg.block<3, 1>(0, 0), Tg.block<3, 1>(0, 1), Tg.block<3, 1>(0, 2); + q_GYROtoIMU = ov_core::rot_2_quat(R_GYROtoIMU); + q_ACCtoIMU = ov_core::rot_2_quat(R_ACCtoIMU); } PRINT_DEBUG("STATE PARAMETERS:\n"); PRINT_DEBUG(" - gravity_mag: %.4f\n", gravity_mag); @@ -297,6 +363,7 @@ struct VioManagerOptions { std::exit(EXIT_FAILURE); } PRINT_DEBUG(" - calib_camimu_dt: %.4f\n", calib_camimu_dt); + PRINT_DEBUG("CAMERA PARAMETERS:\n"); for (int n = 0; n < state_options.num_cameras; n++) { std::stringstream ss; ss << "cam_" << n 
<< "_fisheye:" << (std::dynamic_pointer_cast(camera_intrinsics.at(n)) != nullptr) << std::endl; @@ -313,6 +380,15 @@ struct VioManagerOptions { ss << "T_C" << n << "toI:" << std::endl << T_CtoI << std::endl << std::endl; PRINT_DEBUG(ss.str().c_str()); } + PRINT_DEBUG("IMU PARAMETERS:\n"); + std::stringstream ss; + ss << "imu model:" << ((state_options.imu_model == StateOptions::ImuModel::KALIBR) ? "kalibr" : "rpng") << std::endl; + ss << "Dw (columnwise):" << vec_dw.transpose() << std::endl; + ss << "Da (columnwise):" << vec_da.transpose() << std::endl; + ss << "Tg (columnwise):" << vec_tg.transpose() << std::endl; + ss << "q_GYROtoI: " << q_GYROtoIMU.transpose() << std::endl; + ss << "q_ACCtoI: " << q_ACCtoIMU.transpose() << std::endl; + PRINT_DEBUG(ss.str().c_str()); } // TRACKERS =============================== diff --git a/ov_msckf/src/dummy.cpp b/ov_msckf/src/dummy.cpp index 0931284a4..547983c38 100644 --- a/ov_msckf/src/dummy.cpp +++ b/ov_msckf/src/dummy.cpp @@ -37,9 +37,10 @@ * The key features of the system are the following: * * - Sliding stochastic imu clones - * - First estimate Jacobians + * - First-Estimate Jacobians to maintain consistency * - Camera intrinsics and extrinsic online calibration * - Time offset between camera and imu calibration + * - Inertial intrinsic calibration (including g-sensitivity) * - Standard MSCKF features with nullspace projection * - 3d SLAM feature support (with six different representations) * - Generic simulation of trajectories through and environment (see @ref ov_msckf::Simulator) diff --git a/ov_msckf/src/ros/ROS1Visualizer.cpp b/ov_msckf/src/ros/ROS1Visualizer.cpp index a665bd357..6e1cf127d 100644 --- a/ov_msckf/src/ros/ROS1Visualizer.cpp +++ b/ov_msckf/src/ros/ROS1Visualizer.cpp @@ -119,8 +119,10 @@ ROS1Visualizer::ROS1Visualizer(std::shared_ptr nh, std::shared_ // Open the files of_state_est.open(filepath_est.c_str()); of_state_std.open(filepath_std.c_str()); - of_state_est << "# timestamp(s) q p v bg ba 
cam_imu_dt num_cam cam0_k cam0_d cam0_rot cam0_trans .... etc" << std::endl; - of_state_std << "# timestamp(s) q p v bg ba cam_imu_dt num_cam cam0_k cam0_d cam0_rot cam0_trans .... etc" << std::endl; + of_state_est << "# timestamp(s) q p v bg ba cam_imu_dt num_cam cam0_k cam0_d cam0_rot cam0_trans ... imu_model dw da tg wtoI atoI etc" + << std::endl; + of_state_std << "# timestamp(s) q p v bg ba cam_imu_dt num_cam cam0_k cam0_d cam0_rot cam0_trans ... imu_model dw da tg wtoI atoI etc" + << std::endl; // Groundtruth if we are simulating if (_sim != nullptr) { @@ -128,7 +130,8 @@ ROS1Visualizer::ROS1Visualizer(std::shared_ptr nh, std::shared_ boost::filesystem::remove(filepath_gt); boost::filesystem::create_directories(boost::filesystem::path(filepath_gt.c_str()).parent_path()); of_state_gt.open(filepath_gt.c_str()); - of_state_gt << "# timestamp(s) q p v bg ba cam_imu_dt num_cam cam0_k cam0_d cam0_rot cam0_trans .... etc" << std::endl; + of_state_gt << "# timestamp(s) q p v bg ba cam_imu_dt num_cam cam0_k cam0_d cam0_rot cam0_trans ... 
imu_model dw da tg wtoI atoI etc" + << std::endl; } } @@ -378,6 +381,41 @@ void ROS1Visualizer::visualize_final() { } } + // IMU intrinsics + if (_app->get_state()->_options.do_calib_imu_intrinsics) { + Eigen::Matrix3d Dw = State::Dm(_app->get_state()->_options.imu_model, _app->get_state()->_calib_imu_dw->value()); + Eigen::Matrix3d Da = State::Dm(_app->get_state()->_options.imu_model, _app->get_state()->_calib_imu_da->value()); + Eigen::Matrix3d Tw = Dw.colPivHouseholderQr().solve(Eigen::Matrix3d::Identity()); + Eigen::Matrix3d Ta = Da.colPivHouseholderQr().solve(Eigen::Matrix3d::Identity()); + Eigen::Matrix3d R_IMUtoACC = _app->get_state()->_calib_imu_ACCtoIMU->Rot().transpose(); + Eigen::Matrix3d R_IMUtoGYRO = _app->get_state()->_calib_imu_GYROtoIMU->Rot().transpose(); + PRINT_INFO(REDPURPLE "Tw:\n" RESET); + PRINT_INFO(REDPURPLE "%.5f,%.5f,%.5f,\n" RESET, Tw(0, 0), Tw(0, 1), Tw(0, 2)); + PRINT_INFO(REDPURPLE "%.5f,%.5f,%.5f,\n" RESET, Tw(1, 0), Tw(1, 1), Tw(1, 2)); + PRINT_INFO(REDPURPLE "%.5f,%.5f,%.5f\n\n" RESET, Tw(2, 0), Tw(2, 1), Tw(2, 2)); + PRINT_INFO(REDPURPLE "Ta:\n" RESET); + PRINT_INFO(REDPURPLE "%.5f,%.5f,%.5f,\n" RESET, Ta(0, 0), Ta(0, 1), Ta(0, 2)); + PRINT_INFO(REDPURPLE "%.5f,%.5f,%.5f,\n" RESET, Ta(1, 0), Ta(1, 1), Ta(1, 2)); + PRINT_INFO(REDPURPLE "%.5f,%.5f,%.5f\n\n" RESET, Ta(2, 0), Ta(2, 1), Ta(2, 2)); + PRINT_INFO(REDPURPLE "R_IMUtoACC:\n" RESET); + PRINT_INFO(REDPURPLE "%.5f,%.5f,%.5f,\n" RESET, R_IMUtoACC(0, 0), R_IMUtoACC(0, 1), R_IMUtoACC(0, 2)); + PRINT_INFO(REDPURPLE "%.5f,%.5f,%.5f,\n" RESET, R_IMUtoACC(1, 0), R_IMUtoACC(1, 1), R_IMUtoACC(1, 2)); + PRINT_INFO(REDPURPLE "%.5f,%.5f,%.5f\n\n" RESET, R_IMUtoACC(2, 0), R_IMUtoACC(2, 1), R_IMUtoACC(2, 2)); + PRINT_INFO(REDPURPLE "R_IMUtoGYRO:\n" RESET); + PRINT_INFO(REDPURPLE "%.5f,%.5f,%.5f,\n" RESET, R_IMUtoGYRO(0, 0), R_IMUtoGYRO(0, 1), R_IMUtoGYRO(0, 2)); + PRINT_INFO(REDPURPLE "%.5f,%.5f,%.5f,\n" RESET, R_IMUtoGYRO(1, 0), R_IMUtoGYRO(1, 1), R_IMUtoGYRO(1, 2)); + PRINT_INFO(REDPURPLE 
"%.5f,%.5f,%.5f\n\n" RESET, R_IMUtoGYRO(2, 0), R_IMUtoGYRO(2, 1), R_IMUtoGYRO(2, 2)); + } + + // IMU intrinsics gravity sensitivity + if (_app->get_state()->_options.do_calib_imu_g_sensitivity) { + Eigen::Matrix3d Tg = State::Tg(_app->get_state()->_calib_imu_tg->value()); + PRINT_INFO(REDPURPLE "Tg:\n" RESET); + PRINT_INFO(REDPURPLE "%.6f,%.6f,%.6f,\n" RESET, Tg(0, 0), Tg(0, 1), Tg(0, 2)); + PRINT_INFO(REDPURPLE "%.6f,%.6f,%.6f,\n" RESET, Tg(1, 0), Tg(1, 1), Tg(1, 2)); + PRINT_INFO(REDPURPLE "%.6f,%.6f,%.6f\n\n" RESET, Tg(2, 0), Tg(2, 1), Tg(2, 2)); + } + // Publish RMSE if we have it if (!gt_states.empty()) { PRINT_INFO(REDPURPLE "RMSE: %.3f (deg) orientation\n" RESET, std::sqrt(summed_mse_ori / summed_number)); diff --git a/ov_msckf/src/ros/ROS2Visualizer.cpp b/ov_msckf/src/ros/ROS2Visualizer.cpp index ef6f0ff1f..e91cd7430 100644 --- a/ov_msckf/src/ros/ROS2Visualizer.cpp +++ b/ov_msckf/src/ros/ROS2Visualizer.cpp @@ -131,8 +131,10 @@ ROS2Visualizer::ROS2Visualizer(std::shared_ptr node, std::shared_p // Open the files of_state_est.open(filepath_est.c_str()); of_state_std.open(filepath_std.c_str()); - of_state_est << "# timestamp(s) q p v bg ba cam_imu_dt num_cam cam0_k cam0_d cam0_rot cam0_trans .... etc" << std::endl; - of_state_std << "# timestamp(s) q p v bg ba cam_imu_dt num_cam cam0_k cam0_d cam0_rot cam0_trans .... etc" << std::endl; + of_state_est << "# timestamp(s) q p v bg ba cam_imu_dt num_cam cam0_k cam0_d cam0_rot cam0_trans ... imu_model dw da tg wtoI atoI etc" + << std::endl; + of_state_std << "# timestamp(s) q p v bg ba cam_imu_dt num_cam cam0_k cam0_d cam0_rot cam0_trans ... 
imu_model dw da tg wtoI atoI etc" + << std::endl; // Groundtruth if we are simulating if (_sim != nullptr) { @@ -140,7 +142,8 @@ ROS2Visualizer::ROS2Visualizer(std::shared_ptr node, std::shared_p boost::filesystem::remove(filepath_gt); boost::filesystem::create_directories(boost::filesystem::path(filepath_gt.c_str()).parent_path()); of_state_gt.open(filepath_gt.c_str()); - of_state_gt << "# timestamp(s) q p v bg ba cam_imu_dt num_cam cam0_k cam0_d cam0_rot cam0_trans .... etc" << std::endl; + of_state_gt << "# timestamp(s) q p v bg ba cam_imu_dt num_cam cam0_k cam0_d cam0_rot cam0_trans ... imu_model dw da tg wtoI atoI etc" + << std::endl; } } @@ -378,6 +381,41 @@ void ROS2Visualizer::visualize_final() { } } + // IMU intrinsics + if (_app->get_state()->_options.do_calib_imu_intrinsics) { + Eigen::Matrix3d Dw = State::Dm(_app->get_state()->_options.imu_model, _app->get_state()->_calib_imu_dw->value()); + Eigen::Matrix3d Da = State::Dm(_app->get_state()->_options.imu_model, _app->get_state()->_calib_imu_da->value()); + Eigen::Matrix3d Tw = Dw.colPivHouseholderQr().solve(Eigen::Matrix3d::Identity()); + Eigen::Matrix3d Ta = Da.colPivHouseholderQr().solve(Eigen::Matrix3d::Identity()); + Eigen::Matrix3d R_IMUtoACC = _app->get_state()->_calib_imu_ACCtoIMU->Rot().transpose(); + Eigen::Matrix3d R_IMUtoGYRO = _app->get_state()->_calib_imu_GYROtoIMU->Rot().transpose(); + PRINT_INFO(REDPURPLE "Tw:\n" RESET); + PRINT_INFO(REDPURPLE "%.5f,%.5f,%.5f,\n" RESET, Tw(0, 0), Tw(0, 1), Tw(0, 2)); + PRINT_INFO(REDPURPLE "%.5f,%.5f,%.5f,\n" RESET, Tw(1, 0), Tw(1, 1), Tw(1, 2)); + PRINT_INFO(REDPURPLE "%.5f,%.5f,%.5f\n\n" RESET, Tw(2, 0), Tw(2, 1), Tw(2, 2)); + PRINT_INFO(REDPURPLE "Ta:\n" RESET); + PRINT_INFO(REDPURPLE "%.5f,%.5f,%.5f,\n" RESET, Ta(0, 0), Ta(0, 1), Ta(0, 2)); + PRINT_INFO(REDPURPLE "%.5f,%.5f,%.5f,\n" RESET, Ta(1, 0), Ta(1, 1), Ta(1, 2)); + PRINT_INFO(REDPURPLE "%.5f,%.5f,%.5f\n\n" RESET, Ta(2, 0), Ta(2, 1), Ta(2, 2)); + PRINT_INFO(REDPURPLE "R_IMUtoACC:\n" RESET); + 
PRINT_INFO(REDPURPLE "%.5f,%.5f,%.5f,\n" RESET, R_IMUtoACC(0, 0), R_IMUtoACC(0, 1), R_IMUtoACC(0, 2)); + PRINT_INFO(REDPURPLE "%.5f,%.5f,%.5f,\n" RESET, R_IMUtoACC(1, 0), R_IMUtoACC(1, 1), R_IMUtoACC(1, 2)); + PRINT_INFO(REDPURPLE "%.5f,%.5f,%.5f\n\n" RESET, R_IMUtoACC(2, 0), R_IMUtoACC(2, 1), R_IMUtoACC(2, 2)); + PRINT_INFO(REDPURPLE "R_IMUtoGYRO:\n" RESET); + PRINT_INFO(REDPURPLE "%.5f,%.5f,%.5f,\n" RESET, R_IMUtoGYRO(0, 0), R_IMUtoGYRO(0, 1), R_IMUtoGYRO(0, 2)); + PRINT_INFO(REDPURPLE "%.5f,%.5f,%.5f,\n" RESET, R_IMUtoGYRO(1, 0), R_IMUtoGYRO(1, 1), R_IMUtoGYRO(1, 2)); + PRINT_INFO(REDPURPLE "%.5f,%.5f,%.5f\n\n" RESET, R_IMUtoGYRO(2, 0), R_IMUtoGYRO(2, 1), R_IMUtoGYRO(2, 2)); + } + + // IMU intrinsics gravity sensitivity + if (_app->get_state()->_options.do_calib_imu_g_sensitivity) { + Eigen::Matrix3d Tg = State::Tg(_app->get_state()->_calib_imu_tg->value()); + PRINT_INFO(REDPURPLE "Tg:\n" RESET); + PRINT_INFO(REDPURPLE "%.6f,%.6f,%.6f,\n" RESET, Tg(0, 0), Tg(0, 1), Tg(0, 2)); + PRINT_INFO(REDPURPLE "%.6f,%.6f,%.6f,\n" RESET, Tg(1, 0), Tg(1, 1), Tg(1, 2)); + PRINT_INFO(REDPURPLE "%.6f,%.6f,%.6f\n\n" RESET, Tg(2, 0), Tg(2, 1), Tg(2, 2)); + } + // Publish RMSE if we have it if (!gt_states.empty()) { PRINT_INFO(REDPURPLE "RMSE: %.3f (deg) orientation\n" RESET, std::sqrt(summed_mse_ori / summed_number)); diff --git a/ov_msckf/src/ros/ROSVisualizerHelper.cpp b/ov_msckf/src/ros/ROSVisualizerHelper.cpp index 0fd835a71..b18474b37 100644 --- a/ov_msckf/src/ros/ROSVisualizerHelper.cpp +++ b/ov_msckf/src/ros/ROSVisualizerHelper.cpp @@ -206,6 +206,27 @@ void ROSVisualizerHelper::sim_save_total_state_to_file(std::shared_ptr st << " " << sim->get_true_parameters().camera_extrinsics.at(i)(6) << " "; } + // Get the base IMU information + of_state_gt.precision(0); + of_state_gt << sim->get_true_parameters().state_options.imu_model << " "; + of_state_gt.precision(8); + + of_state_gt << sim->get_true_parameters().vec_dw(0) << " " << sim->get_true_parameters().vec_dw(1) << " " + << 
sim->get_true_parameters().vec_dw(2) << " " << sim->get_true_parameters().vec_dw(3) << " " + << sim->get_true_parameters().vec_dw(4) << " " << sim->get_true_parameters().vec_dw(5) << " "; + of_state_gt << sim->get_true_parameters().vec_da(0) << " " << sim->get_true_parameters().vec_da(1) << " " + << sim->get_true_parameters().vec_da(2) << " " << sim->get_true_parameters().vec_da(3) << " " + << sim->get_true_parameters().vec_da(4) << " " << sim->get_true_parameters().vec_da(5) << " "; + of_state_gt << sim->get_true_parameters().vec_tg(0) << " " << sim->get_true_parameters().vec_tg(1) << " " + << sim->get_true_parameters().vec_tg(2) << " " << sim->get_true_parameters().vec_tg(3) << " " + << sim->get_true_parameters().vec_tg(4) << " " << sim->get_true_parameters().vec_tg(5) << " " + << sim->get_true_parameters().vec_tg(6) << " " << sim->get_true_parameters().vec_tg(7) << " " + << sim->get_true_parameters().vec_tg(8) << " "; + of_state_gt << sim->get_true_parameters().q_GYROtoIMU(0) << " " << sim->get_true_parameters().q_GYROtoIMU(1) << " " + << sim->get_true_parameters().q_GYROtoIMU(2) << " " << sim->get_true_parameters().q_GYROtoIMU(3) << " "; + of_state_gt << sim->get_true_parameters().q_ACCtoIMU(0) << " " << sim->get_true_parameters().q_ACCtoIMU(1) << " " + << sim->get_true_parameters().q_ACCtoIMU(2) << " " << sim->get_true_parameters().q_ACCtoIMU(3) << " "; + // New line of_state_gt << endl; } @@ -298,6 +319,82 @@ void ROSVisualizerHelper::sim_save_total_state_to_file(std::shared_ptr st } } + // imu intrinsics: what model we are using + of_state_est.precision(0); + of_state_est << state->_options.imu_model << " "; + of_state_est.precision(8); + of_state_std.precision(0); + of_state_std << state->_options.imu_model << " "; + of_state_std.precision(8); + + // imu intrinsics: dw + of_state_est << state->_calib_imu_dw->value()(0) << " " << state->_calib_imu_dw->value()(1) << " " << state->_calib_imu_dw->value()(2) + << " " << state->_calib_imu_dw->value()(3) << " " << 
state->_calib_imu_dw->value()(4) << " " + << state->_calib_imu_dw->value()(5) << " "; + if (state->_options.do_calib_imu_intrinsics) { + int index_dw = state->_calib_imu_dw->id(); + of_state_std << std::sqrt(cov(index_dw + 0, index_dw + 0)) << " " << std::sqrt(cov(index_dw + 1, index_dw + 1)) << " " + << std::sqrt(cov(index_dw + 2, index_dw + 2)) << " " << std::sqrt(cov(index_dw + 3, index_dw + 3)) << " " + << std::sqrt(cov(index_dw + 4, index_dw + 4)) << " " << std::sqrt(cov(index_dw + 5, index_dw + 5)) << " "; + } else { + of_state_std << 0.0 << " " << 0.0 << " " << 0.0 << " "; + of_state_std << 0.0 << " " << 0.0 << " " << 0.0 << " "; + } + + // imu intrinsics: da + of_state_est << state->_calib_imu_da->value()(0) << " " << state->_calib_imu_da->value()(1) << " " << state->_calib_imu_da->value()(2) + << " " << state->_calib_imu_da->value()(3) << " " << state->_calib_imu_da->value()(4) << " " + << state->_calib_imu_da->value()(5) << " "; + if (state->_options.do_calib_imu_intrinsics) { + int index_da = state->_calib_imu_da->id(); + of_state_std << std::sqrt(cov(index_da + 0, index_da + 0)) << " " << std::sqrt(cov(index_da + 1, index_da + 1)) << " " + << std::sqrt(cov(index_da + 2, index_da + 2)) << " " << std::sqrt(cov(index_da + 3, index_da + 3)) << " " + << std::sqrt(cov(index_da + 4, index_da + 4)) << " " << std::sqrt(cov(index_da + 5, index_da + 5)) << " "; + } else { + of_state_std << 0.0 << " " << 0.0 << " " << 0.0 << " "; + of_state_std << 0.0 << " " << 0.0 << " " << 0.0 << " "; + } + + // imu intrinsics: tg + of_state_est << state->_calib_imu_tg->value()(0) << " " << state->_calib_imu_tg->value()(1) << " " << state->_calib_imu_tg->value()(2) + << " " << state->_calib_imu_tg->value()(3) << " " << state->_calib_imu_tg->value()(4) << " " + << state->_calib_imu_tg->value()(5) << " " << state->_calib_imu_tg->value()(6) << " " << state->_calib_imu_tg->value()(7) + << " " << state->_calib_imu_tg->value()(8) << " "; + if (state->_options.do_calib_imu_intrinsics && 
state->_options.do_calib_imu_g_sensitivity) { + int index_tg = state->_calib_imu_tg->id(); + of_state_std << std::sqrt(cov(index_tg + 0, index_tg + 0)) << " " << std::sqrt(cov(index_tg + 1, index_tg + 1)) << " " + << std::sqrt(cov(index_tg + 2, index_tg + 2)) << " " << std::sqrt(cov(index_tg + 3, index_tg + 3)) << " " + << std::sqrt(cov(index_tg + 4, index_tg + 4)) << " " << std::sqrt(cov(index_tg + 5, index_tg + 5)) << " " + << std::sqrt(cov(index_tg + 6, index_tg + 6)) << " " << std::sqrt(cov(index_tg + 7, index_tg + 7)) << " " + << std::sqrt(cov(index_tg + 8, index_tg + 8)) << " "; + } else { + of_state_std << 0.0 << " " << 0.0 << " " << 0.0 << " "; + of_state_std << 0.0 << " " << 0.0 << " " << 0.0 << " "; + of_state_std << 0.0 << " " << 0.0 << " " << 0.0 << " "; + } + + // imu intrinsics: kalibr R_gyrotoI + of_state_est << state->_calib_imu_GYROtoIMU->value()(0) << " " << state->_calib_imu_GYROtoIMU->value()(1) << " " + << state->_calib_imu_GYROtoIMU->value()(2) << " " << state->_calib_imu_GYROtoIMU->value()(3) << " "; + if (state->_options.do_calib_imu_intrinsics && state->_options.imu_model == StateOptions::ImuModel::KALIBR) { + int index_wtoI = state->_calib_imu_GYROtoIMU->id(); + of_state_std << std::sqrt(cov(index_wtoI + 0, index_wtoI + 0)) << " " << std::sqrt(cov(index_wtoI + 1, index_wtoI + 1)) << " " + << std::sqrt(cov(index_wtoI + 2, index_wtoI + 2)) << " " << std::sqrt(cov(index_wtoI + 3, index_wtoI + 3)) << " "; + } else { + of_state_std << 0.0 << " " << 0.0 << " " << 0.0 << " "; + } + + // imu intrinsics: rpng R_acctoI + of_state_est << state->_calib_imu_ACCtoIMU->value()(0) << " " << state->_calib_imu_ACCtoIMU->value()(1) << " " + << state->_calib_imu_ACCtoIMU->value()(2) << " " << state->_calib_imu_ACCtoIMU->value()(3) << " "; + if (state->_options.do_calib_imu_intrinsics && state->_options.imu_model == StateOptions::ImuModel::RPNG) { + int index_atoI = state->_calib_imu_ACCtoIMU->id(); + of_state_std << std::sqrt(cov(index_atoI + 0, index_atoI + 
0)) << " " << std::sqrt(cov(index_atoI + 1, index_atoI + 1)) << " " + << std::sqrt(cov(index_atoI + 2, index_atoI + 2)) << " " << std::sqrt(cov(index_atoI + 3, index_atoI + 3)) << " "; + } else { + of_state_std << 0.0 << " " << 0.0 << " " << 0.0 << " "; + } + // Done with the estimates! of_state_est << endl; of_state_std << endl; diff --git a/ov_msckf/src/sim/Simulator.cpp b/ov_msckf/src/sim/Simulator.cpp index 7101b4176..a37d5ed11 100644 --- a/ov_msckf/src/sim/Simulator.cpp +++ b/ov_msckf/src/sim/Simulator.cpp @@ -25,6 +25,7 @@ #include "cam/CamEqui.h" #include "cam/CamRadtan.h" #include "sim/BsplineSE3.h" +#include "state/State.h" #include "utils/colors.h" #include "utils/dataset_reader.h" @@ -237,6 +238,30 @@ void Simulator::perturb_parameters(std::mt19937 gen_state, VioManagerOptions &pa params_.camera_extrinsics.at(i)(r) += 0.01 * w(gen_state); } } + + // If we need to perturb the imu intrinsics + if (params_.state_options.do_calib_imu_intrinsics) { + for (int j = 0; j < 6; j++) { + params_.vec_dw(j) += 0.004 * w(gen_state); + params_.vec_da(j) += 0.004 * w(gen_state); + } + if (params_.state_options.imu_model == StateOptions::ImuModel::KALIBR) { + Eigen::Vector3d w_vec; + w_vec << 0.002 * w(gen_state), 0.002 * w(gen_state), 0.002 * w(gen_state); + params_.q_GYROtoIMU = rot_2_quat(exp_so3(w_vec) * quat_2_Rot(params_.q_GYROtoIMU)); + } else { + Eigen::Vector3d w_vec; + w_vec << 0.002 * w(gen_state), 0.002 * w(gen_state), 0.002 * w(gen_state); + params_.q_ACCtoIMU = rot_2_quat(exp_so3(w_vec) * quat_2_Rot(params_.q_ACCtoIMU)); + } + } + + // If we need to perturb gravity sensitivity + if (params_.state_options.do_calib_imu_g_sensitivity) { + for (int j = 0; j < 9; j++) { + params_.vec_tg(j) += 0.004 * w(gen_state); + } + } } bool Simulator::get_state(double desired_time, Eigen::Matrix &imustate) { @@ -312,10 +337,23 @@ bool Simulator::get_next_imu(double &time_imu, Eigen::Vector3d &wm, Eigen::Vecto } // Transform omega and linear acceleration into imu frame - 
Eigen::Vector3d omega_inI = w_IinI; Eigen::Vector3d gravity; gravity << 0.0, 0.0, params.gravity_mag; Eigen::Vector3d accel_inI = R_GtoI * (a_IinG + gravity); + Eigen::Vector3d omega_inI = w_IinI; + + // Get our imu intrinsic parameters + // - kalibr: lower triangular of the matrix is used + // - rpng: upper triangular of the matrix is used + Eigen::Matrix3d Dw = State::Dm(params.state_options.imu_model, params.vec_dw); + Eigen::Matrix3d Da = State::Dm(params.state_options.imu_model, params.vec_da); + Eigen::Matrix3d Tg = State::Tg(params.vec_tg); + + // Get the readings with the imu intrinsic "distortion" + Eigen::Matrix3d Tw = Dw.colPivHouseholderQr().solve(Eigen::Matrix3d::Identity()); + Eigen::Matrix3d Ta = Da.colPivHouseholderQr().solve(Eigen::Matrix3d::Identity()); + Eigen::Vector3d omega_inGYRO = Tw * quat_2_Rot(params.q_GYROtoIMU).transpose() * omega_inI + Tg * accel_inI; + Eigen::Vector3d accel_inACC = Ta * quat_2_Rot(params.q_ACCtoIMU).transpose() * accel_inI; // Calculate the bias values for this IMU reading // NOTE: we skip the first ever bias since we have already appended it @@ -339,12 +377,12 @@ bool Simulator::get_next_imu(double &time_imu, Eigen::Vector3d &wm, Eigen::Vecto has_skipped_first_bias = true; // Now add noise to these measurements - wm(0) = omega_inI(0) + true_bias_gyro(0) + params.imu_noises.sigma_w / std::sqrt(dt) * w(gen_meas_imu); - wm(1) = omega_inI(1) + true_bias_gyro(1) + params.imu_noises.sigma_w / std::sqrt(dt) * w(gen_meas_imu); - wm(2) = omega_inI(2) + true_bias_gyro(2) + params.imu_noises.sigma_w / std::sqrt(dt) * w(gen_meas_imu); - am(0) = accel_inI(0) + true_bias_accel(0) + params.imu_noises.sigma_a / std::sqrt(dt) * w(gen_meas_imu); - am(1) = accel_inI(1) + true_bias_accel(1) + params.imu_noises.sigma_a / std::sqrt(dt) * w(gen_meas_imu); - am(2) = accel_inI(2) + true_bias_accel(2) + params.imu_noises.sigma_a / std::sqrt(dt) * w(gen_meas_imu); + wm(0) = omega_inGYRO(0) + true_bias_gyro(0) + params.imu_noises.sigma_w / 
std::sqrt(dt) * w(gen_meas_imu); + wm(1) = omega_inGYRO(1) + true_bias_gyro(1) + params.imu_noises.sigma_w / std::sqrt(dt) * w(gen_meas_imu); + wm(2) = omega_inGYRO(2) + true_bias_gyro(2) + params.imu_noises.sigma_w / std::sqrt(dt) * w(gen_meas_imu); + am(0) = accel_inACC(0) + true_bias_accel(0) + params.imu_noises.sigma_a / std::sqrt(dt) * w(gen_meas_imu); + am(1) = accel_inACC(1) + true_bias_accel(1) + params.imu_noises.sigma_a / std::sqrt(dt) * w(gen_meas_imu); + am(2) = accel_inACC(2) + true_bias_accel(2) + params.imu_noises.sigma_a / std::sqrt(dt) * w(gen_meas_imu); // Return success return true; @@ -445,8 +483,7 @@ std::vector> Simulator::project_pointcloud(co uv_norm << (float)(p_FinC(0) / p_FinC(2)), (float)(p_FinC(1) / p_FinC(2)); // Distort the normalized coordinates - Eigen::Vector2f uv_dist; - uv_dist = camera->distort_f(uv_norm); + Eigen::Vector2f uv_dist = camera->distort_f(uv_norm); // Check that it is inside our bounds if (uv_dist(0) < 0 || uv_dist(0) > camera->w() || uv_dist(1) < 0 || uv_dist(1) > camera->h()) { @@ -487,8 +524,7 @@ void Simulator::generate_points(const Eigen::Matrix3d &R_GtoI, const Eigen::Vect cv::Point2f uv_dist((float)u_dist, (float)v_dist); // Undistort this point to our normalized coordinates - cv::Point2f uv_norm; - uv_norm = camera->undistort_cv(uv_dist); + cv::Point2f uv_norm = camera->undistort_cv(uv_dist); // Generate a random depth std::uniform_real_distribution gen_depth(params.sim_min_feature_gen_distance, params.sim_max_feature_gen_distance); diff --git a/ov_msckf/src/state/Propagator.cpp b/ov_msckf/src/state/Propagator.cpp index 4a0ac187a..47879ecca 100644 --- a/ov_msckf/src/state/Propagator.cpp +++ b/ov_msckf/src/state/Propagator.cpp @@ -23,6 +23,7 @@ #include "state/State.h" #include "state/StateHelper.h" +#include "utils/print.h" #include "utils/quat_ops.h" using namespace ov_core; @@ -72,8 +73,8 @@ void Propagator::propagate_and_clone(std::shared_ptr state, double timest // Q_summed = Phi_i*Q_summed*Phi_i^T + Q_i 
// After summing we can multiple the total phi to get the updated covariance // We will then add the noise to the IMU portion of the state - Eigen::Matrix Phi_summed = Eigen::Matrix::Identity(); - Eigen::Matrix Qd_summed = Eigen::Matrix::Zero(); + Eigen::MatrixXd Phi_summed = Eigen::MatrixXd::Identity(state->imu_intrinsic_size() + 15, state->imu_intrinsic_size() + 15); + Eigen::MatrixXd Qd_summed = Eigen::MatrixXd::Zero(state->imu_intrinsic_size() + 15, state->imu_intrinsic_size() + 15); double dt_summed = 0; // Loop through all IMU messages, and use them to move the state forward in time @@ -82,8 +83,7 @@ void Propagator::propagate_and_clone(std::shared_ptr state, double timest for (size_t i = 0; i < prop_data.size() - 1; i++) { // Get the next state Jacobian and noise Jacobian for this IMU reading - Eigen::Matrix F = Eigen::Matrix::Zero(); - Eigen::Matrix Qdi = Eigen::Matrix::Zero(); + Eigen::MatrixXd F, Qdi; predict_and_compute(state, prop_data.at(i), prop_data.at(i + 1), F, Qdi); // Next we should propagate our IMU covariance @@ -101,15 +101,32 @@ void Propagator::propagate_and_clone(std::shared_ptr state, double timest assert(std::abs((time1 - time0) - dt_summed) < 1e-4); // Last angular velocity (used for cloning when estimating time offset) - Eigen::Matrix last_w = Eigen::Matrix::Zero(); - if (prop_data.size() > 1) - last_w = prop_data.at(prop_data.size() - 2).wm - state->_imu->bias_g(); - else if (!prop_data.empty()) - last_w = prop_data.at(prop_data.size() - 1).wm - state->_imu->bias_g(); + // Remember to correct them before we store them + Eigen::Vector3d last_a = Eigen::Vector3d::Zero(); + Eigen::Vector3d last_w = Eigen::Vector3d::Zero(); + if (!prop_data.empty()) { + Eigen::Matrix3d Dw = State::Dm(state->_options.imu_model, state->_calib_imu_dw->value()); + Eigen::Matrix3d Da = State::Dm(state->_options.imu_model, state->_calib_imu_da->value()); + Eigen::Matrix3d Tg = State::Tg(state->_calib_imu_tg->value()); + last_a = state->_calib_imu_ACCtoIMU->Rot() 
* Da * (prop_data.at(prop_data.size() - 1).am - state->_imu->bias_a()); + last_w = state->_calib_imu_GYROtoIMU->Rot() * Dw * (prop_data.at(prop_data.size() - 1).wm - state->_imu->bias_g() - Tg * last_a); + } // Do the update to the covariance with our "summed" state transition and IMU noise addition... std::vector> Phi_order; Phi_order.push_back(state->_imu); + if (state->_options.do_calib_imu_intrinsics) { + Phi_order.push_back(state->_calib_imu_dw); + Phi_order.push_back(state->_calib_imu_da); + if (state->_options.do_calib_imu_g_sensitivity) { + Phi_order.push_back(state->_calib_imu_tg); + } + if (state->_options.imu_model == StateOptions::ImuModel::KALIBR) { + Phi_order.push_back(state->_calib_imu_GYROtoIMU); + } else { + Phi_order.push_back(state->_calib_imu_ACCtoIMU); + } + } StateHelper::EKFPropagation(state, Phi_order, Phi_order, Phi_summed, Qd_summed); // Set timestamp data @@ -147,19 +164,40 @@ bool Propagator::fast_state_propagate(std::shared_ptr state, double times Eigen::Vector3d bias_g = cache_state_est.block(10, 0, 3, 1); Eigen::Vector3d bias_a = cache_state_est.block(13, 0, 3, 1); + // IMU intrinsic calibration estimates (static) + Eigen::Matrix3d Dw = State::Dm(state->_options.imu_model, state->_calib_imu_dw->value()); + Eigen::Matrix3d Da = State::Dm(state->_options.imu_model, state->_calib_imu_da->value()); + Eigen::Matrix3d Tg = State::Tg(state->_calib_imu_tg->value()); + Eigen::Matrix3d R_ACCtoIMU = state->_calib_imu_ACCtoIMU->Rot(); + Eigen::Matrix3d R_GYROtoIMU = state->_calib_imu_GYROtoIMU->Rot(); + // Loop through all IMU messages, and use them to move the state forward in time // This uses the zero'th order quat, and then constant acceleration discrete for (size_t i = 0; i < prop_data.size() - 1; i++) { - // Corrected imu measurements - double dt = prop_data.at(i + 1).timestamp - prop_data.at(i).timestamp; - Eigen::Vector3d w_hat = 0.5 * (prop_data.at(i + 1).wm + prop_data.at(i).wm) - bias_g; - Eigen::Vector3d a_hat = 0.5 * (prop_data.at(i 
+ 1).am + prop_data.at(i).am) - bias_a; + // Time elapsed over interval + auto data_minus = prop_data.at(i); + auto data_plus = prop_data.at(i + 1); + double dt = data_plus.timestamp - data_minus.timestamp; + + // Corrected imu acc measurements with our current biases + Eigen::Vector3d a_hat1 = R_ACCtoIMU * Da * (data_minus.am - bias_a); + Eigen::Vector3d a_hat2 = R_ACCtoIMU * Da * (data_plus.am - bias_a); + Eigen::Vector3d a_hat = 0.5 * (a_hat1 + a_hat2); + + // Corrected imu gyro measurements with our current biases + Eigen::Vector3d w_hat1 = R_GYROtoIMU * Dw * (data_minus.wm - bias_g - Tg * a_hat1); + Eigen::Vector3d w_hat2 = R_GYROtoIMU * Dw * (data_plus.wm - bias_g - Tg * a_hat2); + Eigen::Vector3d w_hat = 0.5 * (w_hat1 + w_hat2); + + // Current state estimates Eigen::Matrix3d R_Gtoi = quat_2_Rot(cache_state_est.block(0, 0, 4, 1)); Eigen::Vector3d v_iinG = cache_state_est.block(7, 0, 3, 1); Eigen::Vector3d p_iinG = cache_state_est.block(4, 0, 3, 1); // State transition and noise matrix + // TODO: should probably track the correlations with the IMU intrinsics if we are calibrating + // TODO: currently this just does a quick discrete prediction using only the previous marg IMU uncertainty Eigen::Matrix F = Eigen::Matrix::Zero(); F.block(0, 0, 3, 3) = exp_so3(-w_hat * dt); F.block(0, 9, 3, 3).noalias() = -exp_so3(-w_hat * dt) * Jr_so3(-w_hat * dt) * dt; @@ -210,12 +248,14 @@ bool Propagator::fast_state_propagate(std::shared_ptr state, double times state_plus.setZero(); state_plus.block(0, 0, 4, 1) = q_Gtoi; state_plus.block(4, 0, 3, 1) = p_iinG; - state_plus.block(7, 0, 3, 1) = quat_2_Rot(q_Gtoi) * v_iinG; - state_plus.block(10, 0, 3, 1) = 0.5 * (prop_data.at(prop_data.size() - 1).wm + prop_data.at(prop_data.size() - 2).wm) - bias_g; + state_plus.block(7, 0, 3, 1) = quat_2_Rot(q_Gtoi) * v_iinG; // local frame v_iini + Eigen::Vector3d last_a = R_ACCtoIMU * Da * (prop_data.at(prop_data.size() - 1).am - bias_a); + Eigen::Vector3d last_w = R_GYROtoIMU * Dw * 
(prop_data.at(prop_data.size() - 1).wm - bias_g - Tg * last_a); + state_plus.block(10, 0, 3, 1) = last_w; - // Do a covariance propagation for our velocity + // Do a covariance propagation for our velocity (needs to be in local frame) // TODO: more properly do the covariance of the angular velocity here... - // TODO: it should be dependent on the state bias, thus correlated with the pose + // TODO: it should be dependent on the state bias, thus correlated with the pose.. covariance.setZero(); Eigen::Matrix Phi = Eigen::Matrix::Identity(); Phi.block(6, 6, 3, 3) = quat_2_Rot(q_Gtoi); @@ -353,106 +393,80 @@ std::vector Propagator::select_imu_readings(const std::vector< } void Propagator::predict_and_compute(std::shared_ptr state, const ov_core::ImuData &data_minus, const ov_core::ImuData &data_plus, - Eigen::Matrix &F, Eigen::Matrix &Qd) { - - // Set them to zero - F.setZero(); - Qd.setZero(); + Eigen::MatrixXd &F, Eigen::MatrixXd &Qd) { // Time elapsed over interval double dt = data_plus.timestamp - data_minus.timestamp; // assert(data_plus.timestamp>data_minus.timestamp); - // Corrected imu measurements - Eigen::Matrix w_hat = data_minus.wm - state->_imu->bias_g(); - Eigen::Matrix a_hat = data_minus.am - state->_imu->bias_a(); - Eigen::Matrix w_hat2 = data_plus.wm - state->_imu->bias_g(); - Eigen::Matrix a_hat2 = data_plus.am - state->_imu->bias_a(); + // IMU intrinsic calibration estimates (static) + Eigen::Matrix3d Dw = State::Dm(state->_options.imu_model, state->_calib_imu_dw->value()); + Eigen::Matrix3d Da = State::Dm(state->_options.imu_model, state->_calib_imu_da->value()); + Eigen::Matrix3d Tg = State::Tg(state->_calib_imu_tg->value()); + + // Corrected imu acc measurements with our current biases + Eigen::Vector3d a_hat1 = data_minus.am - state->_imu->bias_a(); + Eigen::Vector3d a_hat2 = data_plus.am - state->_imu->bias_a(); + Eigen::Vector3d a_hat_avg = .5 * (a_hat1 + a_hat2); + + // Convert "raw" imu to its corrected frame using the IMU intrinsics + 
Eigen::Vector3d a_uncorrected = a_hat_avg; + Eigen::Matrix3d R_ACCtoIMU = state->_calib_imu_ACCtoIMU->Rot(); + a_hat1 = R_ACCtoIMU * Da * a_hat1; + a_hat2 = R_ACCtoIMU * Da * a_hat2; + a_hat_avg = R_ACCtoIMU * Da * a_hat_avg; + + // Corrected imu gyro measurements with our current biases and gravity sensitivity + Eigen::Vector3d w_hat1 = data_minus.wm - state->_imu->bias_g() - Tg * a_hat1; + Eigen::Vector3d w_hat2 = data_plus.wm - state->_imu->bias_g() - Tg * a_hat2; + Eigen::Vector3d w_hat_avg = .5 * (w_hat1 + w_hat2); + + // Convert "raw" imu to its corrected frame using the IMU intrinsics + Eigen::Vector3d w_uncorrected = w_hat_avg; + Eigen::Matrix3d R_GYROtoIMU = state->_calib_imu_GYROtoIMU->Rot(); + w_hat1 = R_GYROtoIMU * Dw * w_hat1; + w_hat2 = R_GYROtoIMU * Dw * w_hat2; + w_hat_avg = R_GYROtoIMU * Dw * w_hat_avg; + + // Pre-compute some analytical values for the mean and covariance integration + Eigen::Matrix Xi_sum = Eigen::Matrix::Zero(3, 18); + if (state->_options.integration_method == StateOptions::IntegrationMethod::RK4 || + state->_options.integration_method == StateOptions::IntegrationMethod::ANALYTICAL) { + compute_Xi_sum(state, dt, w_hat_avg, a_hat_avg, Xi_sum); + } // Compute the new state mean value Eigen::Vector4d new_q; Eigen::Vector3d new_v, new_p; - if (state->_options.use_rk4_integration) - predict_mean_rk4(state, dt, w_hat, a_hat, w_hat2, a_hat2, new_q, new_v, new_p); - else - predict_mean_discrete(state, dt, w_hat, a_hat, w_hat2, a_hat2, new_q, new_v, new_p); - - // Get the locations of each entry of the imu state - int th_id = state->_imu->q()->id() - state->_imu->id(); - int p_id = state->_imu->p()->id() - state->_imu->id(); - int v_id = state->_imu->v()->id() - state->_imu->id(); - int bg_id = state->_imu->bg()->id() - state->_imu->id(); - int ba_id = state->_imu->ba()->id() - state->_imu->id(); - - // Allocate noise Jacobian - Eigen::Matrix G = Eigen::Matrix::Zero(); - - // Now compute Jacobian of new state wrt old state and noise - if 
(state->_options.do_fej) { - - // This is the change in the orientation from the end of the last prop to the current prop - // This is needed since we need to include the "k-th" updated orientation information - Eigen::Matrix Rfej = state->_imu->Rot_fej(); - Eigen::Matrix dR = quat_2_Rot(new_q) * Rfej.transpose(); - - Eigen::Matrix v_fej = state->_imu->vel_fej(); - Eigen::Matrix p_fej = state->_imu->pos_fej(); - - F.block(th_id, th_id, 3, 3) = dR; - F.block(th_id, bg_id, 3, 3).noalias() = -dR * Jr_so3(-w_hat * dt) * dt; - // F.block(th_id, bg_id, 3, 3).noalias() = -dR * Jr_so3(-log_so3(dR)) * dt; - F.block(bg_id, bg_id, 3, 3).setIdentity(); - F.block(v_id, th_id, 3, 3).noalias() = -skew_x(new_v - v_fej + _gravity * dt) * Rfej.transpose(); - // F.block(v_id, th_id, 3, 3).noalias() = -Rfej.transpose() * skew_x(Rfej*(new_v-v_fej+_gravity*dt)); - F.block(v_id, v_id, 3, 3).setIdentity(); - F.block(v_id, ba_id, 3, 3) = -Rfej.transpose() * dt; - F.block(ba_id, ba_id, 3, 3).setIdentity(); - F.block(p_id, th_id, 3, 3).noalias() = -skew_x(new_p - p_fej - v_fej * dt + 0.5 * _gravity * dt * dt) * Rfej.transpose(); - // F.block(p_id, th_id, 3, 3).noalias() = -0.5 * Rfej.transpose() * skew_x(2*Rfej*(new_p-p_fej-v_fej*dt+0.5*_gravity*dt*dt)); - F.block(p_id, v_id, 3, 3) = Eigen::Matrix::Identity() * dt; - F.block(p_id, ba_id, 3, 3) = -0.5 * Rfej.transpose() * dt * dt; - F.block(p_id, p_id, 3, 3).setIdentity(); - - G.block(th_id, 0, 3, 3) = -dR * Jr_so3(-w_hat * dt) * dt; - // G.block(th_id, 0, 3, 3) = -dR * Jr_so3(-log_so3(dR)) * dt; - G.block(v_id, 3, 3, 3) = -Rfej.transpose() * dt; - G.block(p_id, 3, 3, 3) = -0.5 * Rfej.transpose() * dt * dt; - G.block(bg_id, 6, 3, 3) = Eigen::Matrix::Identity(); - G.block(ba_id, 9, 3, 3) = Eigen::Matrix::Identity(); - + if (state->_options.integration_method == StateOptions::IntegrationMethod::ANALYTICAL) { + predict_mean_analytic(state, dt, w_hat_avg, a_hat_avg, new_q, new_v, new_p, Xi_sum); + } else if (state->_options.integration_method == 
StateOptions::IntegrationMethod::RK4) { + predict_mean_rk4(state, dt, w_hat1, a_hat1, w_hat2, a_hat2, new_q, new_v, new_p); } else { + predict_mean_discrete(state, dt, w_hat_avg, a_hat_avg, new_q, new_v, new_p); + } - Eigen::Matrix R_Gtoi = state->_imu->Rot(); - - F.block(th_id, th_id, 3, 3) = exp_so3(-w_hat * dt); - F.block(th_id, bg_id, 3, 3).noalias() = -exp_so3(-w_hat * dt) * Jr_so3(-w_hat * dt) * dt; - F.block(bg_id, bg_id, 3, 3).setIdentity(); - F.block(v_id, th_id, 3, 3).noalias() = -R_Gtoi.transpose() * skew_x(a_hat * dt); - F.block(v_id, v_id, 3, 3).setIdentity(); - F.block(v_id, ba_id, 3, 3) = -R_Gtoi.transpose() * dt; - F.block(ba_id, ba_id, 3, 3).setIdentity(); - F.block(p_id, th_id, 3, 3).noalias() = -0.5 * R_Gtoi.transpose() * skew_x(a_hat * dt * dt); - F.block(p_id, v_id, 3, 3) = Eigen::Matrix::Identity() * dt; - F.block(p_id, ba_id, 3, 3) = -0.5 * R_Gtoi.transpose() * dt * dt; - F.block(p_id, p_id, 3, 3).setIdentity(); - - G.block(th_id, 0, 3, 3) = -exp_so3(-w_hat * dt) * Jr_so3(-w_hat * dt) * dt; - G.block(v_id, 3, 3, 3) = -R_Gtoi.transpose() * dt; - G.block(p_id, 3, 3, 3) = -0.5 * R_Gtoi.transpose() * dt * dt; - G.block(bg_id, 6, 3, 3) = Eigen::Matrix::Identity(); - G.block(ba_id, 9, 3, 3) = Eigen::Matrix::Identity(); + // Allocate state transition and continuous-time noise Jacobian + F = Eigen::MatrixXd::Zero(state->imu_intrinsic_size() + 15, state->imu_intrinsic_size() + 15); + Eigen::MatrixXd G = Eigen::MatrixXd::Zero(state->imu_intrinsic_size() + 15, 12); + if (state->_options.integration_method == StateOptions::IntegrationMethod::RK4 || + state->_options.integration_method == StateOptions::IntegrationMethod::ANALYTICAL) { + compute_F_and_G_analytic(state, dt, w_hat_avg, a_hat_avg, w_uncorrected, a_uncorrected, new_q, new_v, new_p, Xi_sum, F, G); + } else { + compute_F_and_G_discrete(state, dt, w_hat_avg, a_hat_avg, w_uncorrected, a_uncorrected, new_q, new_v, new_p, F, G); } // Construct our discrete noise covariance matrix // Note that we 
need to convert our continuous time noises to discrete // Equations (129) amd (130) of Trawny tech report Eigen::Matrix Qc = Eigen::Matrix::Zero(); - Qc.block(0, 0, 3, 3) = _noises.sigma_w_2 / dt * Eigen::Matrix::Identity(); - Qc.block(3, 3, 3, 3) = _noises.sigma_a_2 / dt * Eigen::Matrix::Identity(); - Qc.block(6, 6, 3, 3) = _noises.sigma_wb_2 * dt * Eigen::Matrix::Identity(); - Qc.block(9, 9, 3, 3) = _noises.sigma_ab_2 * dt * Eigen::Matrix::Identity(); + Qc.block(0, 0, 3, 3) = std::pow(_noises.sigma_w, 2) / dt * Eigen::Matrix3d::Identity(); + Qc.block(3, 3, 3, 3) = std::pow(_noises.sigma_a, 2) / dt * Eigen::Matrix3d::Identity(); + Qc.block(6, 6, 3, 3) = std::pow(_noises.sigma_wb, 2) / dt * Eigen::Matrix3d::Identity(); + Qc.block(9, 9, 3, 3) = std::pow(_noises.sigma_ab, 2) / dt * Eigen::Matrix3d::Identity(); // Compute the noise injected into the state over the interval + Qd = Eigen::MatrixXd::Zero(state->imu_intrinsic_size() + 15, state->imu_intrinsic_size() + 15); Qd = G * Qc * G.transpose(); Qd = 0.5 * (Qd + Qd.transpose()); @@ -465,26 +479,17 @@ void Propagator::predict_and_compute(std::shared_ptr state, const ov_core state->_imu->set_fej(imu_x); } -void Propagator::predict_mean_discrete(std::shared_ptr state, double dt, const Eigen::Vector3d &w_hat1, - const Eigen::Vector3d &a_hat1, const Eigen::Vector3d &w_hat2, const Eigen::Vector3d &a_hat2, +void Propagator::predict_mean_discrete(std::shared_ptr state, double dt, const Eigen::Vector3d &w_hat, const Eigen::Vector3d &a_hat, Eigen::Vector4d &new_q, Eigen::Vector3d &new_v, Eigen::Vector3d &new_p) { - // If we are averaging the IMU, then do so - Eigen::Vector3d w_hat = w_hat1; - Eigen::Vector3d a_hat = a_hat1; - if (state->_options.imu_avg) { - w_hat = .5 * (w_hat1 + w_hat2); - a_hat = .5 * (a_hat1 + a_hat2); - } - // Pre-compute things double w_norm = w_hat.norm(); - Eigen::Matrix I_4x4 = Eigen::Matrix::Identity(); - Eigen::Matrix R_Gtoi = state->_imu->Rot(); + Eigen::Matrix4d I_4x4 = 
Eigen::Matrix4d::Identity(); + Eigen::Matrix3d R_Gtoi = state->_imu->Rot(); // Orientation: Equation (101) and (103) and of Trawny indirect TR Eigen::Matrix bigO; - if (w_norm > 1e-20) { + if (w_norm > 1e-12) { bigO = cos(0.5 * w_norm * dt) * I_4x4 + 1 / w_norm * sin(0.5 * w_norm * dt) * Omega(w_hat); } else { bigO = I_4x4 + 0.5 * dt * Omega(w_hat); @@ -579,3 +584,433 @@ void Propagator::predict_mean_rk4(std::shared_ptr state, double dt, const new_p = p_0 + (1.0 / 6.0) * k1_p + (1.0 / 3.0) * k2_p + (1.0 / 3.0) * k3_p + (1.0 / 6.0) * k4_p; new_v = v_0 + (1.0 / 6.0) * k1_v + (1.0 / 3.0) * k2_v + (1.0 / 3.0) * k3_v + (1.0 / 6.0) * k4_v; } + +void Propagator::compute_Xi_sum(std::shared_ptr state, double dt, const Eigen::Vector3d &w_hat, const Eigen::Vector3d &a_hat, + Eigen::Matrix &Xi_sum) { + + // Decompose our angular velocity into a direction and amount + double w_norm = w_hat.norm(); + double d_th = w_norm * dt; + Eigen::Vector3d k_hat = Eigen::Vector3d::Zero(); + if (w_norm > 1e-12) { + k_hat = w_hat / w_norm; + } + + // Compute useful identities used throughout + Eigen::Matrix3d I_3x3 = Eigen::Matrix3d::Identity(); + double d_t2 = std::pow(dt, 2); + double d_t3 = std::pow(dt, 3); + double w_norm2 = std::pow(w_norm, 2); + double w_norm3 = std::pow(w_norm, 3); + double cos_dth = std::cos(d_th); + double sin_dth = std::sin(d_th); + double d_th2 = std::pow(d_th, 2); + double d_th3 = std::pow(d_th, 3); + Eigen::Matrix3d sK = ov_core::skew_x(k_hat); + Eigen::Matrix3d sK2 = sK * sK; + Eigen::Matrix3d sA = ov_core::skew_x(a_hat); + + // Integration components will be used later + Eigen::Matrix3d R_ktok1, Xi_1, Xi_2, Jr_ktok1, Xi_3, Xi_4; + R_ktok1 = ov_core::exp_so3(-w_hat * dt); + Jr_ktok1 = ov_core::Jr_so3(-w_hat * dt); + + // Now begin the integration of each component + // Based on the delta theta, let's decide which integration will be used + bool small_w = (w_norm < 1.0 / 180 * M_PI / 2); + if (!small_w) { + + // first order rotation integration with constant 
omega + Xi_1 = I_3x3 * dt + (1.0 - cos_dth) / w_norm * sK + (dt - sin_dth / w_norm) * sK2; + + // second order rotation integration with constant omega + Xi_2 = 1.0 / 2 * d_t2 * I_3x3 + (d_th - sin_dth) / w_norm2 * sK + (1.0 / 2 * d_t2 - (1.0 - cos_dth) / w_norm2) * sK2; + + // first order integration with constant omega and constant acc + Xi_3 = 1.0 / 2 * d_t2 * sA + (sin_dth - d_th) / w_norm2 * sA * sK + (sin_dth - d_th * cos_dth) / w_norm2 * sK * sA + + (1.0 / 2 * d_t2 - (1.0 - cos_dth) / w_norm2) * sA * sK2 + + (1.0 / 2 * d_t2 + (1.0 - cos_dth - d_th * sin_dth) / w_norm2) * (sK2 * sA + k_hat.dot(a_hat) * sK) - + (3 * sin_dth - 2 * d_th - d_th * cos_dth) / w_norm2 * k_hat.dot(a_hat) * sK2; + + // second order integration with constant omega and constant acc + Xi_4 = 1.0 / 6 * d_t3 * sA + (2 * (1.0 - cos_dth) - d_th2) / (2 * w_norm3) * sA * sK + + ((2 * (1.0 - cos_dth) - d_th * sin_dth) / w_norm3) * sK * sA + ((sin_dth - d_th) / w_norm3 + d_t3 / 6) * sA * sK2 + + ((d_th - 2 * sin_dth + 1.0 / 6 * d_th3 + d_th * cos_dth) / w_norm3) * (sK2 * sA + k_hat.dot(a_hat) * sK) + + (4 * cos_dth - 4 + d_th2 + d_th * sin_dth) / w_norm3 * k_hat.dot(a_hat) * sK2; + + } else { + + // first order rotation integration with constant omega + Xi_1 = dt * (I_3x3 + sin_dth * sK + (1.0 - cos_dth) * sK2); + + // second order rotation integration with constant omega + Xi_2 = 1.0 / 2 * dt * Xi_1; + + // first order integration with constant omega and constant acc + Xi_3 = 1.0 / 2 * d_t2 * + (sA + sin_dth * (-sA * sK + sK * sA + k_hat.dot(a_hat) * sK2) + (1.0 - cos_dth) * (sA * sK2 + sK2 * sA + k_hat.dot(a_hat) * sK)); + + // second order integration with constant omega and constant acc + Xi_4 = 1.0 / 3 * dt * Xi_3; + } + + // Store the integrated parameters + Xi_sum.setZero(); + Xi_sum.block(0, 0, 3, 3) = R_ktok1; + Xi_sum.block(0, 3, 3, 3) = Xi_1; + Xi_sum.block(0, 6, 3, 3) = Xi_2; + Xi_sum.block(0, 9, 3, 3) = Jr_ktok1; + Xi_sum.block(0, 12, 3, 3) = Xi_3; + Xi_sum.block(0, 15, 3, 3) = 
Xi_4; +} + +void Propagator::predict_mean_analytic(std::shared_ptr state, double dt, const Eigen::Vector3d &w_hat, const Eigen::Vector3d &a_hat, + Eigen::Vector4d &new_q, Eigen::Vector3d &new_v, Eigen::Vector3d &new_p, + Eigen::Matrix &Xi_sum) { + + // Pre-compute things + Eigen::Matrix3d R_Gtok = state->_imu->Rot(); + Eigen::Vector4d q_ktok1 = ov_core::rot_2_quat(Xi_sum.block(0, 0, 3, 3)); + Eigen::Matrix3d Xi_1 = Xi_sum.block(0, 3, 3, 3); + Eigen::Matrix3d Xi_2 = Xi_sum.block(0, 6, 3, 3); + + // Use our integrated Xi's to move the state forward + new_q = ov_core::quat_multiply(q_ktok1, state->_imu->quat()); + new_v = state->_imu->vel() + R_Gtok.transpose() * Xi_1 * a_hat - _gravity * dt; + new_p = state->_imu->pos() + state->_imu->vel() * dt + R_Gtok.transpose() * Xi_2 * a_hat - 0.5 * _gravity * dt * dt; +} + +void Propagator::compute_F_and_G_analytic(std::shared_ptr state, double dt, const Eigen::Vector3d &w_hat, + const Eigen::Vector3d &a_hat, const Eigen::Vector3d &w_uncorrected, + const Eigen::Vector3d &a_uncorrected, const Eigen::Vector4d &new_q, const Eigen::Vector3d &new_v, + const Eigen::Vector3d &new_p, const Eigen::Matrix &Xi_sum, Eigen::MatrixXd &F, + Eigen::MatrixXd &G) { + + // Get the locations of each entry of the imu state + int local_size = 0; + int th_id = local_size; + local_size += state->_imu->q()->size(); + int p_id = local_size; + local_size += state->_imu->p()->size(); + int v_id = local_size; + local_size += state->_imu->v()->size(); + int bg_id = local_size; + local_size += state->_imu->bg()->size(); + int ba_id = local_size; + local_size += state->_imu->ba()->size(); + + // If we are doing calibration, we can define their "local" id in the state transition + int Dw_id = -1; + int Da_id = -1; + int Tg_id = -1; + int th_atoI_id = -1; + int th_wtoI_id = -1; + if (state->_options.do_calib_imu_intrinsics) { + Dw_id = local_size; + local_size += state->_calib_imu_dw->size(); + Da_id = local_size; + local_size += state->_calib_imu_da->size(); 
+ if (state->_options.do_calib_imu_g_sensitivity) { + Tg_id = local_size; + local_size += state->_calib_imu_tg->size(); + } + if (state->_options.imu_model == StateOptions::ImuModel::KALIBR) { + th_wtoI_id = local_size; + local_size += state->_calib_imu_GYROtoIMU->size(); + } else { + th_atoI_id = local_size; + local_size += state->_calib_imu_ACCtoIMU->size(); + } + } + + // The change in the orientation from the end of the last prop to the current prop + // This is needed since we need to include the "k-th" updated orientation information + Eigen::Matrix3d R_k = state->_imu->Rot(); + Eigen::Vector3d v_k = state->_imu->vel(); + Eigen::Vector3d p_k = state->_imu->pos(); + if (state->_options.do_fej) { + R_k = state->_imu->Rot_fej(); + v_k = state->_imu->vel_fej(); + p_k = state->_imu->pos_fej(); + } + Eigen::Matrix3d dR_ktok1 = quat_2_Rot(new_q) * R_k.transpose(); + + Eigen::Matrix3d Dw = State::Dm(state->_options.imu_model, state->_calib_imu_dw->value()); + Eigen::Matrix3d Da = State::Dm(state->_options.imu_model, state->_calib_imu_da->value()); + Eigen::Matrix3d Tg = State::Tg(state->_calib_imu_tg->value()); + Eigen::Matrix3d R_atoI = state->_calib_imu_ACCtoIMU->Rot(); + Eigen::Matrix3d R_wtoI = state->_calib_imu_GYROtoIMU->Rot(); + Eigen::Vector3d a_k = R_atoI * Da * a_uncorrected; + Eigen::Vector3d w_k = R_wtoI * Dw * w_uncorrected; // contains gravity correction already + + Eigen::Matrix3d Xi_1 = Xi_sum.block(0, 3, 3, 3); + Eigen::Matrix3d Xi_2 = Xi_sum.block(0, 6, 3, 3); + Eigen::Matrix3d Jr_ktok1 = Xi_sum.block(0, 9, 3, 3); + Eigen::Matrix3d Xi_3 = Xi_sum.block(0, 12, 3, 3); + Eigen::Matrix3d Xi_4 = Xi_sum.block(0, 15, 3, 3); + + // for th + F.block(th_id, th_id, 3, 3) = dR_ktok1; + F.block(p_id, th_id, 3, 3) = -skew_x(new_p - p_k - v_k * dt + 0.5 * _gravity * dt * dt) * R_k.transpose(); + F.block(v_id, th_id, 3, 3) = -skew_x(new_v - v_k + _gravity * dt) * R_k.transpose(); + + // for p + F.block(p_id, p_id, 3, 3).setIdentity(); + + // for v + F.block(p_id, 
v_id, 3, 3) = Eigen::Matrix3d::Identity() * dt; + F.block(v_id, v_id, 3, 3).setIdentity(); + + // for bg + F.block(th_id, bg_id, 3, 3) = -dR_ktok1 * Jr_ktok1 * dt * R_wtoI * Dw; + F.block(p_id, bg_id, 3, 3) = R_k.transpose() * Xi_4 * R_wtoI * Dw; + F.block(v_id, bg_id, 3, 3) = R_k.transpose() * Xi_3 * R_wtoI * Dw; + F.block(bg_id, bg_id, 3, 3).setIdentity(); + + // for ba + F.block(th_id, ba_id, 3, 3) = dR_ktok1 * Jr_ktok1 * dt * R_wtoI * Dw * Tg * R_atoI * Da; + F.block(p_id, ba_id, 3, 3) = -R_k.transpose() * (Xi_2 + Xi_4 * R_wtoI * Dw * Tg) * R_atoI * Da; + F.block(v_id, ba_id, 3, 3) = -R_k.transpose() * (Xi_1 + Xi_3 * R_wtoI * Dw * Tg) * R_atoI * Da; + F.block(ba_id, ba_id, 3, 3).setIdentity(); + + // begin to add the state transition matrix for the omega intrinsics Dw part + if (Dw_id != -1) { + Eigen::MatrixXd H_Dw = compute_H_Dw(state, w_uncorrected); + F.block(th_id, Dw_id, 3, state->_calib_imu_dw->size()) = dR_ktok1 * Jr_ktok1 * dt * R_wtoI * H_Dw; + F.block(p_id, Dw_id, 3, state->_calib_imu_dw->size()) = -R_k.transpose() * Xi_4 * R_wtoI * H_Dw; + F.block(v_id, Dw_id, 3, state->_calib_imu_dw->size()) = -R_k.transpose() * Xi_3 * R_wtoI * H_Dw; + F.block(Dw_id, Dw_id, state->_calib_imu_dw->size(), state->_calib_imu_dw->size()).setIdentity(); + } + + // begin to add the state transition matrix for the acc intrinsics Da part + if (Da_id != -1) { + Eigen::MatrixXd H_Da = compute_H_Da(state, a_uncorrected); + F.block(th_id, Da_id, 3, state->_calib_imu_da->size()) = -dR_ktok1 * Jr_ktok1 * dt * R_wtoI * Dw * Tg * R_atoI * H_Da; + F.block(p_id, Da_id, 3, state->_calib_imu_da->size()) = R_k.transpose() * (Xi_2 + Xi_4 * R_wtoI * Dw * Tg) * R_atoI * H_Da; + F.block(v_id, Da_id, 3, state->_calib_imu_da->size()) = R_k.transpose() * (Xi_1 + Xi_3 * R_wtoI * Dw * Tg) * R_atoI * H_Da; + F.block(Da_id, Da_id, state->_calib_imu_da->size(), state->_calib_imu_da->size()).setIdentity(); + } + + // add the state transition matrix of the Tg part + if (Tg_id != -1) { + 
Eigen::MatrixXd H_Tg = compute_H_Tg(state, a_k); + F.block(th_id, Tg_id, 3, state->_calib_imu_tg->size()) = -dR_ktok1 * Jr_ktok1 * dt * R_wtoI * Dw * H_Tg; + F.block(p_id, Tg_id, 3, state->_calib_imu_tg->size()) = R_k.transpose() * Xi_4 * R_wtoI * Dw * H_Tg; + F.block(v_id, Tg_id, 3, state->_calib_imu_tg->size()) = R_k.transpose() * Xi_3 * R_wtoI * Dw * H_Tg; + F.block(Tg_id, Tg_id, state->_calib_imu_tg->size(), state->_calib_imu_tg->size()).setIdentity(); + } + + // begin to add the state transition matrix for the R_ACCtoIMU part + if (th_atoI_id != -1) { + F.block(th_id, th_atoI_id, 3, 3) = -dR_ktok1 * Jr_ktok1 * dt * R_wtoI * Dw * Tg * ov_core::skew_x(a_k); + F.block(p_id, th_atoI_id, 3, 3) = R_k.transpose() * (Xi_2 + Xi_4 * R_wtoI * Dw * Tg) * ov_core::skew_x(a_k); + F.block(v_id, th_atoI_id, 3, 3) = R_k.transpose() * (Xi_1 + Xi_3 * R_wtoI * Dw * Tg) * ov_core::skew_x(a_k); + F.block(th_atoI_id, th_atoI_id, 3, 3).setIdentity(); + } + + // begin to add the state transition matrix for the R_GYROtoIMU part + if (th_wtoI_id != -1) { + F.block(th_id, th_wtoI_id, 3, 3) = dR_ktok1 * Jr_ktok1 * dt * ov_core::skew_x(w_k); + F.block(p_id, th_wtoI_id, 3, 3) = -R_k.transpose() * Xi_4 * ov_core::skew_x(w_k); + F.block(v_id, th_wtoI_id, 3, 3) = -R_k.transpose() * Xi_3 * ov_core::skew_x(w_k); + F.block(th_wtoI_id, th_wtoI_id, 3, 3).setIdentity(); + } + + // construct the G part + G.block(th_id, 0, 3, 3) = -dR_ktok1 * Jr_ktok1 * dt * R_wtoI * Dw; + G.block(p_id, 0, 3, 3) = R_k.transpose() * Xi_4 * R_wtoI * Dw; + G.block(v_id, 0, 3, 3) = R_k.transpose() * Xi_3 * R_wtoI * Dw; + G.block(th_id, 3, 3, 3) = dR_ktok1 * Jr_ktok1 * dt * R_wtoI * Dw * Tg * R_atoI * Da; + G.block(p_id, 3, 3, 3) = -R_k.transpose() * (Xi_2 + Xi_4 * R_wtoI * Dw * Tg) * R_atoI * Da; + G.block(v_id, 3, 3, 3) = -R_k.transpose() * (Xi_1 + Xi_3 * R_wtoI * Dw * Tg) * R_atoI * Da; + G.block(bg_id, 6, 3, 3) = dt * Eigen::Matrix3d::Identity(); + G.block(ba_id, 9, 3, 3) = dt * Eigen::Matrix3d::Identity(); +} + +void 
Propagator::compute_F_and_G_discrete(std::shared_ptr state, double dt, const Eigen::Vector3d &w_hat, + const Eigen::Vector3d &a_hat, const Eigen::Vector3d &w_uncorrected, + const Eigen::Vector3d &a_uncorrected, const Eigen::Vector4d &new_q, const Eigen::Vector3d &new_v, + const Eigen::Vector3d &new_p, Eigen::MatrixXd &F, Eigen::MatrixXd &G) { + + // Get the locations of each entry of the imu state + int local_size = 0; + int th_id = local_size; + local_size += state->_imu->q()->size(); + int p_id = local_size; + local_size += state->_imu->p()->size(); + int v_id = local_size; + local_size += state->_imu->v()->size(); + int bg_id = local_size; + local_size += state->_imu->bg()->size(); + int ba_id = local_size; + local_size += state->_imu->ba()->size(); + + // If we are doing calibration, we can define their "local" id in the state transition + int Dw_id = -1; + int Da_id = -1; + int Tg_id = -1; + int th_atoI_id = -1; + int th_wtoI_id = -1; + if (state->_options.do_calib_imu_intrinsics) { + Dw_id = local_size; + local_size += state->_calib_imu_dw->size(); + Da_id = local_size; + local_size += state->_calib_imu_da->size(); + if (state->_options.do_calib_imu_g_sensitivity) { + Tg_id = local_size; + local_size += state->_calib_imu_tg->size(); + } + if (state->_options.imu_model == StateOptions::ImuModel::KALIBR) { + th_wtoI_id = local_size; + local_size += state->_calib_imu_GYROtoIMU->size(); + } else { + th_atoI_id = local_size; + local_size += state->_calib_imu_ACCtoIMU->size(); + } + } + + // This is the change in the orientation from the end of the last prop to the current prop + // This is needed since we need to include the "k-th" updated orientation information + Eigen::Matrix3d R_k = state->_imu->Rot(); + Eigen::Vector3d v_k = state->_imu->vel(); + Eigen::Vector3d p_k = state->_imu->pos(); + if (state->_options.do_fej) { + R_k = state->_imu->Rot_fej(); + v_k = state->_imu->vel_fej(); + p_k = state->_imu->pos_fej(); + } + Eigen::Matrix3d dR_ktok1 = 
quat_2_Rot(new_q) * R_k.transpose(); + + // This is the change in the orientation from the end of the last prop to the current prop + // This is needed since we need to include the "k-th" updated orientation information + Eigen::Matrix3d Dw = State::Dm(state->_options.imu_model, state->_calib_imu_dw->value()); + Eigen::Matrix3d Da = State::Dm(state->_options.imu_model, state->_calib_imu_da->value()); + Eigen::Matrix3d Tg = State::Tg(state->_calib_imu_tg->value()); + Eigen::Matrix3d R_atoI = state->_calib_imu_ACCtoIMU->Rot(); + Eigen::Matrix3d R_wtoI = state->_calib_imu_GYROtoIMU->Rot(); + Eigen::Vector3d a_k = R_atoI * Da * a_uncorrected; + Eigen::Vector3d w_k = R_wtoI * Dw * w_uncorrected; // contains gravity correction already + Eigen::Matrix3d Jr_ktok1 = Jr_so3(log_so3(dR_ktok1)); + + // for theta + F.block(th_id, th_id, 3, 3) = dR_ktok1; + // F.block(th_id, bg_id, 3, 3) = -dR_ktok1 * Jr_so3(w_hat * dt) * dt * R_wtoI_fej * Dw_fej; + F.block(th_id, bg_id, 3, 3) = -dR_ktok1 * Jr_ktok1 * dt * R_wtoI * Dw; + F.block(th_id, ba_id, 3, 3) = dR_ktok1 * Jr_ktok1 * dt * R_wtoI * Dw * Tg * R_atoI * Da; + + // for position + F.block(p_id, th_id, 3, 3) = -skew_x(new_p - p_k - v_k * dt + 0.5 * _gravity * dt * dt) * R_k.transpose(); + F.block(p_id, p_id, 3, 3).setIdentity(); + F.block(p_id, v_id, 3, 3) = Eigen::Matrix3d::Identity() * dt; + F.block(p_id, ba_id, 3, 3) = -0.5 * R_k.transpose() * dt * dt * R_atoI * Da; + + // for velocity + F.block(v_id, th_id, 3, 3) = -skew_x(new_v - v_k + _gravity * dt) * R_k.transpose(); + F.block(v_id, v_id, 3, 3).setIdentity(); + F.block(v_id, ba_id, 3, 3) = -R_k.transpose() * dt * R_atoI * Da; + + // for bg + F.block(bg_id, bg_id, 3, 3).setIdentity(); + + // for ba + F.block(ba_id, ba_id, 3, 3).setIdentity(); + + // begin to add the state transition matrix for the omega intrinsics Dw part + if (Dw_id != -1) { + Eigen::MatrixXd H_Dw = compute_H_Dw(state, w_uncorrected); + F.block(th_id, Dw_id, 3, state->_calib_imu_dw->size()) = dR_ktok1 * 
Jr_ktok1 * dt * R_wtoI * H_Dw; + F.block(Dw_id, Dw_id, state->_calib_imu_dw->size(), state->_calib_imu_dw->size()).setIdentity(); + } + + // begin to add the state transition matrix for the acc intrinsics Da part + if (Da_id != -1) { + Eigen::MatrixXd H_Da = compute_H_Da(state, a_uncorrected); + F.block(th_id, Da_id, 3, state->_calib_imu_da->size()) = -dR_ktok1 * Jr_ktok1 * dt * R_wtoI * Dw * Tg * R_atoI * H_Da; + F.block(p_id, Da_id, 3, state->_calib_imu_da->size()) = 0.5 * R_k.transpose() * dt * dt * R_atoI * H_Da; + F.block(v_id, Da_id, 3, state->_calib_imu_da->size()) = R_k.transpose() * dt * R_atoI * H_Da; + F.block(Da_id, Da_id, state->_calib_imu_da->size(), state->_calib_imu_da->size()).setIdentity(); + } + + // begin to add the state transition matrix for the gravity sensitivity Tg part + if (Tg_id != -1) { + Eigen::MatrixXd H_Tg = compute_H_Tg(state, a_k); + F.block(th_id, Tg_id, 3, state->_calib_imu_tg->size()) = -dR_ktok1 * Jr_ktok1 * dt * R_wtoI * Dw * H_Tg; + F.block(Tg_id, Tg_id, state->_calib_imu_tg->size(), state->_calib_imu_tg->size()).setIdentity(); + } + + // begin to add the state transition matrix for the R_ACCtoIMU part + if (th_atoI_id != -1) { + F.block(th_id, th_atoI_id, 3, 3) = -dR_ktok1 * Jr_ktok1 * dt * R_wtoI * Dw * Tg * ov_core::skew_x(a_k); + F.block(p_id, th_atoI_id, 3, 3) = 0.5 * R_k.transpose() * dt * dt * ov_core::skew_x(a_k); + F.block(v_id, th_atoI_id, 3, 3) = R_k.transpose() * dt * ov_core::skew_x(a_k); + F.block(th_atoI_id, th_atoI_id, 3, 3).setIdentity(); + } + + // begin to add the state transition matrix for the R_GYROtoIMU part + if (th_wtoI_id != -1) { + F.block(th_id, th_wtoI_id, 3, 3) = dR_ktok1 * Jr_ktok1 * dt * ov_core::skew_x(w_k); + F.block(th_wtoI_id, th_wtoI_id, 3, 3).setIdentity(); + } + + // Noise jacobian + G.block(th_id, 0, 3, 3) = -dR_ktok1 * Jr_ktok1 * dt * R_wtoI * Dw; + G.block(th_id, 3, 3, 3) = dR_ktok1 * Jr_ktok1 * dt * R_wtoI * Dw * Tg * R_atoI * Da; + G.block(v_id, 3, 3, 3) = -R_k.transpose() * dt * R_atoI
* Da; + G.block(p_id, 3, 3, 3) = -0.5 * R_k.transpose() * dt * dt * R_atoI * Da; + G.block(bg_id, 6, 3, 3) = dt * Eigen::Matrix3d::Identity(); + G.block(ba_id, 9, 3, 3) = dt * Eigen::Matrix3d::Identity(); +} + +Eigen::MatrixXd Propagator::compute_H_Dw(std::shared_ptr state, const Eigen::Vector3d &w_uncorrected) { + + Eigen::Matrix3d I_3x3 = Eigen::MatrixXd::Identity(3, 3); + Eigen::Vector3d e_1 = I_3x3.block(0, 0, 3, 1); + Eigen::Vector3d e_2 = I_3x3.block(0, 1, 3, 1); + Eigen::Vector3d e_3 = I_3x3.block(0, 2, 3, 1); + double w_1 = w_uncorrected(0); + double w_2 = w_uncorrected(1); + double w_3 = w_uncorrected(2); + assert(state->_options.do_calib_imu_intrinsics); + + Eigen::MatrixXd H_Dw = Eigen::MatrixXd::Zero(3, 6); + if (state->_options.imu_model == StateOptions::ImuModel::KALIBR) { + H_Dw << w_1 * I_3x3, w_2 * e_2, w_2 * e_3, w_3 * e_3; + } else { + H_Dw << w_1 * e_1, w_2 * e_1, w_2 * e_2, w_3 * I_3x3; + } + return H_Dw; +} + +Eigen::MatrixXd Propagator::compute_H_Da(std::shared_ptr state, const Eigen::Vector3d &a_uncorrected) { + + Eigen::Matrix3d I_3x3 = Eigen::MatrixXd::Identity(3, 3); + Eigen::Vector3d e_1 = I_3x3.block(0, 0, 3, 1); + Eigen::Vector3d e_2 = I_3x3.block(0, 1, 3, 1); + Eigen::Vector3d e_3 = I_3x3.block(0, 2, 3, 1); + double a_1 = a_uncorrected(0); + double a_2 = a_uncorrected(1); + double a_3 = a_uncorrected(2); + assert(state->_options.do_calib_imu_intrinsics); + + Eigen::MatrixXd H_Da = Eigen::MatrixXd::Zero(3, 6); + if (state->_options.imu_model == StateOptions::ImuModel::KALIBR) { + H_Da << a_1 * I_3x3, a_2 * e_2, a_2 * e_3, a_3 * e_3; + } else { + H_Da << a_1 * e_1, a_2 * e_1, a_2 * e_2, a_3 * I_3x3; + } + return H_Da; +} + +Eigen::MatrixXd Propagator::compute_H_Tg(std::shared_ptr state, const Eigen::Vector3d &a_inI) { + + Eigen::Matrix3d I_3x3 = Eigen::MatrixXd::Identity(3, 3); + double a_1 = a_inI(0); + double a_2 = a_inI(1); + double a_3 = a_inI(2); + assert(state->_options.do_calib_imu_intrinsics); + 
assert(state->_options.do_calib_imu_g_sensitivity); + + Eigen::MatrixXd H_Tg = Eigen::MatrixXd::Zero(3, 9); + H_Tg << a_1 * I_3x3, a_2 * I_3x3, a_3 * I_3x3; + return H_Tg; +} \ No newline at end of file diff --git a/ov_msckf/src/state/Propagator.h b/ov_msckf/src/state/Propagator.h index e63062992..26b4af316 100644 --- a/ov_msckf/src/state/Propagator.h +++ b/ov_msckf/src/state/Propagator.h @@ -26,11 +26,10 @@ #include #include -#include "utils/NoiseManager.h" - -#include "utils/print.h" #include "utils/sensor_data.h" +#include "utils/NoiseManager.h" + namespace ov_msckf { class State; @@ -43,7 +42,6 @@ class State; * For derivations look at @ref propagation page which has detailed equations. */ class Propagator { - public: /** * @brief Default constructor @@ -95,9 +93,7 @@ class Propagator { /** * @brief Will invalidate the cache used for fast propagation */ - void invalidate_cache() { - cache_imu_valid = false; - } + void invalidate_cache() { cache_imu_valid = false; } /** * @brief Propagate state up to given timestamp and then clone @@ -167,8 +163,63 @@ class Propagator { return data; } -protected: + /** + * @brief compute the Jacobians for Dw + * + * See @ref analytical_linearization_imu for details. + * \f{align*}{ + * \mathbf{H}_{Dw,kalibr} & = + * \begin{bmatrix} + * {}^w\hat{w}_1 \mathbf{I}_3 & {}^w\hat{w}_2\mathbf{e}_2 & {}^w\hat{w}_2\mathbf{e}_3 & {}^w\hat{w}_3 \mathbf{e}_3 + * \end{bmatrix} \\ + * \mathbf{H}_{Dw,rpng} & = + * \begin{bmatrix} + * {}^w\hat{w}_1\mathbf{e}_1 & {}^w\hat{w}_2\mathbf{e}_1 & {}^w\hat{w}_2\mathbf{e}_2 & {}^w\hat{w}_3 \mathbf{I}_3 + * \end{bmatrix} + * \f} + * + * @param state Pointer to state + * @param w_uncorrected Angular velocity in a frame with bias and gravity sensitivity removed + */ + static Eigen::MatrixXd compute_H_Dw(std::shared_ptr state, const Eigen::Vector3d &w_uncorrected); + /** + * @brief compute the Jacobians for Da + * + * See @ref analytical_linearization_imu for details. 
+ * \f{align*}{ + * \mathbf{H}_{Da,kalibr} & = + * \begin{bmatrix} + * {}^a\hat{a}_1 \mathbf{I}_3 & {}^a\hat{a}_2\mathbf{e}_2 & {}^a\hat{a}_2\mathbf{e}_3 & {}^a\hat{a}_3\mathbf{e}_3 + * \end{bmatrix} \\ + * \mathbf{H}_{Da,rpng} & = + * \begin{bmatrix} + * {}^a\hat{a}_1\mathbf{e}_1 & {}^a\hat{a}_2\mathbf{e}_1 & {}^a\hat{a}_2\mathbf{e}_2 & {}^a\hat{a}_3 \mathbf{I}_3 + * \end{bmatrix} + * \f} + * + * @param state Pointer to state + * @param a_uncorrected Linear acceleration in gyro frame with bias removed + */ + static Eigen::MatrixXd compute_H_Da(std::shared_ptr state, const Eigen::Vector3d &a_uncorrected); + + /** + * @brief compute the Jacobians for Tg + * + * See @ref analytical_linearization_imu for details. + * \f{align*}{ + * \mathbf{H}_{Tg} & = + * \begin{bmatrix} + * {}^I\hat{a}_1 \mathbf{I}_3 & {}^I\hat{a}_2 \mathbf{I}_3 & {}^I\hat{a}_3 \mathbf{I}_3 + * \end{bmatrix} + * \f} + * + * @param state Pointer to state + * @param a_inI Linear acceleration with bias removed + */ + static Eigen::MatrixXd compute_H_Tg(std::shared_ptr state, const Eigen::Vector3d &a_inI); + +protected: + /** + * @brief Propagates the state forward using the imu data and computes the noise covariance and state-transition + * matrix of this interval. @@ -176,7 +227,8 @@ class Propagator { * This function can be replaced with analytical/numerical integration or when using a different state representation. * This contains our state transition matrix along with how our noise evolves in time. * If you have other state variables besides the IMU that evolve you would add them here. - * See the @ref error_prop page for details on how this was derived. + * See the @ref propagation_discrete page for details on how discrete model was derived. + * See the @ref propagation_analytical page for details on how analytic model was derived.
* * @param state Pointer to state * @param data_minus imu readings at beginning of interval @@ -185,12 +237,12 @@ class Propagator { * @param Qd Discrete-time noise covariance over the interval */ void predict_and_compute(std::shared_ptr state, const ov_core::ImuData &data_minus, const ov_core::ImuData &data_plus, - Eigen::Matrix &F, Eigen::Matrix &Qd); + Eigen::MatrixXd &F, Eigen::MatrixXd &Qd); /** * @brief Discrete imu mean propagation. * - * See @ref propagation for details on these equations. + * See @ref disc_prop for details on these equations. * \f{align*}{ * \text{}^{I_{k+1}}_{G}\hat{\bar{q}} * &= \exp\bigg(\frac{1}{2}\boldsymbol{\Omega}\big({\boldsymbol{\omega}}_{m,k}-\hat{\mathbf{b}}_{g,k}\big)\Delta t\bigg) @@ -205,25 +257,22 @@ class Propagator { * * @param state Pointer to state * @param dt Time we should integrate over - * @param w_hat1 Angular velocity with bias removed - * @param a_hat1 Linear acceleration with bias removed - * @param w_hat2 Next angular velocity with bias removed - * @param a_hat2 Next linear acceleration with bias removed + * @param w_hat Angular velocity with bias removed + * @param a_hat Linear acceleration with bias removed * @param new_q The resulting new orientation after integration * @param new_v The resulting new velocity after integration * @param new_p The resulting new position after integration */ - void predict_mean_discrete(std::shared_ptr state, double dt, const Eigen::Vector3d &w_hat1, const Eigen::Vector3d &a_hat1, - const Eigen::Vector3d &w_hat2, const Eigen::Vector3d &a_hat2, Eigen::Vector4d &new_q, Eigen::Vector3d &new_v, - Eigen::Vector3d &new_p); + void predict_mean_discrete(std::shared_ptr state, double dt, const Eigen::Vector3d &w_hat, const Eigen::Vector3d &a_hat, + Eigen::Vector4d &new_q, Eigen::Vector3d &new_v, Eigen::Vector3d &new_p); /** * @brief RK4 imu mean propagation. * * See this wikipedia page on [Runge-Kutta Methods](https://en.wikipedia.org/wiki/Runge%E2%80%93Kutta_methods). 
- * We are doing a RK4 method, [this wolframe page](http://mathworld.wolfram.com/Runge-KuttaMethod.html) has the forth order equation + * We are doing a RK4 method, [this wolfram page](http://mathworld.wolfram.com/Runge-KuttaMethod.html) has the forth order equation * defined below. We define function \f$ f(t,y) \f$ where y is a function of time t, see @ref imu_kinematic for the definition of the - * continous-time functions. + * continuous-time functions. * * \f{align*}{ * {k_1} &= f({t_0}, y_0) \Delta t \\ @@ -247,6 +296,141 @@ class Propagator { const Eigen::Vector3d &w_hat2, const Eigen::Vector3d &a_hat2, Eigen::Vector4d &new_q, Eigen::Vector3d &new_v, Eigen::Vector3d &new_p); + /** + * @brief Analytically compute the integration components based on ACI^2 + * + * See the @ref analytical_prop page and @ref analytical_integration_components for details. + * For computing Xi_1, Xi_2, Xi_3 and Xi_4 we have: + * + * \f{align*}{ + * \boldsymbol{\Xi}_1 & = \mathbf{I}_3 \delta t + \frac{1 - \cos (\hat{\omega} \delta t)}{\hat{\omega}} \lfloor \hat{\mathbf{k}} \rfloor + * + \left(\delta t - \frac{\sin (\hat{\omega} \delta t)}{\hat{\omega}}\right) \lfloor \hat{\mathbf{k}} \rfloor^2 \\ + * \boldsymbol{\Xi}_2 & = \frac{1}{2} \delta t^2 \mathbf{I}_3 + + * \frac{\hat{\omega} \delta t - \sin (\hat{\omega} \delta t)}{\hat{\omega}^2}\lfloor \hat{\mathbf{k}} \rfloor + * + \left( \frac{1}{2} \delta t^2 - \frac{1 - \cos (\hat{\omega} \delta t)}{\hat{\omega}^2} \right) \lfloor \hat{\mathbf{k}} \rfloor ^2 + * \\ \boldsymbol{\Xi}_3 &= \frac{1}{2}\delta t^2 \lfloor \hat{\mathbf{a}} \rfloor + * + \frac{\sin (\hat{\omega} \delta t_i) - \hat{\omega} \delta t }{\hat{\omega}^2} \lfloor\hat{\mathbf{a}} \rfloor \lfloor + * \hat{\mathbf{k}} \rfloor + * + \frac{\sin (\hat{\omega} \delta t) - \hat{\omega} \delta t \cos (\hat{\omega} \delta t) }{\hat{\omega}^2} + * \lfloor \hat{\mathbf{k}} \rfloor\lfloor\hat{\mathbf{a}} \rfloor + * + \left( \frac{1}{2} \delta t^2 - \frac{1 - \cos (\hat{\omega} 
\delta t)}{\hat{\omega}^2} \right) \lfloor\hat{\mathbf{a}} \rfloor + * \lfloor \hat{\mathbf{k}} \rfloor ^2 + * + \left( + * \frac{1}{2} \delta t^2 + \frac{1 - \cos (\hat{\omega} \delta t) - \hat{\omega} \delta t \sin (\hat{\omega} \delta t) }{\hat{\omega}^2} + * \right) + * \lfloor \hat{\mathbf{k}} \rfloor ^2 \lfloor\hat{\mathbf{a}} \rfloor + * + \left( + * \frac{1}{2} \delta t^2 + \frac{1 - \cos (\hat{\omega} \delta t) - \hat{\omega} \delta t \sin (\hat{\omega} \delta t) }{\hat{\omega}^2} + * \right) \hat{\mathbf{k}}^{\top} \hat{\mathbf{a}} \lfloor \hat{\mathbf{k}} \rfloor + * - \frac{ 3 \sin (\hat{\omega} \delta t) - 2 \hat{\omega} \delta t - \hat{\omega} \delta t \cos (\hat{\omega} \delta t) + * }{\hat{\omega}^2} \hat{\mathbf{k}}^{\top} \hat{\mathbf{a}} \lfloor \hat{\mathbf{k}} \rfloor ^2 \\ + * \boldsymbol{\Xi}_4 & = \frac{1}{6}\delta + * t^3 \lfloor\hat{\mathbf{a}} \rfloor + * + \frac{2(1 - \cos (\hat{\omega} \delta t)) - (\hat{\omega}^2 \delta t^2)}{2 \hat{\omega}^3} + * \lfloor\hat{\mathbf{a}} \rfloor \lfloor \hat{\mathbf{k}} \rfloor + * + \left( + * \frac{2(1- \cos(\hat{\omega} \delta t)) - \hat{\omega} \delta t \sin (\hat{\omega} \delta t)}{\hat{\omega}^3} + * \right) + * \lfloor \hat{\mathbf{k}} \rfloor\lfloor\hat{\mathbf{a}} \rfloor + * + \left( + * \frac{\sin (\hat{\omega} \delta t) - \hat{\omega} \delta t}{\hat{\omega}^3} + + * \frac{\delta t^3}{6} + * \right) + * \lfloor\hat{\mathbf{a}} \rfloor \lfloor \hat{\mathbf{k}} \rfloor^2 + * + + * \frac{\hat{\omega} \delta t - 2 \sin(\hat{\omega} \delta t) + \frac{1}{6}(\hat{\omega} \delta t)^3 + \hat{\omega} \delta t + * \cos(\hat{\omega} \delta t)}{\hat{\omega}^3} \lfloor \hat{\mathbf{k}} \rfloor^2\lfloor\hat{\mathbf{a}} \rfloor + * + + * \frac{\hat{\omega} \delta t - 2 \sin(\hat{\omega} \delta t) + \frac{1}{6}(\hat{\omega} \delta t)^3 + \hat{\omega} \delta t + * \cos(\hat{\omega} \delta t)}{\hat{\omega}^3} \hat{\mathbf{k}}^{\top} \hat{\mathbf{a}} \lfloor \hat{\mathbf{k}} \rfloor + * + + * \frac{4 
\cos(\hat{\omega} \delta t) - 4 + (\hat{\omega} \delta t)^2 + \hat{\omega} \delta t \sin(\hat{\omega} \delta t) } + * {\hat{\omega}^3} + * \hat{\mathbf{k}}^{\top} \hat{\mathbf{a}} \lfloor \hat{\mathbf{k}} \rfloor^2 + * \f} + * + * @param state Pointer to state + * @param dt Time we should integrate over + * @param w_hat Angular velocity with bias removed + * @param a_hat Linear acceleration with bias removed + * @param Xi_sum All the needed integration components, including R_k, Xi_1, Xi_2, Jr, Xi_3, Xi_4 in order + */ + void compute_Xi_sum(std::shared_ptr state, double dt, const Eigen::Vector3d &w_hat, const Eigen::Vector3d &a_hat, + Eigen::Matrix &Xi_sum); + + /** + * @brief Analytically predict IMU mean based on ACI^2 + * + * See the @ref analytical_prop page for details. + * + * \f{align*}{ + * {}^{I_{k+1}}_G\hat{\mathbf{R}} & \simeq \Delta \mathbf{R}_k {}^{I_k}_G\hat{\mathbf{R}} \\ + * {}^G\hat{\mathbf{p}}_{I_{k+1}} & \simeq {}^{G}\hat{\mathbf{p}}_{I_k} + {}^G\hat{\mathbf{v}}_{I_k}\delta t_k + + * {}^{I_k}_G\hat{\mathbf{R}}^\top \Delta \hat{\mathbf{p}}_k - \frac{1}{2}{}^G\mathbf{g}\delta t^2_k \\ + * {}^G\hat{\mathbf{v}}_{I_{k+1}} & \simeq {}^{G}\hat{\mathbf{v}}_{I_k} + {}^{I_k}_G\hat{\mathbf{R}}^\top + \Delta \hat{\mathbf{v}}_k - + * {}^G\mathbf{g}\delta t_k + * \f} + * + * @param state Pointer to state + * @param dt Time we should integrate over + * @param w_hat Angular velocity with bias removed + * @param a_hat Linear acceleration with bias removed + * @param new_q The resulting new orientation after integration + * @param new_v The resulting new velocity after integration + * @param new_p The resulting new position after integration + * @param Xi_sum All the needed integration components, including R_k, Xi_1, Xi_2, Jr, Xi_3, Xi_4 + */ + void predict_mean_analytic(std::shared_ptr state, double dt, const Eigen::Vector3d &w_hat, const Eigen::Vector3d &a_hat, + Eigen::Vector4d &new_q, Eigen::Vector3d &new_v, Eigen::Vector3d &new_p, Eigen::Matrix &Xi_sum); + + 
/** + * @brief Analytically compute state transition matrix F and noise Jacobian G based on ACI^2 + * + * This function is for analytical integration of the linearized error-state. + * This contains our state transition matrix and noise Jacobians. + * If you have other state variables besides the IMU that evolve you would add them here. + * See the @ref analytical_linearization page for details on how this was derived. + * + * @param state Pointer to state + * @param dt Time we should integrate over + * @param w_hat Angular velocity with bias removed + * @param a_hat Linear acceleration with bias removed + * @param w_uncorrected Angular velocity in acc frame with bias and gravity sensitivity removed + * @param new_q The resulting new orientation after integration + * @param new_v The resulting new velocity after integration + * @param new_p The resulting new position after integration + * @param Xi_sum All the needed integration components, including R_k, Xi_1, Xi_2, Jr, Xi_3, Xi_4 + * @param F State transition matrix + * @param G Noise Jacobian + */ + void compute_F_and_G_analytic(std::shared_ptr state, double dt, const Eigen::Vector3d &w_hat, const Eigen::Vector3d &a_hat, + const Eigen::Vector3d &w_uncorrected, const Eigen::Vector3d &a_uncorrected, const Eigen::Vector4d &new_q, + const Eigen::Vector3d &new_v, const Eigen::Vector3d &new_p, const Eigen::Matrix &Xi_sum, + Eigen::MatrixXd &F, Eigen::MatrixXd &G); + + /** + * @brief compute state transition matrix F and noise Jacobian G + * + * This function is for analytical integration or when using a different state representation. + * This contains our state transition matrix and noise Jacobians. + * If you have other state variables besides the IMU that evolve you would add them here. + * See the @ref error_prop page for details on how this was derived. 
+ * + * @param state Pointer to state + * @param dt Time we should integrate over + * @param w_hat Angular velocity with bias removed + * @param a_hat Linear acceleration with bias removed + * @param w_uncorrected Angular velocity in acc frame with bias and gravity sensitivity removed + * @param new_q The resulting new orientation after integration + * @param new_v The resulting new velocity after integration + * @param new_p The resulting new position after integration + * @param F State transition matrix + * @param G Noise Jacobian + */ + void compute_F_and_G_discrete(std::shared_ptr state, double dt, const Eigen::Vector3d &w_hat, const Eigen::Vector3d &a_hat, + const Eigen::Vector3d &w_uncorrected, const Eigen::Vector3d &a_uncorrected, const Eigen::Vector4d &new_q, + const Eigen::Vector3d &new_v, const Eigen::Vector3d &new_p, Eigen::MatrixXd &F, Eigen::MatrixXd &G); + /// Container for the noise values NoiseManager _noises; diff --git a/ov_msckf/src/state/State.cpp b/ov_msckf/src/state/State.cpp index 2c0c83e48..f8c4612fa 100644 --- a/ov_msckf/src/state/State.cpp +++ b/ov_msckf/src/state/State.cpp @@ -37,6 +37,63 @@ State::State(StateOptions &options) { _variables.push_back(_imu); current_id += _imu->size(); + // Append the imu intrinsics to the state and covariance + // NOTE: these need to be right "next" to the IMU state in the covariance + // NOTE: since if calibrating these will evolve / be correlated during propagation + _calib_imu_dw = std::make_shared(6); + _calib_imu_da = std::make_shared(6); + if (options.imu_model == StateOptions::ImuModel::KALIBR) { + // lower triangular of the matrix (column-wise) + Eigen::Matrix _imu_default = Eigen::Matrix::Zero(); + _imu_default << 1.0, 0.0, 0.0, 1.0, 0.0, 1.0; + _calib_imu_dw->set_value(_imu_default); + _calib_imu_dw->set_fej(_imu_default); + _calib_imu_da->set_value(_imu_default); + _calib_imu_da->set_fej(_imu_default); + } else { + // upper triangular of the matrix (column-wise) + Eigen::Matrix _imu_default = 
Eigen::Matrix::Zero(); + _imu_default << 1.0, 0.0, 0.0, 1.0, 0.0, 1.0; + _calib_imu_dw->set_value(_imu_default); + _calib_imu_dw->set_fej(_imu_default); + _calib_imu_da->set_value(_imu_default); + _calib_imu_da->set_fej(_imu_default); + } + _calib_imu_tg = std::make_shared(9); + _calib_imu_GYROtoIMU = std::make_shared(); + _calib_imu_ACCtoIMU = std::make_shared(); + if (options.do_calib_imu_intrinsics) { + + // Gyroscope dw + _calib_imu_dw->set_local_id(current_id); + _variables.push_back(_calib_imu_dw); + current_id += _calib_imu_dw->size(); + + // Accelerometer da + _calib_imu_da->set_local_id(current_id); + _variables.push_back(_calib_imu_da); + current_id += _calib_imu_da->size(); + + // Gyroscope gravity sensitivity + if (options.do_calib_imu_g_sensitivity) { + _calib_imu_tg->set_local_id(current_id); + _variables.push_back(_calib_imu_tg); + current_id += _calib_imu_tg->size(); + } + + // If kalibr model, R_GYROtoIMU is calibrated + // If rpng model, R_ACCtoIMU is calibrated + if (options.imu_model == StateOptions::ImuModel::KALIBR) { + _calib_imu_GYROtoIMU->set_local_id(current_id); + _variables.push_back(_calib_imu_GYROtoIMU); + current_id += _calib_imu_GYROtoIMU->size(); + } else { + _calib_imu_ACCtoIMU->set_local_id(current_id); + _variables.push_back(_calib_imu_ACCtoIMU); + current_id += _calib_imu_ACCtoIMU->size(); + } + } + // Camera to IMU time offset _calib_dt_CAMtoIMU = std::make_shared(1); if (_options.do_calib_camera_timeoffset) { @@ -77,6 +134,18 @@ State::State(StateOptions &options) { _Cov = std::pow(1e-3, 2) * Eigen::MatrixXd::Identity(current_id, current_id); // Finally, set some of our priors for our calibration parameters + if (_options.do_calib_imu_intrinsics) { + _Cov.block(_calib_imu_dw->id(), _calib_imu_dw->id(), 6, 6) = std::pow(0.005, 2) * Eigen::Matrix::Identity(); + _Cov.block(_calib_imu_da->id(), _calib_imu_da->id(), 6, 6) = std::pow(0.008, 2) * Eigen::Matrix::Identity(); + if (_options.do_calib_imu_g_sensitivity) { + 
_Cov.block(_calib_imu_tg->id(), _calib_imu_tg->id(), 9, 9) = std::pow(0.005, 2) * Eigen::Matrix::Identity(); + } + if (_options.imu_model == StateOptions::ImuModel::KALIBR) { + _Cov.block(_calib_imu_GYROtoIMU->id(), _calib_imu_GYROtoIMU->id(), 3, 3) = std::pow(0.005, 2) * Eigen::Matrix3d::Identity(); + } else { + _Cov.block(_calib_imu_ACCtoIMU->id(), _calib_imu_ACCtoIMU->id(), 3, 3) = std::pow(0.005, 2) * Eigen::Matrix3d::Identity(); + } + } if (_options.do_calib_camera_timeoffset) { _Cov(_calib_dt_CAMtoIMU->id(), _calib_dt_CAMtoIMU->id()) = std::pow(0.01, 2); } @@ -84,7 +153,7 @@ State::State(StateOptions &options) { for (int i = 0; i < _options.num_cameras; i++) { _Cov.block(_calib_IMUtoCAM.at(i)->id(), _calib_IMUtoCAM.at(i)->id(), 3, 3) = std::pow(0.005, 2) * Eigen::MatrixXd::Identity(3, 3); _Cov.block(_calib_IMUtoCAM.at(i)->id() + 3, _calib_IMUtoCAM.at(i)->id() + 3, 3, 3) = - std::pow(0.01, 2) * Eigen::MatrixXd::Identity(3, 3); + std::pow(0.015, 2) * Eigen::MatrixXd::Identity(3, 3); } } if (_options.do_calib_camera_intrinsics) { diff --git a/ov_msckf/src/state/State.h b/ov_msckf/src/state/State.h index 0737db5cc..f83dee5ce 100644 --- a/ov_msckf/src/state/State.h +++ b/ov_msckf/src/state/State.h @@ -80,10 +80,64 @@ class State { */ int max_covariance_size() { return (int)_Cov.rows(); } + /** + * @brief Gyroscope and accelerometer intrinsic matrix (scale imperfection and axis misalignment) + * + * If kalibr model, lower triangular of the matrix is used + * If rpng model, upper triangular of the matrix is used + * + * @return 3x3 matrix of current imu gyroscope / accelerometer intrinsics + */ + static Eigen::Matrix3d Dm(StateOptions::ImuModel imu_model, const Eigen::MatrixXd &vec) { + assert(vec.rows() == 6); + assert(vec.cols() == 1); + Eigen::Matrix3d D_matrix = Eigen::Matrix3d::Identity(); + if (imu_model == StateOptions::ImuModel::KALIBR) { + D_matrix << vec(0), 0, 0, vec(1), vec(3), 0, vec(2), vec(4), vec(5); + } else { + D_matrix << vec(0), vec(1), vec(3), 
0, vec(2), vec(4), 0, 0, vec(5); + } + return D_matrix; + } + + /** + * @brief Gyroscope gravity sensitivity + * + * For both kalibr and rpng models, this a 3x3 that is column-wise filled. + * + * @return 3x3 matrix of current gravity sensitivity + */ + static Eigen::Matrix3d Tg(const Eigen::MatrixXd &vec) { + assert(vec.rows() == 9); + assert(vec.cols() == 1); + Eigen::Matrix3d Tg = Eigen::Matrix3d::Zero(); + Tg << vec(0), vec(3), vec(6), vec(1), vec(4), vec(7), vec(2), vec(5), vec(8); + return Tg; + } + + /** + * @brief Calculates the error state size for imu intrinsics. + * + * This is used to construct our state transition which depends on if we are estimating calibration. + * 15 if doing intrinsics, another +9 if doing grav sensitivity + * + * @return size of error state + */ + int imu_intrinsic_size() const { + int sz = 0; + if (_options.do_calib_imu_intrinsics) { + sz += 15; + if (_options.do_calib_imu_g_sensitivity) { + sz += 9; + } + } + return sz; + } + /// Mutex for locking access to the state std::mutex _mutex_state; - /// Current timestamp (should be the last update time!) + /// Current timestamp (should be the last update time in camera clock frame!) 
double _timestamp = -1; /// Struct containing filter options @@ -110,6 +164,21 @@ class State { /// Camera intrinsics camera objects std::unordered_map> _cam_intrinsics_cameras; + /// Gyroscope IMU intrinsics (scale imperfection and axis misalignment) + std::shared_ptr _calib_imu_dw; + + /// Accelerometer IMU intrinsics (scale imperfection and axis misalignment) + std::shared_ptr _calib_imu_da; + + /// Gyroscope gravity sensitivity + std::shared_ptr _calib_imu_tg; + + /// Rotation from gyroscope frame to the "IMU" accelerometer frame (kalibr model) + std::shared_ptr _calib_imu_GYROtoIMU; + + /// Rotation from accelerometer to the "IMU" gyroscope frame frame (rpng model) + std::shared_ptr _calib_imu_ACCtoIMU; + private: // Define that the state helper is a friend class of this class // This will allow it to access the below functions which should normally not be called diff --git a/ov_msckf/src/state/StateHelper.h b/ov_msckf/src/state/StateHelper.h index 06b2bee58..6c74013a0 100644 --- a/ov_msckf/src/state/StateHelper.h +++ b/ov_msckf/src/state/StateHelper.h @@ -82,8 +82,8 @@ class StateHelper { * @param state Pointer to state * @param H_order Variable ordering used in the compressed Jacobian * @param H Condensed Jacobian of updating measurement - * @param res residual of updating measurement - * @param R updating measurement covariance + * @param res Residual of updating measurement + * @param R Updating measurement covariance */ static void EKFUpdate(std::shared_ptr state, const std::vector> &H_order, const Eigen::MatrixXd &H, const Eigen::VectorXd &res, const Eigen::MatrixXd &R); @@ -107,7 +107,7 @@ class StateHelper { * * @param state Pointer to state * @param small_variables Vector of variables whose marginal covariance is desired - * @return marginal covariance of the passed variables + * @return Marginal covariance of the passed variables */ static Eigen::MatrixXd get_marginal_covariance(std::shared_ptr state, const std::vector> &small_variables); @@ -120,7 
+120,7 @@ class StateHelper { * Please use the other interface functions in the StateHelper to progamatically change to covariance. * * @param state Pointer to state - * @return covariance of current state + * @return Covariance of current state */ static Eigen::MatrixXd get_full_covariance(std::shared_ptr state); @@ -190,8 +190,9 @@ class StateHelper { * After propagation, normally we augment the state with an new clone that is at the new update timestep. * This augmentation clones the IMU pose and adds it to our state's clone map. * If we are doing time offset calibration we also make our cloning a function of the time offset. - * Time offset logic is based on Mingyang Li and Anastasios I. Mourikis paper: - * http://journals.sagepub.com/doi/pdf/10.1177/0278364913515286 We can write the current clone at the true imu base clock time as the + * Time offset logic is based on Li and Mourikis @cite Li2014IJRR. + * + * We can write the current clone at the true imu base clock time as the * follow: \f{align*}{ * {}^{I_{t+t_d}}_G\bar{q} &= \begin{bmatrix}\frac{1}{2} {}^{I_{t+\hat{t}_d}}\boldsymbol\omega \tilde{t}_d \\ * 1\end{bmatrix}\otimes{}^{I_{t+\hat{t}_d}}_G\bar{q} \\ diff --git a/ov_msckf/src/state/StateOptions.h b/ov_msckf/src/state/StateOptions.h index 3f68c78ef..95836e9f9 100644 --- a/ov_msckf/src/state/StateOptions.h +++ b/ov_msckf/src/state/StateOptions.h @@ -25,8 +25,7 @@ #include "types/LandmarkRepresentation.h" #include "utils/opencv_yaml_parse.h" #include "utils/print.h" - -#include +#include "utils/sensor_data.h" namespace ov_msckf { @@ -38,11 +37,11 @@ struct StateOptions { /// Bool to determine whether or not to do first estimate Jacobians bool do_fej = true; - /// Bool to determine whether or not use imu message averaging - bool imu_avg = false; + /// Numerical integration methods + enum IntegrationMethod { DISCRETE, RK4, ANALYTICAL }; - /// Bool to determine if we should use Rk4 imu integration - bool use_rk4_integration = true; + /// What type of 
numerical integration is used during propagation + IntegrationMethod integration_method = IntegrationMethod::RK4; /// Bool to determine whether or not to calibrate imu-to-camera pose bool do_calib_camera_pose = false; @@ -53,6 +52,18 @@ struct StateOptions { /// Bool to determine whether or not to calibrate camera to IMU time offset bool do_calib_camera_timeoffset = false; + /// Bool to determine whether or not to calibrate the IMU intrinsics + bool do_calib_imu_intrinsics = false; + + /// Bool to determine whether or not to calibrate the Gravity sensitivity + bool do_calib_imu_g_sensitivity = false; + + /// IMU intrinsic models + enum ImuModel { KALIBR, RPNG }; + + /// What model our IMU intrinsics are + ImuModel imu_model = ImuModel::KALIBR; + /// Max clone size of sliding window int max_clone_size = 11; @@ -84,17 +95,38 @@ struct StateOptions { void print(const std::shared_ptr &parser = nullptr) { if (parser != nullptr) { parser->parse_config("use_fej", do_fej); - parser->parse_config("use_imuavg", imu_avg); - parser->parse_config("use_rk4int", use_rk4_integration); + + // Integration method + std::string integration_str = "rk4"; + parser->parse_config("integration", integration_str); + if (integration_str == "discrete") { + integration_method = IntegrationMethod::DISCRETE; + } else if (integration_str == "rk4") { + integration_method = IntegrationMethod::RK4; + } else if (integration_str == "analytical") { + integration_method = IntegrationMethod::ANALYTICAL; + } else { + PRINT_ERROR(RED "invalid imu integration model: %s\n" RESET, integration_str.c_str()); + PRINT_ERROR(RED "please select a valid model: discrete, rk4, analytical\n" RESET); + std::exit(EXIT_FAILURE); + } + + // Calibration booleans parser->parse_config("calib_cam_extrinsics", do_calib_camera_pose); parser->parse_config("calib_cam_intrinsics", do_calib_camera_intrinsics); parser->parse_config("calib_cam_timeoffset", do_calib_camera_timeoffset); + parser->parse_config("calib_imu_intrinsics", 
do_calib_imu_intrinsics); + parser->parse_config("calib_imu_g_sensitivity", do_calib_imu_g_sensitivity); + + // State parameters parser->parse_config("max_clones", max_clone_size); parser->parse_config("max_slam", max_slam_features); parser->parse_config("max_slam_in_update", max_slam_in_update); parser->parse_config("max_msckf_in_update", max_msckf_in_update); parser->parse_config("num_aruco", max_aruco_features); parser->parse_config("max_cameras", num_cameras); + + // Feature representations std::string rep1 = ov_type::LandmarkRepresentation::as_string(feat_rep_msckf); parser->parse_config("feat_rep_msckf", rep1); feat_rep_msckf = ov_type::LandmarkRepresentation::from_string(rep1); @@ -104,13 +136,33 @@ struct StateOptions { std::string rep3 = ov_type::LandmarkRepresentation::as_string(feat_rep_aruco); parser->parse_config("feat_rep_aruco", rep3); feat_rep_aruco = ov_type::LandmarkRepresentation::from_string(rep3); + + // IMU model + std::string imu_model_str = "kalibr"; + parser->parse_external("relative_config_imu", "imu0", "model", imu_model_str); + if (imu_model_str == "kalibr" || imu_model_str == "calibrated") { + imu_model = ImuModel::KALIBR; + } else if (imu_model_str == "rpng") { + imu_model = ImuModel::RPNG; + } else { + PRINT_ERROR(RED "invalid imu model: %s\n" RESET, imu_model_str.c_str()); + PRINT_ERROR(RED "please select a valid model: kalibr, rpng\n" RESET); + std::exit(EXIT_FAILURE); + } + if (imu_model_str == "calibrated" && (do_calib_imu_intrinsics || do_calib_imu_g_sensitivity)) { + PRINT_ERROR(RED "calibrated IMU model selected, but requested calibration!\n" RESET); + PRINT_ERROR(RED "please select what model you have: kalibr, rpng\n" RESET); + std::exit(EXIT_FAILURE); + } } PRINT_DEBUG(" - use_fej: %d\n", do_fej); - PRINT_DEBUG(" - use_imuavg: %d\n", imu_avg); - PRINT_DEBUG(" - use_rk4int: %d\n", use_rk4_integration); + PRINT_DEBUG(" - integration: %d\n", integration_method); PRINT_DEBUG(" - calib_cam_extrinsics: %d\n",
do_calib_camera_pose); PRINT_DEBUG(" - calib_cam_intrinsics: %d\n", do_calib_camera_intrinsics); PRINT_DEBUG(" - calib_cam_timeoffset: %d\n", do_calib_camera_timeoffset); + PRINT_DEBUG(" - calib_imu_intrinsics: %d\n", do_calib_imu_intrinsics); + PRINT_DEBUG(" - calib_imu_g_sensitivity: %d\n", do_calib_imu_g_sensitivity); + PRINT_DEBUG(" - imu_model: %d\n", imu_model); PRINT_DEBUG(" - max_clones: %d\n", max_clone_size); PRINT_DEBUG(" - max_slam: %d\n", max_slam_features); PRINT_DEBUG(" - max_slam_in_update: %d\n", max_slam_in_update); diff --git a/ov_msckf/src/update/UpdaterZeroVelocity.cpp b/ov_msckf/src/update/UpdaterZeroVelocity.cpp index b3a0565a2..3c5584626 100644 --- a/ov_msckf/src/update/UpdaterZeroVelocity.cpp +++ b/ov_msckf/src/update/UpdaterZeroVelocity.cpp @@ -108,7 +108,7 @@ bool UpdaterZeroVelocity::try_update(std::shared_ptr state, double timest // If we should integrate the acceleration and say the velocity should be zero // Also if we should still inflate the bias based on their random walk noises - bool integrated_accel_constraint = false; + bool integrated_accel_constraint = false; // untested bool model_time_varying_bias = true; bool override_with_disparity_check = true; bool explicitly_enforce_zero_motion = false; @@ -128,7 +128,13 @@ bool UpdaterZeroVelocity::try_update(std::shared_ptr state, double timest Eigen::MatrixXd H = Eigen::MatrixXd::Zero(m_size, h_size); Eigen::VectorXd res = Eigen::VectorXd::Zero(m_size); + // IMU intrinsic calibration estimates (static) + Eigen::Matrix3d Dw = State::Dm(state->_options.imu_model, state->_calib_imu_dw->value()); + Eigen::Matrix3d Da = State::Dm(state->_options.imu_model, state->_calib_imu_da->value()); + Eigen::Matrix3d Tg = State::Tg(state->_calib_imu_tg->value()); + // Loop through all our IMU and construct the residual and Jacobian + // TODO: should add jacobians here in respect to IMU intrinsics!! 
// State order is: [q_GtoI, bg, ba, v_IinG] // Measurement order is: [w_true = 0, a_true = 0 or v_k+1 = 0] // w_true = w_m - bw - nw @@ -139,7 +145,8 @@ bool UpdaterZeroVelocity::try_update(std::shared_ptr state, double timest // Precomputed values double dt = imu_recent.at(i + 1).timestamp - imu_recent.at(i).timestamp; - Eigen::Vector3d a_hat = imu_recent.at(i).am - state->_imu->bias_a(); + Eigen::Vector3d a_hat = state->_calib_imu_ACCtoIMU->Rot() * Da * (imu_recent.at(i).am - state->_imu->bias_a()); + Eigen::Vector3d w_hat = state->_calib_imu_GYROtoIMU->Rot() * Dw * (imu_recent.at(i).wm - state->_imu->bias_g() - Tg * a_hat); // Measurement noise (convert from continuous to discrete) // NOTE: The dt time might be different if we have "cut" any imu measurements @@ -151,11 +158,10 @@ bool UpdaterZeroVelocity::try_update(std::shared_ptr state, double timest double w_accel_v = 1.0 / (std::sqrt(dt) * _noises.sigma_a); // Measurement residual (true value is zero) - res.block(6 * i + 0, 0, 3, 1) = -w_omega * (imu_recent.at(i).wm - state->_imu->bias_g()); + res.block(6 * i + 0, 0, 3, 1) = -w_omega * w_hat; if (!integrated_accel_constraint) { res.block(6 * i + 3, 0, 3, 1) = -w_accel * (a_hat - state->_imu->Rot() * _gravity); } else { - assert(false); res.block(6 * i + 3, 0, 3, 1) = -w_accel_v * (state->_imu->vel() - _gravity * dt + state->_imu->Rot().transpose() * a_hat * dt); } @@ -166,7 +172,6 @@ bool UpdaterZeroVelocity::try_update(std::shared_ptr state, double timest H.block(6 * i + 3, 0, 3, 3) = -w_accel * skew_x(R_GtoI_jacob * _gravity); H.block(6 * i + 3, 6, 3, 3) = -w_accel * Eigen::Matrix3d::Identity(); } else { - assert(false); H.block(6 * i + 3, 0, 3, 3) = -w_accel_v * R_GtoI_jacob.transpose() * skew_x(a_hat) * dt; H.block(6 * i + 3, 6, 3, 3) = -w_accel_v * R_GtoI_jacob.transpose() * dt; H.block(6 * i + 3, 9, 3, 3) = w_accel_v * Eigen::Matrix3d::Identity(); @@ -181,7 +186,7 @@ bool UpdaterZeroVelocity::try_update(std::shared_ptr state, double timest } // 
Multiply our noise matrix by a fixed amount - // We typically need to treat the IMU as being "worst" to detect / not become over confident + // We typically need to treat the IMU as being "worst" to detect / not become overconfident Eigen::MatrixXd R = _zupt_noise_multiplier * Eigen::MatrixXd::Identity(res.rows(), res.rows()); // Next propagate the biases forward in time