From 845957b5c5075b1b6ec27f3e4383f905e7003669 Mon Sep 17 00:00:00 2001
From: Hauke Strasdat
Date: Mon, 8 Apr 2024 23:13:40 -0700
Subject: [PATCH] feat: batch (#15)

---
 .github/workflows/py_wheels.yml | 2 +-
 Cargo.toml | 22 +-
 LICENSE-APACHE | 2 +-
 crates/sophus/Cargo.toml | 4 +-
 crates/sophus/examples/pose_graph.rs | 23 +-
 crates/sophus/examples/viewer_ex.rs | 35 +-
 crates/sophus/src/lib.rs | 5 +-
 crates/sophus/src/viewer.rs | 35 +-
 crates/sophus/src/viewer/actor.rs | 13 +-
 crates/sophus/src/viewer/offscreen.rs | 8 +-
 crates/sophus/src/viewer/scene_renderer.rs | 24 +-
 .../src/viewer/scene_renderer/buffers.rs | 8 +-
 .../viewer/scene_renderer/depth_renderer.rs | 5 +-
 .../src/viewer/scene_renderer/interaction.rs | 22 +-
 crates/sophus_calculus/Cargo.toml | 20 -
 crates/sophus_calculus/src/dual.rs | 6 -
 crates/sophus_calculus/src/dual/dual_matrix.rs | 593 -------
 crates/sophus_calculus/src/dual/dual_scalar.rs | 505 ------
 crates/sophus_calculus/src/dual/dual_vector.rs | 423 -----
 crates/sophus_calculus/src/maps/curves.rs | 172 --
 .../src/maps/matrix_valued_maps.rs | 228 ---
 .../src/maps/scalar_valued_maps.rs | 179 --
 .../src/maps/vector_valued_maps.rs | 241 ---
 crates/sophus_calculus/src/points.rs | 29 -
 crates/sophus_calculus/src/types.rs | 24 -
 crates/sophus_calculus/src/types/matrix.rs | 181 --
 crates/sophus_calculus/src/types/params.rs | 23 -
 crates/sophus_calculus/src/types/scalar.rs | 144 --
 crates/sophus_calculus/src/types/vector.rs | 149 --
 .../{sophus_tensor => sophus_core}/Cargo.toml | 5 +-
 .../lib.rs => sophus_core/src/calculus.rs} | 6 +-
 crates/sophus_core/src/calculus/dual.rs | 6 +
 .../src/calculus/dual/dual_matrix.rs | 1461 +++++++++++++++++
 .../src/calculus/dual/dual_scalar.rs | 1441 ++++++++++++++++
 .../src/calculus/dual/dual_vector.rs | 1176 +++++++++++++
 .../src/calculus}/manifold.rs | 0
 .../src/calculus}/manifold/traits.rs | 44 +-
 .../src => sophus_core/src/calculus}/maps.rs | 0
 .../sophus_core/src/calculus/maps/curves.rs | 210 +++
 .../src/calculus/maps/matrix_valued_maps.rs | 300 ++++
 .../src/calculus/maps/scalar_valued_maps.rs | 221 +++
 .../src/calculus/maps/vector_valued_maps.rs | 322 ++++
 .../src/calculus}/region.rs | 0
 .../src/calculus}/spline.rs | 59 +-
 .../src/calculus}/spline/spline_segment.rs | 177 +-
 crates/sophus_core/src/lib.rs | 12 +
 crates/sophus_core/src/linalg.rs | 94 ++
 crates/sophus_core/src/linalg/bool_mask.rs | 71 +
 crates/sophus_core/src/linalg/matrix.rs | 426 +++++
 crates/sophus_core/src/linalg/scalar.rs | 693 ++++++++
 crates/sophus_core/src/linalg/vector.rs | 393 +++++
 crates/sophus_core/src/params.rs | 53 +
 crates/sophus_core/src/points.rs | 28 +
 .../src/lib.rs => sophus_core/src/tensor.rs} | 4 +-
 crates/sophus_core/src/tensor/arc_tensor.rs | 435 +++++
 crates/sophus_core/src/tensor/element.rs | 193 +++
 .../src => sophus_core/src/tensor}/layout.rs | 5 +-
 .../src/tensor}/mut_tensor.rs | 378 ++---
 .../src/tensor/mut_tensor_view.rs} | 72 +-
 .../src/tensor/tensor_view.rs} | 315 ++--
 crates/sophus_image/Cargo.toml | 3 +-
 crates/sophus_image/src/arc_image.rs | 83 +-
 crates/sophus_image/src/image_view.rs | 63 +-
 crates/sophus_image/src/intensity_image.rs | 162 +-
 crates/sophus_image/src/interpolation.rs | 5 +-
 crates/sophus_image/src/lib.rs | 1 +
 crates/sophus_image/src/mut_image.rs | 94 +-
 crates/sophus_image/src/mut_image_view.rs | 64 +-
 crates/sophus_lie/Cargo.toml | 4 +-
 crates/sophus_lie/src/factor_lie_group.rs | 177 ++
 crates/sophus_lie/src/groups.rs | 10 +
 crates/sophus_lie/src/groups/isometry2.rs | 49 +
 crates/sophus_lie/src/groups/isometry3.rs | 48 +
 crates/sophus_lie/src/groups/rotation2.rs | 319 ++++
 crates/sophus_lie/src/groups/rotation3.rs | 635 +++++++
 .../translation_product_product.rs | 238 +--
 crates/sophus_lie/src/isometry2.rs | 46 -
 crates/sophus_lie/src/isometry3.rs | 58 -
 crates/sophus_lie/src/lib.rs | 24 +-
 crates/sophus_lie/src/lie_group.rs | 735 +--------
 crates/sophus_lie/src/lie_group_manifold.rs | 97 ++
 crates/sophus_lie/src/pyo3.rs | 2 -
 crates/sophus_lie/src/real_lie_group.rs | 555 +++++++
 crates/sophus_lie/src/rotation2.rs | 292 ----
 crates/sophus_lie/src/rotation3.rs | 589 -------
 crates/sophus_lie/src/traits.rs | 91 +-
 crates/sophus_opt/Cargo.toml | 2 +-
 crates/sophus_opt/src/block.rs | 5 +-
 .../src/example_problems/cam_calib.rs | 43 +-
 .../cost_fn/isometry2_prior.rs | 54 +-
 .../cost_fn/isometry3_prior.rs | 56 +-
 .../example_problems/cost_fn/pose_graph.rs | 34 +-
 .../example_problems/cost_fn/reprojection.rs | 85 +-
 .../src/example_problems/pose_circle.rs | 59 +-
 .../src/example_problems/simple_prior.rs | 57 +-
 crates/sophus_opt/src/lib.rs | 1 +
 crates/sophus_opt/src/solvers.rs | 3 -
 crates/sophus_opt/src/term.rs | 5 +-
 crates/sophus_opt/src/variables.rs | 20 +-
 crates/sophus_pyo3/Cargo.toml | 5 +-
 crates/sophus_pyo3/src/lib.rs | 6 +-
 crates/sophus_pyo3/src/pyo3/errors.rs | 3 +-
 crates/sophus_pyo3/src/pyo3/lie_groups.rs | 32 +-
 crates/sophus_sensor/Cargo.toml | 3 +-
 crates/sophus_sensor/src/affine.rs | 61 -
 crates/sophus_sensor/src/camera.rs | 123 ++
 crates/sophus_sensor/src/camera_enum.rs | 4 +
 .../src/camera_enum/general_camera.rs | 77 +
 .../src/camera_enum/perspective_camera.rs | 95 ++
 crates/sophus_sensor/src/distortion_table.rs | 79 +-
 crates/sophus_sensor/src/distortions.rs | 4 +
 .../sophus_sensor/src/distortions/affine.rs | 65 +
 .../src/distortions/kannala_brandt.rs | 223 +++
 crates/sophus_sensor/src/dyn_camera.rs | 126 +-
 crates/sophus_sensor/src/general_camera.rs | 93 --
 crates/sophus_sensor/src/generic_camera.rs | 211 ---
 crates/sophus_sensor/src/kannala_brandt.rs | 211 ---
 crates/sophus_sensor/src/lib.rs | 27 +-
 crates/sophus_sensor/src/ortho_camera.rs | 33 -
 .../sophus_sensor/src/perspective_camera.rs | 136 --
 crates/sophus_sensor/src/projections.rs | 4 +
 .../src/projections/orthographic.rs | 36 +
 .../src/projections/perspective.rs | 44 +
 crates/sophus_sensor/src/traits.rs | 62 +-
 crates/sophus_tensor/src/arc_tensor.rs | 599 -------
 crates/sophus_tensor/src/element.rs | 374 -----
 rust-toolchain.toml | 2 +
 sophus-rs.code-workspace | 10 +-
 128 files changed, 11590 insertions(+), 7751 deletions(-)
 delete mode 100644 crates/sophus_calculus/Cargo.toml
 delete mode 100644 crates/sophus_calculus/src/dual.rs
 delete mode 100644 crates/sophus_calculus/src/dual/dual_matrix.rs
 delete mode 100644 crates/sophus_calculus/src/dual/dual_scalar.rs
 delete mode 100644 crates/sophus_calculus/src/dual/dual_vector.rs
 delete mode 100644 crates/sophus_calculus/src/maps/curves.rs
 delete mode 100644 crates/sophus_calculus/src/maps/matrix_valued_maps.rs
 delete mode 100644 crates/sophus_calculus/src/maps/scalar_valued_maps.rs
 delete mode 100644 crates/sophus_calculus/src/maps/vector_valued_maps.rs
 delete mode 100644 crates/sophus_calculus/src/points.rs
 delete mode 100644 crates/sophus_calculus/src/types.rs
 delete mode 100644 crates/sophus_calculus/src/types/matrix.rs
 delete mode 100644 crates/sophus_calculus/src/types/params.rs
 delete mode 100644 crates/sophus_calculus/src/types/scalar.rs
 delete mode 100644 crates/sophus_calculus/src/types/vector.rs
 rename crates/{sophus_tensor => sophus_core}/Cargo.toml (86%)
 rename crates/{sophus_calculus/src/lib.rs => sophus_core/src/calculus.rs} (65%)
 create mode 100644 crates/sophus_core/src/calculus/dual.rs
 create mode 100644 crates/sophus_core/src/calculus/dual/dual_matrix.rs
 create mode 100644 crates/sophus_core/src/calculus/dual/dual_scalar.rs
 create mode 100644 crates/sophus_core/src/calculus/dual/dual_vector.rs
 rename crates/{sophus_calculus/src => sophus_core/src/calculus}/manifold.rs (100%)
 rename crates/{sophus_calculus/src => sophus_core/src/calculus}/manifold/traits.rs (55%)
 rename crates/{sophus_calculus/src => sophus_core/src/calculus}/maps.rs (100%)
 create mode 100644 crates/sophus_core/src/calculus/maps/curves.rs
 create mode 100644 crates/sophus_core/src/calculus/maps/matrix_valued_maps.rs
 create mode 100644 crates/sophus_core/src/calculus/maps/scalar_valued_maps.rs
 create mode 100644 crates/sophus_core/src/calculus/maps/vector_valued_maps.rs
 rename crates/{sophus_calculus/src => sophus_core/src/calculus}/region.rs (100%)
 rename crates/{sophus_calculus/src => sophus_core/src/calculus}/spline.rs (80%)
 rename crates/{sophus_calculus/src => sophus_core/src/calculus}/spline/spline_segment.rs (60%)
 create mode 100644 crates/sophus_core/src/lib.rs
 create mode 100644 crates/sophus_core/src/linalg.rs
 create mode 100644 crates/sophus_core/src/linalg/bool_mask.rs
 create mode 100644 crates/sophus_core/src/linalg/matrix.rs
 create mode 100644 crates/sophus_core/src/linalg/scalar.rs
 create mode 100644 crates/sophus_core/src/linalg/vector.rs
 create mode 100644 crates/sophus_core/src/params.rs
 create mode 100644 crates/sophus_core/src/points.rs
 rename crates/{sophus_tensor/src/lib.rs => sophus_core/src/tensor.rs} (80%)
 create mode 100644 crates/sophus_core/src/tensor/arc_tensor.rs
 create mode 100644 crates/sophus_core/src/tensor/element.rs
 rename crates/{sophus_tensor/src => sophus_core/src/tensor}/layout.rs (98%)
 rename crates/{sophus_tensor/src => sophus_core/src/tensor}/mut_tensor.rs (70%)
 rename crates/{sophus_tensor/src/mut_view.rs => sophus_core/src/tensor/mut_tensor_view.rs} (84%)
 rename crates/{sophus_tensor/src/view.rs => sophus_core/src/tensor/tensor_view.rs} (56%)
 create mode 100644 crates/sophus_lie/src/factor_lie_group.rs
 create mode 100644 crates/sophus_lie/src/groups.rs
 create mode 100644 crates/sophus_lie/src/groups/isometry2.rs
 create mode 100644 crates/sophus_lie/src/groups/isometry3.rs
 create mode 100644 crates/sophus_lie/src/groups/rotation2.rs
 create mode 100644 crates/sophus_lie/src/groups/rotation3.rs
 rename crates/sophus_lie/src/{ => groups}/translation_product_product.rs (67%)
 delete mode 100644 crates/sophus_lie/src/isometry2.rs
 delete mode 100644 crates/sophus_lie/src/isometry3.rs
 create mode 100644 crates/sophus_lie/src/lie_group_manifold.rs
 delete mode 100644 crates/sophus_lie/src/pyo3.rs
 create mode 100644 crates/sophus_lie/src/real_lie_group.rs
 delete mode 100644 crates/sophus_lie/src/rotation2.rs
 delete mode 100644 crates/sophus_lie/src/rotation3.rs
 delete mode 100644 crates/sophus_sensor/src/affine.rs
 create mode 100644 crates/sophus_sensor/src/camera.rs
 create mode 100644 crates/sophus_sensor/src/camera_enum.rs
 create mode 100644 crates/sophus_sensor/src/camera_enum/general_camera.rs
 create mode 100644 crates/sophus_sensor/src/camera_enum/perspective_camera.rs
 create mode 100644 crates/sophus_sensor/src/distortions.rs
 create mode 100644 crates/sophus_sensor/src/distortions/affine.rs
 create mode 100644 crates/sophus_sensor/src/distortions/kannala_brandt.rs
 delete mode 100644 crates/sophus_sensor/src/general_camera.rs
 delete mode 100644 crates/sophus_sensor/src/generic_camera.rs
 delete mode 100644 crates/sophus_sensor/src/kannala_brandt.rs
 delete mode 100644 crates/sophus_sensor/src/ortho_camera.rs
 delete mode 100644 crates/sophus_sensor/src/perspective_camera.rs
 create mode 100644 crates/sophus_sensor/src/projections.rs
 create mode 100644 crates/sophus_sensor/src/projections/orthographic.rs
 create mode 100644 crates/sophus_sensor/src/projections/perspective.rs
 delete mode 100644 crates/sophus_tensor/src/arc_tensor.rs
 delete mode 100644 crates/sophus_tensor/src/element.rs
 create mode 100644 rust-toolchain.toml

diff --git a/.github/workflows/py_wheels.yml b/.github/workflows/py_wheels.yml
index 0f07919..ce156c6 100644
--- a/.github/workflows/py_wheels.yml
+++ b/.github/workflows/py_wheels.yml
@@ -23,7 +23,7 @@ jobs:
     runs-on: ubuntu-latest
     strategy:
       matrix:
-        target: [x86_64, x86, aarch64, armv7]
+        target: [x86_64, aarch64, armv7]
     steps:
       - uses: actions/checkout@v4
       - uses: actions/setup-python@v5
diff --git a/Cargo.toml b/Cargo.toml
index 583fc64..076779a 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -1,13 +1,12 @@
 [workspace]
 members = [
-  "crates/sophus_tensor",
-  "crates/sophus_calculus",
+  "crates/sophus",
+  "crates/sophus_core",
   "crates/sophus_lie",
   "crates/sophus_pyo3",
   "crates/sophus_image",
   "crates/sophus_sensor",
   "crates/sophus_opt",
-  "crates/sophus",
 ]
 resolver = "2"

@@ -25,14 +24,14 @@ version = "0.4.0"

 [workspace.dependencies]
 sophus = {path = "crates/sophus", version = "0.4.0"}
-sophus_calculus = {path = "crates/sophus_calculus", version = "0.4.0"}
+sophus_core = {path = "crates/sophus_core", version = "0.4.0"}
 sophus_image = {path = "crates/sophus_image", version = "0.4.0"}
 sophus_lie = {path = "crates/sophus_lie", version = "0.4.0"}
 sophus_opt = {path = "crates/sophus_opt", version = "0.4.0"}
+sophus_pyo3 = {path = "crates/sophus_pyo3", version = "0.4.0"}
 sophus_sensor = {path = "crates/sophus_sensor", version = "0.4.0"}
-sophus_tensor = {path = "crates/sophus_tensor", version = "0.4.0"}

-approx = {version = "0.5.1"}
+approx = "0.5.1"
 as-any = "0.3.1"
 assertables = "7.0.1"
 async-trait = "0.1.77"
@@ -47,23 +46,16 @@ hollywood = "0.5.0"
 image = {version = "0.25", features = [
   "jpeg",
   "png",
-  "tga",
-  "gif",
-  "ico",
-  "bmp",
-  "hdr",
   "tiff",
-  "avif",
-  "webp",
 ]}
 log = "0.4.14"
 nalgebra = {version = "0.32", features = ["rand"]}
 ndarray = {version = "0.15.4", features = ["approx-0_5"]}
 num-traits = "0.2.15"
-numpy = "0.20"
+numpy = "0.21"
 png = "0.17.11"
 rand = "0.8.5"
-simba = "0.8.1"
+sleef = "0.3.2"
 tokio = {version = "1", features = ["full"]}
 typenum = {version = "1.17.0", features = ["const-generics"]}
 wgpu = "0.19"
diff --git a/LICENSE-APACHE b/LICENSE-APACHE
index a7e77cb..1b5ec8b 100644
--- a/LICENSE-APACHE
+++ b/LICENSE-APACHE
@@ -173,4 +173,4 @@ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
       incurred by, or claims asserted against, such Contributor by reason
       of your accepting any such warranty or additional liability.

-END OF TERMS AND CONDITIONS
\ No newline at end of file
+END OF TERMS AND CONDITIONS
diff --git a/crates/sophus/Cargo.toml b/crates/sophus/Cargo.toml
index 22951f9..619c8fa 100644
--- a/crates/sophus/Cargo.toml
+++ b/crates/sophus/Cargo.toml
@@ -11,12 +11,12 @@ repository.workspace = true
 version.workspace = true

 [dependencies]
-sophus_calculus.workspace = true
+sophus_core.workspace = true
 sophus_image.workspace = true
 sophus_lie.workspace = true
 sophus_opt.workspace = true
+sophus_pyo3.workspace = true
 sophus_sensor.workspace = true
-sophus_tensor.workspace = true

 approx.workspace = true
 assertables.workspace = true
diff --git a/crates/sophus/examples/pose_graph.rs b/crates/sophus/examples/pose_graph.rs
index 21124d4..a84aed6 100644
--- a/crates/sophus/examples/pose_graph.rs
+++ b/crates/sophus/examples/pose_graph.rs
@@ -1,17 +1,11 @@
 use hollywood::actors::egui::EguiActor;
-
 use hollywood::actors::egui::Stream;
 pub use hollywood::compute::Context;
 pub use hollywood::core::*;
 use hollywood::macros::*;
-use sophus::calculus::types::vector::IsVector;
-use sophus::calculus::types::VecF64;
 use sophus::image::image_view::ImageSize;
-use sophus::lie::isometry2::Isometry2;
-use sophus::lie::isometry3::Isometry3;
 use sophus::lie::traits::IsTranslationProductGroup;
 use sophus::opt::example_problems::pose_circle::PoseCircleProblem;
-use sophus::sensor::perspective_camera::KannalaBrandtCamera;
 use sophus::viewer::actor::run_viewer_on_main_thread;
 use sophus::viewer::actor::ViewerBuilder;
 use sophus::viewer::actor::ViewerCamera;
@@ -19,6 +13,13 @@ use sophus::viewer::actor::ViewerConfig;
 use sophus::viewer::renderable::*;
 use sophus::viewer::scene_renderer::interaction::WgpuClippingPlanes;
 use sophus::viewer::SimpleViewer;
+use sophus_core::linalg::vector::IsVector;
+use sophus_core::linalg::VecF64;
+use sophus_lie::groups::isometry2::Isometry2;
+use sophus_lie::groups::isometry3::Isometry3;
+use sophus_sensor::camera_enum::perspective_camera::KannalaBrandtCamera;
+use sophus_sensor::camera_enum::perspective_camera::PerspectiveCameraEnum;
+use sophus_sensor::dyn_camera::DynCamera;

 #[actor(ContentGeneratorMessage)]
 type ContentGenerator = Actor<
@@ -47,7 +48,7 @@ pub struct ContentGeneratorOutbound {
     pub packets: OutboundChannel<Stream<Vec<Renderable>>>,
 }

-fn make_axes(world_from_local_poses: Vec<Isometry2<f64>>) -> Vec<Renderable> {
+fn make_axes(world_from_local_poses: Vec<Isometry2<f64, 1>>) -> Vec<Renderable> {
     let zero_in_local = VecF64::<2>::zeros();
     let x_axis_local = VecF64::<2>::new(1.0, 0.0);
     let y_axis_local = VecF64::<2>::new(0.0, 1.0);
@@ -124,7 +125,7 @@ impl InboundMessageNew<f64> for ContentGeneratorMessage {

 pub async fn run_viewer_example() {
     // Camera / view pose parameters
-    let intrinsics = KannalaBrandtCamera::<f64>::new(
+    let intrinsics = KannalaBrandtCamera::<f64, 1>::new(
         &VecF64::<8>::from_array([600.0, 600.0, 320.0, 240.0, 0.0, 0.0, 0.0, 0.0]),
         ImageSize {
             width: 640,
@@ -137,7 +138,9 @@ pub async fn run_viewer_example() {
         far: 1000.0,
     };
     let camera = ViewerCamera {
-        intrinsics,
+        intrinsics: DynCamera::<f64, 1>::from_model(PerspectiveCameraEnum::KannalaBrandt(
+            intrinsics,
+        )),
         clipping_planes,
         scene_from_camera,
     };
@@ -157,7 +160,7 @@ pub async fn run_viewer_example() {
     );

     // 3. The viewer actor
     let mut viewer =
-        EguiActor::<Vec<Renderable>, (), Isometry3<f64>>::from_builder(context, &builder);
+        EguiActor::<Vec<Renderable>, (), Isometry3<f64, 1>>::from_builder(context, &builder);

     // Pipeline connections:
     // timer
diff --git a/crates/sophus/examples/viewer_ex.rs b/crates/sophus/examples/viewer_ex.rs
index adc9c51..70b399e 100644
--- a/crates/sophus/examples/viewer_ex.rs
+++ b/crates/sophus/examples/viewer_ex.rs
@@ -7,14 +7,8 @@ pub use hollywood::core::request::RequestHub;
 pub use hollywood::core::*;
 use hollywood::macros::*;
 use nalgebra::SVector;
-use sophus::calculus::types::vector::IsVector;
-use sophus::calculus::types::VecF64;
-use sophus::image::image_view::ImageSize;
-use sophus::lie::isometry3::Isometry3;
-use sophus::lie::traits::IsTranslationProductGroup;
-use sophus::sensor::perspective_camera::KannalaBrandtCamera;
-
 use sophus::image::arc_image::ArcImage4F32;
+use sophus::image::image_view::ImageSize;
 use sophus::viewer::actor::run_viewer_on_main_thread;
 use sophus::viewer::actor::ViewerBuilder;
 use sophus::viewer::actor::ViewerCamera;
@@ -22,6 +16,11 @@ use sophus::viewer::actor::ViewerConfig;
 use sophus::viewer::renderable::*;
 use sophus::viewer::scene_renderer::interaction::WgpuClippingPlanes;
 use sophus::viewer::SimpleViewer;
+use sophus_core::linalg::vector::IsVector;
+use sophus_core::linalg::VecF64;
+use sophus_lie::groups::isometry3::Isometry3;
+use sophus_lie::traits::IsTranslationProductGroup;
+use sophus_sensor::dyn_camera::DynCamera;

 #[actor(ContentGeneratorMessage)]
 type ContentGenerator = Actor<
@@ -38,13 +37,13 @@ type ContentGenerator = Actor<
 pub enum ContentGeneratorMessage {
     /// in seconds
     ClockTick(f64),
-    SceneFromCamera(ReplyMessage<Isometry3<f64>>),
+    SceneFromCamera(ReplyMessage<Isometry3<f64, 1>>),
 }

 /// Request of the simulation actor.
 pub struct ContentGeneratorRequest {
     /// Check time-stamp of receiver
-    pub scene_from_camera_request: RequestChannel<(), Isometry3<f64>, ContentGeneratorMessage>,
+    pub scene_from_camera_request: RequestChannel<(), Isometry3<f64, 1>, ContentGeneratorMessage>,
 }

 impl RequestHub<ContentGeneratorMessage> for ContentGeneratorRequest {
@@ -78,8 +77,8 @@ impl Activate for ContentGeneratorRequest {
 pub struct ContentGeneratorState {
     pub counter: u32,
     pub show: bool,
-    pub intrinsics: KannalaBrandtCamera<f64>,
-    pub scene_from_camera: Isometry3<f64>,
+    pub intrinsics: DynCamera<f64, 1>,
+    pub scene_from_camera: Isometry3<f64, 1>,
 }

 impl Default for ContentGeneratorState {
@@ -87,7 +86,7 @@ impl Default for ContentGeneratorState {
         ContentGeneratorState {
             counter: 0,
             show: false,
-            intrinsics: KannalaBrandtCamera::<f64>::new(
+            intrinsics: DynCamera::new_kannala_brandt(
                 &VecF64::<8>::from_array([600.0, 600.0, 320.0, 240.0, 0.0, 0.0, 0.0, 0.0]),
                 ImageSize {
                     width: 640,
@@ -422,15 +421,15 @@ impl InboundMessageNew<f64> for ContentGeneratorMessage {
     }
 }

-impl InboundMessageNew<ReplyMessage<Isometry3<f64>>> for ContentGeneratorMessage {
-    fn new(_inbound_name: String, scene_from_camera: ReplyMessage<Isometry3<f64>>) -> Self {
+impl InboundMessageNew<ReplyMessage<Isometry3<f64, 1>>> for ContentGeneratorMessage {
+    fn new(_inbound_name: String, scene_from_camera: ReplyMessage<Isometry3<f64, 1>>) -> Self {
         ContentGeneratorMessage::SceneFromCamera(scene_from_camera)
     }
 }

 pub async fn run_viewer_example() {
     // Camera / view pose parameters
-    let intrinsics = KannalaBrandtCamera::<f64>::new(
+    let intrinsics = DynCamera::new_kannala_brandt(
         &VecF64::<8>::from_array([600.0, 600.0, 320.0, 240.0, 0.0, 0.0, 0.0, 0.0]),
         ImageSize {
             width: 640,
@@ -443,7 +442,7 @@ pub async fn run_viewer_example() {
         far: 1000.0,
     };
     let camera = ViewerCamera {
-        intrinsics,
+        intrinsics: intrinsics.clone(),
         clipping_planes,
         scene_from_camera,
     };
@@ -462,13 +461,13 @@ pub async fn run_viewer_example() {
         ContentGeneratorState {
             counter: 0,
             show: false,
-            intrinsics,
+            intrinsics: intrinsics.clone(),
             scene_from_camera,
         },
     );

     // 3. The viewer actor
     let mut viewer =
-        EguiActor::<Vec<Renderable>, (), Isometry3<f64>>::from_builder(context, &builder);
+        EguiActor::<Vec<Renderable>, (), Isometry3<f64, 1>>::from_builder(context, &builder);

     // Pipeline connections:
     // timer
diff --git a/crates/sophus/src/lib.rs b/crates/sophus/src/lib.rs
index aba70fe..83e1018 100644
--- a/crates/sophus/src/lib.rs
+++ b/crates/sophus/src/lib.rs
@@ -1,9 +1,10 @@
-pub use sophus_calculus as calculus;
+#![feature(portable_simd)]
+
+pub use sophus_core as core;
 pub use sophus_image as image;
 pub use sophus_lie as lie;
 pub use sophus_opt as opt;
 pub use sophus_sensor as sensor;
-pub use sophus_tensor as tensor;

 pub mod viewer;
diff --git a/crates/sophus/src/viewer.rs b/crates/sophus/src/viewer.rs
index c7b10cd..b17fd34 100644
--- a/crates/sophus/src/viewer.rs
+++ b/crates/sophus/src/viewer.rs
@@ -4,18 +4,6 @@ pub mod pixel_renderer;
 pub mod renderable;
 pub mod scene_renderer;

-use eframe::egui::load::SizedTexture;
-use eframe::egui::Image;
-use eframe::egui::Sense;
-use eframe::egui::{self};
-use eframe::egui_wgpu::Renderer;
-use eframe::epaint::mutex::RwLock;
-use hollywood::actors::egui::EguiAppFromBuilder;
-use hollywood::actors::egui::Stream;
-use hollywood::compute::pipeline::CancelRequest;
-use hollywood::core::request::RequestMessage;
-use std::sync::Arc;
-
 use self::actor::ViewerBuilder;
 use self::offscreen::OffscreenTexture;
 use self::pixel_renderer::PixelRenderer;
@@ -26,14 +14,25 @@ use self::scene_renderer::SceneRenderer;
 use crate::image::arc_image::ArcImage4U8;
 use crate::image::image_view::ImageSize;
 use crate::image::image_view::IsImageView;
-use crate::lie::isometry3::Isometry3;
-use crate::sensor::perspective_camera::KannalaBrandtCamera;
-use crate::tensor::view::IsTensorLike;
 use crate::viewer::pixel_renderer::LineVertex2;
 use crate::viewer::pixel_renderer::PointVertex2;
 use crate::viewer::scene_renderer::line::LineVertex3;
 use crate::viewer::scene_renderer::mesh::MeshVertex3;
 use crate::viewer::scene_renderer::point::PointVertex3;
+use eframe::egui::load::SizedTexture;
+use eframe::egui::Image;
+use eframe::egui::Sense;
+use eframe::egui::{self};
+use eframe::egui_wgpu::Renderer;
+use eframe::epaint::mutex::RwLock;
+use hollywood::actors::egui::EguiAppFromBuilder;
+use hollywood::actors::egui::Stream;
+use hollywood::compute::pipeline::CancelRequest;
+use hollywood::core::request::RequestMessage;
+use sophus_core::tensor::tensor_view::IsTensorLike;
+use sophus_lie::groups::isometry3::Isometry3;
+use sophus_sensor::dyn_camera::DynCamera;
+use std::sync::Arc;

 #[derive(Clone)]
 pub struct ViewerRenderState {
@@ -88,13 +87,13 @@ impl BackgroundTexture {
 pub struct SimpleViewer {
     state: ViewerRenderState,
     offscreen: OffscreenTexture,
-    cam: KannalaBrandtCamera<f64>,
+    cam: DynCamera<f64, 1>,
     pixel: PixelRenderer,
     scene: SceneRenderer,
     background_image: Option<ArcImage4U8>,
     background_texture: Option<BackgroundTexture>,
     message_recv: std::sync::mpsc::Receiver<Stream<Vec<Renderable>>>,
-    request_recv: std::sync::mpsc::Receiver<RequestMessage<(), Isometry3<f64>>>,
+    request_recv: std::sync::mpsc::Receiver<RequestMessage<(), Isometry3<f64, 1>>>,
     cancel_request_sender: tokio::sync::mpsc::Sender<CancelRequest>,
 }

@@ -111,7 +110,7 @@ impl EguiAppFromBuilder<ViewerBuilder> for SimpleViewer {
         Box::new(SimpleViewer {
             state: render_state.clone(),
             offscreen: OffscreenTexture::new(&render_state, &builder.config.camera.intrinsics),
-            cam: builder.config.camera.intrinsics,
+            cam: builder.config.camera.intrinsics.clone(),
             pixel: PixelRenderer::new(&render_state, &builder, depth_stencil.clone()),
             scene: SceneRenderer::new(&render_state, &builder, depth_stencil),
             message_recv: builder.message_recv,
diff --git a/crates/sophus/src/viewer/actor.rs b/crates/sophus/src/viewer/actor.rs
index f68e9eb..1393797 100644
--- a/crates/sophus/src/viewer/actor.rs
+++ b/crates/sophus/src/viewer/actor.rs
@@ -1,18 +1,17 @@
-use crate::lie::isometry3::Isometry3;
-use crate::sensor::perspective_camera::KannalaBrandtCamera;
 use crate::viewer::scene_renderer::interaction::WgpuClippingPlanes;
 use crate::viewer::Renderable;
 use crate::viewer::ViewerRenderState;
-
 use eframe::egui;
 use hollywood::actors::egui::EguiAppFromBuilder;
 use hollywood::actors::egui::GenericEguiBuilder;
 use hollywood::core::request::RequestMessage;
+use sophus_lie::groups::isometry3::Isometry3;
+use sophus_sensor::dyn_camera::DynCamera;

 pub struct ViewerCamera {
-    pub intrinsics: KannalaBrandtCamera<f64>,
+    pub intrinsics: DynCamera<f64, 1>,
     pub clipping_planes: WgpuClippingPlanes,
-    pub scene_from_camera: Isometry3<f64>,
+    pub scene_from_camera: Isometry3<f64, 1>,
 }

 pub struct ViewerConfig {
@@ -23,7 +22,7 @@ pub struct ViewerConfig {
 #[derive(Clone, Debug)]
 pub enum ViewerMessage {
     Packets(Vec<Renderable>),
-    RequestViewPose(RequestMessage<(), Isometry3<f64>>),
+    RequestViewPose(RequestMessage<(), Isometry3<f64, 1>>),
 }

 impl Default for ViewerMessage {
@@ -33,7 +32,7 @@ impl Default for ViewerMessage {
 }

 pub type ViewerBuilder =
-    GenericEguiBuilder<Vec<Renderable>, RequestMessage<(), Isometry3<f64>>, ViewerConfig>;
+    GenericEguiBuilder<Vec<Renderable>, RequestMessage<(), Isometry3<f64, 1>>, ViewerConfig>;

 pub fn run_viewer_on_main_thread<
     Builder: 'static,
diff --git a/crates/sophus/src/viewer/offscreen.rs b/crates/sophus/src/viewer/offscreen.rs
index becb3fc..f47d9a8 100644
--- a/crates/sophus/src/viewer/offscreen.rs
+++ b/crates/sophus/src/viewer/offscreen.rs
@@ -1,10 +1,9 @@
 use crate::image::arc_image::ArcImageF32;
 use crate::image::image_view::ImageSize;
 use crate::image::image_view::ImageViewF32;
-use crate::sensor::perspective_camera::KannalaBrandtCamera;
 use crate::viewer::ViewerRenderState;
-
 use eframe::egui::{self};
+use sophus_sensor::dyn_camera::DynCamera;

 #[derive(Debug)]
 pub(crate) struct OffscreenTexture {
@@ -16,10 +15,7 @@ pub(crate) struct OffscreenTexture {
 }

 impl OffscreenTexture {
-    pub(crate) fn new(
-        render_state: &ViewerRenderState,
-        intrinsics: &KannalaBrandtCamera<f64>,
-    ) -> Self {
+    pub(crate) fn new(render_state: &ViewerRenderState, intrinsics: &DynCamera<f64, 1>) -> Self {
         let w = intrinsics.image_size().width as f32;
         let h = intrinsics.image_size().height as f32;

diff --git a/crates/sophus/src/viewer/scene_renderer.rs b/crates/sophus/src/viewer/scene_renderer.rs
index e001065..66e9b64 100644
--- a/crates/sophus/src/viewer/scene_renderer.rs
+++ b/crates/sophus/src/viewer/scene_renderer.rs
@@ -6,20 +6,20 @@ pub mod mesh;
 pub mod point;
 pub mod textured_mesh;

-use crate::calculus::region::IsRegion;
-use crate::image::arc_image::ArcImageF32;
-use crate::image::image_view::IsImageView;
-use crate::sensor::perspective_camera::KannalaBrandtCamera;
-use crate::tensor::view::IsTensorLike;
-use crate::viewer::actor::ViewerBuilder;
-use crate::viewer::DepthRenderer;
-use crate::viewer::ViewerRenderState;
-
 use self::buffers::SceneRenderBuffers;
 use self::interaction::Interaction;
 use self::mesh::MeshRenderer;
 use self::point::ScenePointRenderer;
+use crate::image::arc_image::ArcImageF32;
+use crate::viewer::actor::ViewerBuilder;
+use crate::viewer::DepthRenderer;
+use crate::viewer::ViewerRenderState;
 use eframe::egui;
+use sophus_core::calculus::region::IsRegion;
+use sophus_core::tensor::tensor_view::IsTensorLike;
+use sophus_image::image_view::IsImageView;
+use sophus_sensor::distortion_table::distort_table;
+use sophus_sensor::dyn_camera::DynCamera;
 use wgpu::DepthStencilState;

 pub struct SceneRenderer {
@@ -137,7 +137,7 @@ impl SceneRenderer {

     pub fn process_event(
         &mut self,
-        cam: &KannalaBrandtCamera<f64>,
+        cam: &DynCamera<f64, 1>,
         response: &egui::Response,
         z_buffer: ArcImageF32,
     ) {
@@ -244,7 +244,7 @@ impl SceneRenderer {
         self.textured_mesh_renderer.vertices.clear();
     }

-    pub fn prepare(&self, state: &ViewerRenderState, intrinsics: &KannalaBrandtCamera<f64>) {
+    pub fn prepare(&self, state: &ViewerRenderState, intrinsics: &DynCamera<f64, 1>) {
         state.queue.write_buffer(
             &self.point_renderer.vertex_buffer,
             0,
@@ -282,7 +282,7 @@ impl SceneRenderer {
         // distortion table
         let mut maybe_dist_lut = self.buffers.distortion_lut.lock().unwrap();
         if maybe_dist_lut.is_none() {
-            let distort_lut = &intrinsics.distort_table();
+            let distort_lut = distort_table(intrinsics);
             *maybe_dist_lut = Some(distort_lut.clone());

             state.queue.write_texture(
diff --git a/crates/sophus/src/viewer/scene_renderer/buffers.rs b/crates/sophus/src/viewer/scene_renderer/buffers.rs
index d5c7b26..7bca347 100644
--- a/crates/sophus/src/viewer/scene_renderer/buffers.rs
+++ b/crates/sophus/src/viewer/scene_renderer/buffers.rs
@@ -97,10 +97,10 @@ impl SceneRenderBuffers {
             height: builder.config.camera.intrinsics.image_size().height as f32,
             near: 0.1,
             far: 1000.0,
-            fx: builder.config.camera.intrinsics.params()[0] as f32,
-            fy: builder.config.camera.intrinsics.params()[1] as f32,
-            px: builder.config.camera.intrinsics.params()[2] as f32,
-            py: builder.config.camera.intrinsics.params()[3] as f32,
+            fx: builder.config.camera.intrinsics.pinhole_params()[0] as f32,
+            fy: builder.config.camera.intrinsics.pinhole_params()[1] as f32,
+            px: builder.config.camera.intrinsics.pinhole_params()[2] as f32,
+            py: builder.config.camera.intrinsics.pinhole_params()[3] as f32,
         };

         let transform_buffer = device.create_buffer_init(&wgpu::util::BufferInitDescriptor {
diff --git a/crates/sophus/src/viewer/scene_renderer/depth_renderer.rs b/crates/sophus/src/viewer/scene_renderer/depth_renderer.rs
index 9ec8706..0bba393 100644
--- a/crates/sophus/src/viewer/scene_renderer/depth_renderer.rs
+++ b/crates/sophus/src/viewer/scene_renderer/depth_renderer.rs
@@ -1,6 +1,5 @@
-use crate::sensor::perspective_camera::KannalaBrandtCamera;
 use crate::viewer::ViewerRenderState;
-
+use sophus_sensor::dyn_camera::DynCamera;
 use wgpu::DepthStencilState;

 pub struct DepthRenderer {
@@ -13,7 +12,7 @@ pub struct DepthRenderer {
 impl DepthRenderer {
     pub fn new(
         state: &ViewerRenderState,
-        cam: &KannalaBrandtCamera<f64>,
+        cam: &DynCamera<f64, 1>,
         depth_stencil: Option<DepthStencilState>,
     ) -> Self {
         pub const DEPTH_FORMAT: wgpu::TextureFormat = wgpu::TextureFormat::Depth32Float;
diff --git a/crates/sophus/src/viewer/scene_renderer/interaction.rs b/crates/sophus/src/viewer/scene_renderer/interaction.rs
index fb76b61..b0a52c5 100644
--- a/crates/sophus/src/viewer/scene_renderer/interaction.rs
+++ b/crates/sophus/src/viewer/scene_renderer/interaction.rs
@@ -1,12 +1,11 @@
 use crate::image::arc_image::ArcImageF32;
 use crate::image::image_view::IsImageView;
-use crate::lie::isometry3::Isometry3;
 use crate::lie::traits::IsTranslationProductGroup;
-use crate::sensor::perspective_camera::KannalaBrandtCamera;
-use crate::tensor::view::IsTensorLike;
-
 use eframe::egui;
-use sophus_calculus::types::VecF64;
+use sophus_core::linalg::VecF64;
+use sophus_core::tensor::tensor_view::IsTensorLike;
+use sophus_lie::groups::isometry3::Isometry3;
+use sophus_sensor::dyn_camera::DynCamera;

 #[derive(Clone, Copy)]
 pub struct WgpuClippingPlanes {
@@ -35,7 +34,7 @@ pub struct InteractionState {
 pub struct Interaction {
     pub maybe_state: Option<InteractionState>,
     pub clipping_planes: WgpuClippingPlanes,
-    pub scene_from_camera: Isometry3<f64>,
+    pub scene_from_camera: Isometry3<f64, 1>,
 }

 impl Interaction {
@@ -62,7 +61,7 @@ impl Interaction {

     pub fn process_event(
         &mut self,
-        cam: &KannalaBrandtCamera<f64>,
+        cam: &DynCamera<f64, 1>,
         response: &egui::Response,
         z_buffer: ArcImageF32,
     ) {
@@ -120,10 +119,11 @@ impl Interaction {
             let delta =
                 0.01 * VecF64::<6>::new(0.0, 0.0, 0.0, -delta_y as f64, delta_x as f64, 0.0);
             let camera_from_scene_point = Isometry3::from_t(&cam.cam_unproj_with_z(&pixel, depth));
-            self.scene_from_camera = self.scene_from_camera
-                * &camera_from_scene_point
-                * &Isometry3::exp(&delta)
-                * &camera_from_scene_point.inverse();
+            self.scene_from_camera =
+                self.scene_from_camera
+                    .group_mul(&camera_from_scene_point.group_mul(
+                        &Isometry3::exp(&delta).group_mul(&camera_from_scene_point.inverse()),
+                    ));
         }
     }
 }
diff --git a/crates/sophus_calculus/Cargo.toml b/crates/sophus_calculus/Cargo.toml
deleted file mode 100644
index bf805a9..0000000
--- a/crates/sophus_calculus/Cargo.toml
+++ /dev/null
@@ -1,20 +0,0 @@
-[package]
-description = "sophus - geometry for robotics and computer vision"
-name = "sophus_calculus"
-readme = "../../README.md"
-
-edition.workspace = true
-include.workspace = true
-keywords.workspace = true
-license.workspace = true
-repository.workspace = true
-version.workspace = true
-
-[dependencies]
-sophus_tensor.workspace = true
-
-approx.workspace = true
-assertables.workspace = true
-nalgebra.workspace = true
-num-traits.workspace = true
-simba.workspace = true
diff --git a/crates/sophus_calculus/src/dual.rs b/crates/sophus_calculus/src/dual.rs
deleted file mode 100644
index c683b16..0000000
--- a/crates/sophus_calculus/src/dual.rs
+++ /dev/null
@@ -1,6 +0,0 @@
-///! Dual matrix.
-pub mod dual_matrix;
-///! Dual scalar.
-pub mod dual_scalar;
-///! Dual vector.
-pub mod dual_vector;
diff --git a/crates/sophus_calculus/src/dual/dual_matrix.rs b/crates/sophus_calculus/src/dual/dual_matrix.rs
deleted file mode 100644
index a3f0702..0000000
--- a/crates/sophus_calculus/src/dual/dual_matrix.rs
+++ /dev/null
@@ -1,593 +0,0 @@
-use crate::dual::dual_scalar::Dual;
-use crate::dual::dual_vector::DualV;
-use crate::types::matrix::IsMatrix;
-use crate::types::vector::IsVectorLike;
-use crate::types::MatF64;
-use crate::types::VecF64;
-
-use sophus_tensor::mut_tensor::MutTensorDD;
-use sophus_tensor::mut_tensor::MutTensorDDR;
-use sophus_tensor::mut_tensor::MutTensorDDRC;
-use sophus_tensor::mut_view::IsMutTensorLike;
-use sophus_tensor::view::IsTensorLike;
-
-use std::fmt::Debug;
-use std::ops::Add;
-use std::ops::Mul;
-use std::ops::Neg;
-use std::ops::Sub;
-
-/// Dual matrix
-#[derive(Clone)]
-pub struct DualM<const ROWS: usize, const COLS: usize> {
-    /// value - real matrix
-    pub val: MatF64<ROWS, COLS>,
-    /// derivative - infinitesimal matrix
-    pub dij_val: Option<MutTensorDDRC<ROWS, COLS>>,
-}
-
-impl<const ROWS: usize, const COLS: usize> DualM<ROWS, COLS> {
-    fn binary_mm_dij<
-        const R0: usize,
-        const R1: usize,
-        const C0: usize,
-        const C1: usize,
-        F: FnMut(&MatF64<R0, C0>) -> MatF64<ROWS, COLS>,
-        G: FnMut(&MatF64<R1, C1>) -> MatF64<ROWS, COLS>,
-    >(
-        lhs_dx: &Option<MutTensorDDRC<R0, C0>>,
-        rhs_dx: &Option<MutTensorDDRC<R1, C1>>,
-        mut left_op: F,
-        mut right_op: G,
-    ) -> Option<MutTensorDDRC<ROWS, COLS>> {
-        match (lhs_dx, rhs_dx) {
-            (None, None) => None,
-            (None, Some(rhs_dij)) => {
-                let out_dij = MutTensorDDRC::from_map(&rhs_dij.view(), |r_dij| right_op(r_dij));
-                Some(out_dij)
-            }
-            (Some(lhs_dij), None) => {
-                let out_dij = MutTensorDDRC::from_map(&lhs_dij.view(), |l_dij| left_op(l_dij));
-                Some(out_dij)
-            }
-            (Some(lhs_dij), Some(rhs_dij)) => {
-                let dyn_mat =
-                    MutTensorDDRC::from_map2(&lhs_dij.view(), &rhs_dij.view(), |l_dij, r_dij| {
-                        left_op(l_dij) + right_op(r_dij)
-                    });
-                Some(dyn_mat)
-            }
-        }
-    }
-
-    fn binary_mv_dij<
-        const R0: usize,
-        const R1: usize,
-        const C0: usize,
-        F: FnMut(&MatF64<R0, C0>) -> VecF64<ROWS>,
-        G: FnMut(&VecF64<R1>) -> VecF64<ROWS>,
-    >(
-        lhs_dx: &Option<MutTensorDDRC<R0, C0>>,
-        rhs_dx: &Option<MutTensorDDR<R1>>,
-        mut left_op: F,
-        mut right_op: G,
-    ) -> Option<MutTensorDDR<ROWS>> {
-        match (lhs_dx, rhs_dx) {
-            (None, None) => None,
-            (None, Some(rhs_dij)) => {
-                let out_dij = MutTensorDDR::from_map(&rhs_dij.view(), |r_dij| right_op(r_dij));
-                Some(out_dij)
-            }
-            (Some(lhs_dij), None) => {
-                let out_dij = MutTensorDDR::from_map(&lhs_dij.view(), |l_dij| left_op(l_dij));
-                Some(out_dij)
-            }
-            (Some(lhs_dij), Some(rhs_dij)) => {
-                let dyn_mat =
-                    MutTensorDDR::from_map2(&lhs_dij.view(), &rhs_dij.view(), |l_dij, r_dij| {
-                        left_op(l_dij) + right_op(r_dij)
-                    });
-                Some(dyn_mat)
-            }
-        }
-    }
-
-    fn binary_ms_dij<
-        const R0: usize,
-        const C0: usize,
-        F: FnMut(&MatF64<R0, C0>) -> MatF64<ROWS, COLS>,
-        G: FnMut(&f64) -> MatF64<ROWS, COLS>,
-    >(
-        lhs_dx: &Option<MutTensorDDRC<R0, C0>>,
-        rhs_dx: &Option<MutTensorDD<f64>>,
-        mut left_op: F,
-        mut right_op: G,
-    ) -> Option<MutTensorDDRC<ROWS, COLS>> {
-        match (lhs_dx, rhs_dx) {
-            (None, None) => None,
-            (None, Some(rhs_dij)) => {
-                let out_dij = MutTensorDDRC::from_map(&rhs_dij.view(), |r_dij| right_op(r_dij));
-                Some(out_dij)
-            }
-            (Some(lhs_dij), None) => {
-                let out_dij = MutTensorDDRC::from_map(&lhs_dij.view(), |l_dij| left_op(l_dij));
-                Some(out_dij)
-            }
-            (Some(lhs_dij), Some(rhs_dij)) => {
-                let dyn_mat =
-                    MutTensorDDRC::from_map2(&lhs_dij.view(), &rhs_dij.view(), |l_dij, r_dij| {
-                        left_op(l_dij) + right_op(r_dij)
-                    });
-                Some(dyn_mat)
-            }
-        }
-    }
-
-    /// derivatives
-    pub fn two_dx<const R0: usize, const C0: usize, const R1: usize, const C1: usize>(
-        mut lhs_dx: Option<MutTensorDDRC<R0, C0>>,
-        mut rhs_dx: Option<MutTensorDDRC<R1, C1>>,
-    ) -> Option<DijPairM<R0, C0, R1, C1>> {
-        if lhs_dx.is_none() && rhs_dx.is_none() {
-            return None;
-        }
-
-        if lhs_dx.is_some() && rhs_dx.is_some() {
-            assert_eq!(
-                lhs_dx.clone().unwrap().dims(),
-                rhs_dx.clone().unwrap().dims()
-            );
-        }
-
-        if lhs_dx.is_none() {
-            lhs_dx = Some(MutTensorDDRC::<R0, C0>::from_shape(
-                rhs_dx.clone().unwrap().dims(),
-            ))
-        } else if rhs_dx.is_none() {
-            rhs_dx = Some(MutTensorDDRC::<R1, C1>::from_shape(
-                lhs_dx.clone().unwrap().dims(),
-            ))
-        }
-
-        Some(DijPairM {
-            lhs: lhs_dx.unwrap(),
-            rhs: rhs_dx.unwrap(),
-        })
-    }
-
-    /// derivatives
-    pub fn two_dx_from_vec(
-        mut lhs_dx: Option<MutTensorDDRC<ROWS, COLS>>,
-        mut rhs_dx: Option<MutTensorDDR<COLS>>,
-    ) -> Option<DijPairMV<ROWS, COLS>> {
-        if lhs_dx.is_none() && rhs_dx.is_none() {
-            return None;
-        }
-
-        if lhs_dx.is_some() && rhs_dx.is_some() {
-            assert_eq!(
-                lhs_dx.clone().unwrap().dims(),
-                rhs_dx.clone().unwrap().dims()
-            );
-        }
-
-        if lhs_dx.is_none() {
-            lhs_dx = Some(MutTensorDDRC::<ROWS, COLS>::from_shape(
-                rhs_dx.clone().unwrap().dims(),
-            ))
-        } else if rhs_dx.is_none() {
-            rhs_dx = Some(MutTensorDDR::<COLS>::from_shape(
-                lhs_dx.clone().unwrap().dims(),
-            ))
-        }
-
-        Some(DijPairMV::<ROWS, COLS> {
-            lhs: lhs_dx.unwrap(),
-            rhs: rhs_dx.unwrap(),
-        })
-    }
-
-    /// Create a dual matrix
-    pub fn v(val: MatF64<ROWS, COLS>) -> Self {
-        let mut dij_val = MutTensorDDRC::<ROWS, COLS>::from_shape([ROWS, COLS]);
-        for i in 0..ROWS {
-            for j in 0..COLS {
-                dij_val.mut_view().get_mut([i, j])[(i, j)] = 1.0;
-            }
-        }
-
-        Self {
-            val,
-            dij_val: Some(dij_val),
-        }
-    }
-}
-
-impl<const ROWS: usize, const COLS: usize> IsMatrix<Dual, ROWS, COLS> for DualM<ROWS, COLS> {
-    fn mat_mul<const C2: usize>(&self, rhs: DualM<COLS, C2>) -> DualM<ROWS, C2> {
-        DualM {
-            val: self.val * rhs.val,
-            dij_val: DualM::binary_mm_dij(
-                &self.dij_val,
-                &rhs.dij_val,
-                |l_dij| l_dij * rhs.val,
-                |r_dij| self.val * r_dij,
-            ),
-        }
-    }
-
-    fn c(val: MatF64<ROWS, COLS>) -> Self {
-        Self { val, dij_val: None }
-    }
-
-    fn scaled(&self, s: Dual) -> Self {
-        DualM {
-            val: self.val * s.val,
-            dij_val: DualM::binary_ms_dij(
-                &self.dij_val,
-                &s.dij_val,
-                |l_dij| l_dij * s.val,
-                |r_dij| self.val * *r_dij,
-            ),
-        }
-    }
-
-    fn identity() -> Self {
-        DualM::c(MatF64::<ROWS, COLS>::identity())
-    }
-
-    fn get(&self, idx: (usize, usize)) -> Dual {
-        Dual {
-            val: self.val[idx],
-            dij_val: self
-                .dij_val
-                .clone()
-                .map(|dij_val| MutTensorDD::from_map(&dij_val.view(), |v| v[idx])),
-        }
-    }
-
-    fn from_array2(duals: [[Dual; COLS]; ROWS]) -> Self {
-        let mut shape = None;
-        let mut val_mat = MatF64::<ROWS, COLS>::zeros();
-        for i in 0..duals.len() {
-            let d_rows = duals[i].clone();
-            for j in 0..d_rows.len() {
-                let d = d_rows.clone()[j].clone();
-
-                val_mat[(i, j)] = d.val;
-                if d.dij_val.is_some() {
-                    shape = Some(d.dij_val.clone().unwrap().dims());
-                }
-            }
-        }
-
-        if shape.is_none() {
-            return DualM {
-                val: val_mat,
-                dij_val: None,
-            };
-        }
-        let shape = shape.unwrap();
-
-        let mut r = MutTensorDDRC::<ROWS, COLS>::from_shape(shape);
-
-        for i in 0..duals.len() {
-            let d_rows = duals[i].clone();
-            for j in 0..d_rows.len() {
-                let d = d_rows.clone()[j].clone();
-                if d.dij_val.is_some() {
-                    for d0 in 0..shape[0] {
-                        for d1 in 0..shape[1] {
-                            r.mut_view().get_mut([d0, d1])[(i, j)] =
-                                d.dij_val.clone().unwrap().get([d0, d1]);
-                        }
-                    }
-                }
-            }
-        }
-        DualM {
-            val: val_mat,
-            dij_val: Some(r),
-        }
-    }
-
-    fn real(&self) -> &MatF64<ROWS, COLS> {
-        &self.val
-    }
-
-    fn block_mat2x2<const R0: usize, const R1: usize, const C0: usize, const C1: usize>(
-        top_row: (
-            <Dual as IsScalar<1>>::Matrix<R0, C0>,
-            <Dual as IsScalar<1>>::Matrix<R0, C1>,
-        ),
-        bot_row: (
-            <Dual as IsScalar<1>>::Matrix<R1, C0>,
-            <Dual as IsScalar<1>>::Matrix<R1, C1>,
-        ),
-    ) -> Self {
-        assert_eq!(R0 + R1, ROWS);
-        assert_eq!(C0 + C1, COLS);
-
-        Self::block_mat2x1(
-            DualM::<R0, COLS>::block_mat1x2(top_row.0, top_row.1),
-            DualM::<R1, COLS>::block_mat1x2(bot_row.0, bot_row.1),
-        )
-    }
-
-    fn block_mat2x1<const R0: usize, const R1: usize>(
-        top_row: DualM<R0, COLS>,
-        bot_row: DualM<R1, COLS>,
-    ) -> Self {
-        assert_eq!(R0 + R1, ROWS);
-        let maybe_dij = Self::two_dx(top_row.dij_val, bot_row.dij_val);
-
-        Self {
-            val: MatF64::<ROWS, COLS>::block_mat2x1(top_row.val, bot_row.val),
-            dij_val: match maybe_dij {
-                Some(dij_val) => {
-                    let mut r = MutTensorDDRC::<ROWS, COLS>::from_shape(dij_val.shape());
-                    for d0 in 0..dij_val.shape()[0] {
-                        for d1 in 0..dij_val.shape()[1] {
-                            *r.mut_view().get_mut([d0, d1]) = MatF64::<ROWS, COLS>::block_mat2x1(
-                                dij_val.lhs.get([d0, d1]),
-                                dij_val.rhs.get([d0, d1]),
-                            );
-                        }
-                    }
-                    Some(r)
-                }
-                None => None,
-            },
-        }
-    }
-
-    fn block_mat1x2<const C0: usize, const C1: usize>(
-        left_col: <Dual as IsScalar<1>>::Matrix<ROWS, C0>,
-        righ_col: <Dual as IsScalar<1>>::Matrix<ROWS, C1>,
-    ) -> Self {
-        assert_eq!(C0 + C1, COLS);
-        let maybe_dij = Self::two_dx(left_col.dij_val, righ_col.dij_val);
-
-        Self {
-            val: MatF64::<ROWS, COLS>::block_mat1x2(left_col.val, righ_col.val),
-            dij_val: match maybe_dij {
-                Some(dij_val) => {
-                    let mut r = MutTensorDDRC::<ROWS, COLS>::from_shape(dij_val.shape());
-                    for d0 in 0..dij_val.shape()[0] {
-                        for d1 in 0..dij_val.shape()[1] {
-                            *r.mut_view().get_mut([d0, d1]) = MatF64::<ROWS, COLS>::block_mat1x2(
-                                dij_val.lhs.get([d0, d1]),
-                                dij_val.rhs.get([d0, d1]),
-                            );
-                        }
-                    }
-                    Some(r)
-                }
-                None => None,
-            },
-        }
-    }
-
-    fn get_fixed_submat<const R: usize, const C: usize>(
-        &self,
-        start_r: usize,
-        start_c: usize,
-    ) -> DualM<R, C> {
-        DualM {
-            val: self.val.get_fixed_submat(start_r, start_c),
-            dij_val: self.dij_val.clone().map(|dij_val| {
-                MutTensorDDRC::from_map(&dij_val.view(), |v| v.get_fixed_submat(start_r, start_c))
-            }),
-        }
-    }
-
-    fn get_col_vec(&self, start_r: usize) -> DualV<ROWS> {
-        DualV {
-            val: self.val.get_col_vec(start_r),
-            dij_val: self
-                .dij_val
-                .clone()
-                .map(|dij_val| MutTensorDDR::from_map(&dij_val.view(), |v| v.get_col_vec(start_r))),
-        }
-    }
-
-    fn get_row_vec(&self, c: usize) -> DualV<COLS> {
-        DualV {
-            val: self.val.get_row_vec(c),
-            dij_val: self
-                .dij_val
-                .clone()
-                .map(|dij_val| MutTensorDDR::from_map(&dij_val.view(), |v| v.get_row_vec(c))),
-        }
-    }
-
-    fn from_c_array2(vals: [[f64; COLS]; ROWS]) -> Self {
-        DualM {
-            val: MatF64::from_c_array2(vals),
-            dij_val: None,
-        }
-    }
-}
-
-impl<const ROWS: usize, const COLS: usize> Add for DualM<ROWS, COLS> {
-    type Output = DualM<ROWS, COLS>;
-
-    fn add(self, rhs: Self) -> Self::Output {
-        DualM {
-            val: self.val + rhs.val,
-            dij_val: Self::binary_mm_dij(
-                &self.dij_val,
-                &rhs.dij_val,
-                |l_dij| *l_dij,
-                |r_dij| *r_dij,
-            ),
-        }
-    }
-}
-
-impl<const ROWS: usize, const COLS: usize> Sub for DualM<ROWS, COLS> {
-    type Output = DualM<ROWS, COLS>;
-
-    fn sub(self, rhs: Self) -> Self::Output {
-        DualM {
-            val: self.val - rhs.val,
-            dij_val: Self::binary_mm_dij(
-                &self.dij_val,
-                &rhs.dij_val,
-                |l_dij| *l_dij,
-                |r_dij| -r_dij,
-            ),
-        }
-    }
-}
-
-impl<const ROWS: usize, const COLS: usize> Neg for DualM<ROWS, COLS> {
-    type Output = DualM<ROWS, COLS>;
-
-    fn neg(self) -> Self::Output {
-        DualM {
-            val: -self.val,
-            dij_val: self
-                .dij_val
-                .clone()
-                .map(|dij_val| MutTensorDDRC::from_map(&dij_val.view(), |v| -v)),
-        }
-    }
-}
-
-impl<const ROWS: usize, const COLS: usize> IsVectorLike for DualM<ROWS, COLS> {
-    fn zero() -> Self {
-        Self::c(MatF64::zeros())
-    }
-}
-
-impl<const ROWS: usize, const COLS: usize> Mul<DualV<COLS>> for DualM<ROWS, COLS> {
-    type Output = DualV<ROWS>;
-
-    fn mul(self, rhs: DualV<COLS>) -> Self::Output {
-        DualV {
-            val: self.val * rhs.val,
-            dij_val: Self::binary_mv_dij(
-                &self.dij_val,
-                &rhs.dij_val,
-                |l_dij| l_dij * rhs.val,
-                |r_dij| self.val * r_dij,
-            ),
-        }
-    }
-}
-
-impl<const ROWS: usize, const COLS: usize> Debug for DualM<ROWS, COLS> {
-    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
-        if self.dij_val.is_some() {
-            f.debug_struct("Dual")
-                .field("val", &self.val)
-                .field("dij_val", &self.dij_val.as_ref().unwrap().elem_view())
-                .finish()
-        } else {
-            f.debug_struct("Dual").field("val", &self.val).finish()
-        }
-    }
-}
-
-/// Pair of dual matrices
-pub struct DijPairM<const R0: usize, const C0: usize, const R1: usize, const C1: usize> {
-    lhs: MutTensorDDRC<R0, C0>,
-    rhs: MutTensorDDRC<R1, C1>,
-}
-
-impl<const R0: usize, const C0: usize, const R1: usize, const C1: usize>
-    DijPairM<R0, C0, R1, C1>
-{
-    fn shape(&self) -> [usize; 2] {
-        self.lhs.dims()
-    }
-}
-
-/// Pair of dual matrices
-pub struct DijPairMV<const ROWS: usize, const COLS: usize> {
-    /// left hand side
-    pub lhs: MutTensorDDRC<ROWS, COLS>,
-    /// right hand side
-    pub rhs: MutTensorDDR<COLS>,
-}
-
-mod test {
-
-    #[test]
-    fn matrix_dual() {
-        use crate::dual::dual_matrix::DualM;
-        use crate::dual::dual_scalar::Dual;
-        use crate::maps::matrix_valued_maps::MatrixValuedMapFromMatrix;
-        use crate::types::matrix::IsMatrix;
-        use crate::types::scalar::IsScalar;
-        use crate::types::MatF64;
-        use sophus_tensor::view::IsTensorLike;
-
-        let m_2x4 = MatF64::<2, 4>::new_random();
-        let m_4x1 = MatF64::<4, 1>::new_random();
-
-        fn mat_mul_fn<S: IsScalar<1>>(x: S::Matrix<2, 4>, y: S::Matrix<4, 1>) -> S::Matrix<2, 1> {
-            x.mat_mul(y)
-        }
-        let finite_diff = MatrixValuedMapFromMatrix::sym_diff_quotient(
-            |x| mat_mul_fn::<f64>(x, m_4x1),
-            m_2x4,
-            1e-6,
-        );
-        let auto_grad = MatrixValuedMapFromMatrix::fw_autodiff(
-            |x| mat_mul_fn::<Dual>(x, DualM::c(m_4x1)),
-            m_2x4,
-        );
-
-        for i in 0..2 {
-            for j in 0..1 {
-                approx::assert_abs_diff_eq!(
-                    finite_diff.get([i, j]),
-                    auto_grad.get([i, j]),
-                    epsilon = 0.0001
-                );
-            }
-        }
-
-        let finite_diff = MatrixValuedMapFromMatrix::sym_diff_quotient(
-            |x| mat_mul_fn::<f64>(m_2x4, x),
-            m_4x1,
-            1e-6,
-        );
-        let auto_grad = MatrixValuedMapFromMatrix::fw_autodiff(
-            |x| mat_mul_fn::<Dual>(DualM::c(m_2x4), x),
-            m_4x1,
-        );
-
-        for i in 0..2 {
-            for j in 0..1 {
-                approx::assert_abs_diff_eq!(
-                    finite_diff.get([i, j]),
-                    auto_grad.get([i, j]),
-                    epsilon = 0.0001
-                );
-            }
-        }
-
-        fn mat_mul2_fn<S: IsScalar<1>>(x: S::Matrix<4, 4>) -> S::Matrix<4, 4> {
-            x.mat_mul(x.clone())
-        }
-
-        let m_4x4 = MatF64::<4, 4>::new_random();
-
-        let finite_diff =
-            MatrixValuedMapFromMatrix::sym_diff_quotient(mat_mul2_fn::<f64>, m_4x4, 1e-6);
-        let auto_grad = MatrixValuedMapFromMatrix::fw_autodiff(mat_mul2_fn::<Dual>, m_4x4);
-
-        for i in 0..2 {
-            for j in 0..1 {
-                approx::assert_abs_diff_eq!(
-                    finite_diff.get([i, j]),
-                    auto_grad.get([i, j]),
-                    epsilon = 0.0001
-                );
-            }
-        }
-    }
-}
diff --git a/crates/sophus_calculus/src/dual/dual_scalar.rs b/crates/sophus_calculus/src/dual/dual_scalar.rs
deleted file mode 100644
index 08e3d41..0000000
--- a/crates/sophus_calculus/src/dual/dual_scalar.rs
+++ /dev/null
@@ -1,505 +0,0 @@
-use crate::dual::dual_matrix::DualM;
-use crate::dual::dual_vector::DualV;
-use crate::types::scalar::IsScalar;
-
-use sophus_tensor::mut_tensor::InnerScalarToVec;
-use sophus_tensor::mut_tensor::MutTensorDD;
-use sophus_tensor::view::IsTensorLike;
-
-use num_traits::One;
-use num_traits::Zero;
-use std::fmt::Debug;
-use std::ops::Add;
-use std::ops::Div;
-use std::ops::Mul;
-use std::ops::Neg;
-use std::ops::Sub;
-
-/// Dual number
-#[derive(Clone)]
-pub struct Dual {
-    /// value - real number
-    pub val: f64,
-
-    /// derivative - infinitesimal number
-    pub dij_val: Option<MutTensorDD<f64>>,
-}
-
-impl AsRef<Dual> for Dual {
-    fn as_ref(&self) -> &Dual {
-        self
-    }
-}
-
-impl One for Dual {
-    fn one() -> Self {
-        Dual::c(1.0)
-    }
-}
-
-impl Zero for Dual {
-    fn zero() -> Self {
-        Dual::c(0.0)
-    }
-
-    fn is_zero(&self) -> bool {
-        self.val == 0.0
-    }
-}
-
-impl Dual {
-    /// create a dual number
-    pub fn v(val: f64) -> Self {
-        let dij_val = MutTensorDD::<f64>::from_shape_and_val([1, 1], 1.0);
-        Self {
-            val,
-            dij_val: Some(dij_val),
-        }
-    }
-
-    fn binary_dij<F: FnMut(&f64) -> f64, G: FnMut(&f64) -> f64>(
-        lhs_dx: &Option<MutTensorDD<f64>>,
-        rhs_dx: &Option<MutTensorDD<f64>>,
-        mut left_op: F,
-        mut right_op: G,
-    ) -> Option<MutTensorDD<f64>> {
-        match (lhs_dx, rhs_dx) {
-            (None, None) => None,
-            (None, Some(rhs_dij)) => {
-                let out_dij = MutTensorDD::from_map(&rhs_dij.view(), |r_dij: &f64| right_op(r_dij));
-                Some(out_dij)
-            }
-            (Some(lhs_dij), None) => {
-                let out_dij = MutTensorDD::from_map(&lhs_dij.view(), |l_dij: &f64| left_op(l_dij));
-                Some(out_dij)
-            }
-            (Some(lhs_dij), Some(rhs_dij)) => {
-                let dyn_mat = MutTensorDD::from_map2(
-                    &lhs_dij.view(),
-                    &rhs_dij.view(),
-                    |l_dij: &f64, r_dij: &f64| left_op(l_dij) + right_op(r_dij),
-                );
-                Some(dyn_mat)
-            }
-        }
-    }
-}
-
-impl Neg for Dual {
-    type Output = Dual;
-
-    fn neg(self) -> Self {
-        Dual {
-            val: -self.val,
-            dij_val: match self.dij_val.clone() {
-                Some(dij_val) => {
-                    let dyn_mat = MutTensorDD::from_map(&dij_val.view(), |v: &f64| -v);
-
-                    Some(dyn_mat)
-                }
-                None => None,
-            },
-        }
-    }
-}
-
-impl PartialEq for Dual {
-    fn eq(&self, other: &Self) -> bool {
-        self.val == other.val
-    }
-}
-
-impl PartialOrd for Dual {
-    fn partial_cmp(&self, other: &Self) -> Option<std::cmp::Ordering> {
-        self.val.partial_cmp(&other.val)
-    }
-}
-
-impl From<f64> for Dual {
-    fn from(value: f64) -> Self {
-        Dual::c(value)
-    }
-}
-
-impl Debug for Dual {
-    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
-        if self.dij_val.is_some() {
-            f.debug_struct("Dual")
-                .field("val", &self.val)
-                .field("dij_val", &self.dij_val.as_ref().unwrap().elem_view())
-                .finish()
-        } else {
-            f.debug_struct("Dual").field("val", &self.val).finish()
-        }
-    }
-}
-
-impl IsScalar<1> for Dual {
-    type Vector<const ROWS: usize> = DualV<ROWS>;
-    type Matrix<const ROWS: usize, const COLS: usize> = DualM<ROWS, COLS>;
-
-    fn c(val: f64) -> Self {
-        Self { val, dij_val: None }
-    }
-
-    fn cos(self) -> Dual {
-        Dual {
-            val: self.val.cos(),
-            dij_val: match self.dij_val.clone() {
-                Some(dij_val) => {
-                    let dyn_mat =
-                        MutTensorDD::from_map(&dij_val.view(), |dij: &f64| -dij * self.val.sin());
-                    Some(dyn_mat)
-                }
-                None => None,
-            },
-        }
-    }
-
-    fn sin(self) -> Dual {
-        Dual {
-            val: self.val.sin(),
-            dij_val: match self.dij_val.clone() {
-                Some(dij_val) => {
-                    let dyn_mat =
-                        MutTensorDD::from_map(&dij_val.view(), |dij: &f64| dij * self.val.cos());
-                    Some(dyn_mat)
-                }
-                None => None,
-            },
-        }
-    }
-
-    fn value(self) -> f64 {
-        self.val
-    }
-
-    fn abs(self) -> Self {
-        Dual {
-            val: self.val.abs(),
-            dij_val: match self.dij_val.clone() {
-                Some(dij_val) => {
-                    let dyn_mat = MutTensorDD::from_map(&dij_val.view(), |dij: &f64| {
-                        *dij * self.val.signum()
-                    });
-                    Some(dyn_mat)
-                }
-                None => None,
-            },
-        }
-    }
-
-    fn atan2(self, rhs: Self) -> Self {
-        let inv_sq_nrm: f64 = 1.0 / (self.val * self.val + rhs.val * rhs.val);
-        Dual {
-            val: self.val.atan2(rhs.val),
-            dij_val: Self::binary_dij(
-                &self.dij_val,
-                &rhs.dij_val,
-                |l_dij| inv_sq_nrm * (l_dij * rhs.val),
-                |r_dij| -inv_sq_nrm * (self.val * r_dij),
-            ),
-        }
-    }
-
-    fn real(&self) -> f64 {
-        self.val
-    }
-
-    fn sqrt(self) -> Self {
-        let sqrt = self.val.sqrt();
-        Dual {
-            val: sqrt,
-            dij_val: match self.dij_val {
-                Some(dij) => {
-                    let out_dij =
-                        MutTensorDD::from_map(&dij.view(), |dij: &f64| dij * 1.0 / (2.0 * sqrt));
-                    Some(out_dij)
-                }
-                None => None,
-            },
-        }
-    }
-
-    fn to_vec(self) -> DualV<1> {
-        DualV::<1> {
-            val: self.val.to_vec(),
-            dij_val: match self.dij_val {
-                Some(dij) => {
-                    let tmp = dij.inner_scalar_to_vec();
-                    Some(tmp)
-                }
-                None => None,
-            },
-        }
-    }
-
-    fn tan(self) -> Self {
-        Dual {
-            val: self.val.tan(),
-            dij_val: match self.dij_val.clone() {
-                Some(dij_val) => {
-                    let c = self.val.cos();
-                    let sec_squared = 1.0 / (c * c);
-                    let dyn_mat =
-                        MutTensorDD::from_map(&dij_val.view(), |dij: &f64| *dij * sec_squared);
-                    Some(dyn_mat)
-                }
-                None => None,
-            },
-        }
-    }
-
-    fn acos(self) -> Self {
-        Dual {
-            val: self.val.acos(),
-            dij_val: match self.dij_val.clone() {
-                Some(dij_val) => {
-                    let dval = -1.0 / (1.0 - self.val * self.val).sqrt();
-                    let dyn_mat = MutTensorDD::from_map(&dij_val.view(), |dij: &f64| *dij * dval);
-                    Some(dyn_mat)
-                }
-                None => None,
-            },
-        }
-    }
-
-    fn asin(self) -> Self {
-        Dual {
-            val: self.val.asin(),
-            dij_val: match self.dij_val.clone() {
-                Some(dij_val) => {
-                    let dval = 1.0 / (1.0 - self.val * self.val).sqrt();
-                    let dyn_mat = MutTensorDD::from_map(&dij_val.view(), |dij: &f64| *dij * dval);
-                    Some(dyn_mat)
-                }
-                None => None,
-            },
-        }
-    }
-
-    fn atan(self) -> Self {
-        Dual {
-            val: self.val.atan(),
-            dij_val: match self.dij_val.clone() {
-                Some(dij_val) => {
-                    let dval = 1.0 / (1.0 + self.val * self.val);
-                    let dyn_mat = MutTensorDD::from_map(&dij_val.view(), |dij: &f64| *dij * dval);
-                    Some(dyn_mat)
-                }
-                None => None,
-            },
-        }
-    }
-
-    fn fract(self) -> Self {
-        Dual {
-            val: self.val.fract(),
-            dij_val: match self.dij_val.clone() {
-                Some(dij_val) => {
-                    let dyn_mat = MutTensorDD::from_map(&dij_val.view(), |dij: &f64| *dij);
-                    Some(dyn_mat)
-                }
-                None => None,
-            },
-        }
-    }
-
-    fn floor(&self) -> i64 {
-        self.val.floor() as i64
-    }
-}
-
-impl Add for Dual {
-    type Output = Dual;
-    fn add(self, rhs: Self) -> Self::Output {
-        self.add(&rhs)
-    }
-}
-
-impl Add<&Dual> for Dual {
-    type Output = Dual;
-    fn add(self, rhs: &Self) -> Self::Output {
-        let r = self.val + rhs.val;
-
-        Dual {
-            val: r,
-            dij_val: Self::binary_dij(&self.dij_val, &rhs.dij_val, |l_dij| *l_dij, |r_dij| *r_dij),
-        }
-    }
-}
-
-impl Mul for Dual {
-    type Output = Dual;
-    fn mul(self, rhs: Self) -> Self::Output {
-        self.mul(&rhs)
-    }
-}
-
-impl Mul<&Dual> for Dual {
-    type Output = Dual;
-    fn mul(self, rhs: &Self) -> Self::Output {
-        let r = self.val * rhs.val;
-
-        Dual {
-            val: r,
-            dij_val: Self::binary_dij(
-                &self.dij_val,
-                &rhs.dij_val,
-                |l_dij| l_dij * rhs.val,
-                |r_dij| r_dij * self.val,
-            ),
-        }
-    }
-}
-
-impl Div for Dual {
-    type Output = Dual;
-    fn div(self, rhs: Self) -> Self::Output {
-        self.div(&rhs)
-    }
-}
-
-impl Div<&Dual> for Dual {
-    type Output = Dual;
-    fn div(self, rhs: &Self) -> Self::Output {
-        let rhs_inv = 1.0 / rhs.val;
-        Dual {
-            val: self.val * rhs_inv,
-            dij_val: Self::binary_dij(
-                &self.dij_val,
-                &rhs.dij_val,
-                |l_dij| l_dij * rhs_inv,
-                |r_dij| -self.val * r_dij * rhs_inv * rhs_inv,
-            ),
-        }
-    }
-}
-
-impl Sub for Dual {
-    type Output = Dual;
-    fn sub(self, rhs: Self) -> Self::Output {
-        self.sub(&rhs)
-    }
-}
-
-impl Sub<&Dual> for Dual {
-    type Output = Dual;
-    fn sub(self, rhs: &Self) -> Self::Output {
-        Dual {
-            val: self.val - rhs.val,
-            dij_val: Self::binary_dij(&self.dij_val, &rhs.dij_val, |l_dij| *l_dij, |r_dij| -r_dij),
-        }
-    }
-}
-
-mod test {
-
-    #[test]
-    fn scalar_valued() {
-        use crate::dual::dual_scalar::Dual;
-        use crate::maps::curves::ScalarValuedCurve;
-        use crate::types::scalar::IsScalar;
-
-        for i in 1..10 {
-            let a = 0.1 * (i as f64);
-
-            // f(x) = x^2
-            fn square_fn<S: IsScalar<1>>(x: S) -> S {
-                x.clone() * x
-            }
-            let finite_diff = ScalarValuedCurve::sym_diff_quotient(square_fn, a, 1e-6);
-            let auto_grad = ScalarValuedCurve::fw_autodiff(square_fn, a);
-            approx::assert_abs_diff_eq!(finite_diff, auto_grad, epsilon = 0.0001);
-
-            {
-                fn add_fn<S: IsScalar<1>>(x: S, y: S) -> S {
-                    x + y
-                }
-                let b = 12.0;
-                let finite_diff =
-                    ScalarValuedCurve::sym_diff_quotient(|x| add_fn::<f64>(x, b), a, 1e-6);
-                let auto_grad =
-                    ScalarValuedCurve::fw_autodiff(|x| add_fn::<Dual>(x, Dual::c(b)), a);
-                approx::assert_abs_diff_eq!(finite_diff, auto_grad, epsilon = 0.0001);
-
-                let b = 12.0;
-                let finite_diff =
-                    ScalarValuedCurve::sym_diff_quotient(|x| add_fn::<f64>(b, x), a, 1e-6);
-                let auto_grad =
-                    ScalarValuedCurve::fw_autodiff(|x| add_fn::<Dual>(Dual::c(b), x), a);
-                approx::assert_abs_diff_eq!(finite_diff, auto_grad, epsilon = 0.0001);
-            }
-
-            {
-                fn sub_fn<S: IsScalar<1>>(x: S, y: S) -> S {
-                    x - y
-                }
-                let b = 12.0;
-                let finite_diff =
-                    ScalarValuedCurve::sym_diff_quotient(|x| sub_fn::<f64>(x, b), a, 1e-6);
-                let auto_grad =
-                    ScalarValuedCurve::fw_autodiff(|x| sub_fn::<Dual>(x, Dual::c(b)), a);
-                approx::assert_abs_diff_eq!(finite_diff, auto_grad, epsilon = 0.0001);
-
-                let b = 12.0;
-                let finite_diff =
-                    ScalarValuedCurve::sym_diff_quotient(|x| sub_fn::<f64>(b, x), a, 1e-6);
-                let auto_grad =
-                    ScalarValuedCurve::fw_autodiff(|x| sub_fn::<Dual>(Dual::c(b), x), a);
-                approx::assert_abs_diff_eq!(finite_diff, auto_grad, epsilon = 0.0001);
-            }
-
-            {
-                fn mul_fn<S: IsScalar<1>>(x: S, y: S) -> S {
-                    x * y
-                }
-                let b = 12.0;
-                let finite_diff =
-                    ScalarValuedCurve::sym_diff_quotient(|x| mul_fn::<f64>(x, b), a, 1e-6);
-                let auto_grad =
-                    ScalarValuedCurve::fw_autodiff(|x| mul_fn::<Dual>(x, Dual::c(b)), a);
-                approx::assert_abs_diff_eq!(finite_diff, auto_grad, epsilon = 0.0001);
-
-                let b = 12.0;
-                let finite_diff =
-                    ScalarValuedCurve::sym_diff_quotient(|x| mul_fn::<f64>(x, b), a, 1e-6);
-                let auto_grad =
-                    ScalarValuedCurve::fw_autodiff(|x| mul_fn::<Dual>(x, Dual::c(b)), a);
-                approx::assert_abs_diff_eq!(finite_diff, auto_grad, epsilon = 0.0001);
-            }
-
-            {
-                fn div_fn<S: IsScalar<1>>(x: S, y: S) -> S {
-                    x / y
-                }
-                let b = 12.0;
-                let finite_diff =
-                    ScalarValuedCurve::sym_diff_quotient(|x| div_fn::<f64>(x, b), a, 1e-6);
-                let auto_grad =
-                    ScalarValuedCurve::fw_autodiff(|x| div_fn::<Dual>(x, Dual::c(b)), a);
-                approx::assert_abs_diff_eq!(finite_diff, auto_grad, epsilon = 0.0001);
-
-                let b = 12.0;
-                let finite_diff =
-                    ScalarValuedCurve::sym_diff_quotient(|x| div_fn::<f64>(x, b), a, 1e-6);
-                let auto_grad =
-                    ScalarValuedCurve::fw_autodiff(|x| div_fn::<Dual>(x, Dual::c(b)), a);
-                approx::assert_abs_diff_eq!(finite_diff, auto_grad, epsilon = 0.0001);
-
-                let finite_diff =
-                    ScalarValuedCurve::sym_diff_quotient(|x| div_fn::<f64>(b, x), a, 1e-6);
-                let auto_grad =
-                    ScalarValuedCurve::fw_autodiff(|x| div_fn::<Dual>(Dual::c(b), x), a);
-                approx::assert_abs_diff_eq!(finite_diff, auto_grad, epsilon = 0.0001);
-
-                let b = 12.0;
-                let finite_diff =
-                    ScalarValuedCurve::sym_diff_quotient(|x| div_fn::<f64>(x, b), a, 1e-6);
-                let auto_grad =
-                    ScalarValuedCurve::fw_autodiff(|x| div_fn::<Dual>(x, Dual::c(b)), a);
-                approx::assert_abs_diff_eq!(finite_diff, auto_grad, epsilon = 0.0001);
-            }
-        }
-    }
-}
diff --git a/crates/sophus_calculus/src/dual/dual_vector.rs b/crates/sophus_calculus/src/dual/dual_vector.rs
deleted file mode 100644
index d703643..0000000
--- a/crates/sophus_calculus/src/dual/dual_vector.rs
+++ /dev/null
@@ -1,423 +0,0 @@
-use crate::dual::dual_matrix::DualM;
-use crate::dual::dual_scalar::Dual;
-use crate::types::scalar::IsScalar;
-use crate::types::vector::IsVector;
-use crate::types::vector::IsVectorLike;
-use crate::types::VecF64;
-
-use sophus_tensor::mut_tensor::InnerVecToMat;
-use sophus_tensor::mut_tensor::MutTensorDD;
-use sophus_tensor::mut_tensor::MutTensorDDR;
-use sophus_tensor::mut_view::IsMutTensorLike;
-use sophus_tensor::view::IsTensorLike;
-
-use std::fmt::Debug;
-use std::ops::Add;
-use std::ops::Neg;
-use std::ops::Sub;
-
-/// Dual vector
-#[derive(Clone)]
-pub struct DualV<const ROWS: usize> {
-    /// value - real vector
-    pub val: VecF64<ROWS>,
-    /// derivative - infinitesimal vector
-    pub dij_val: Option<MutTensorDDR<ROWS>>,
-}
-
-impl<const ROWS: usize> DualV<ROWS> {
-    /// create a dual vector
-    pub fn v(val: VecF64<ROWS>) -> Self {
-        let mut dij_val = MutTensorDDR::<ROWS>::from_shape([ROWS, 1]);
-        for i in 0..ROWS {
-            dij_val.mut_view().get_mut([i, 0])[(i, 0)] = 1.0;
-        }
-
-        Self {
-            val,
-            dij_val: Some(dij_val),
-        }
-    }
-
-    fn binary_dij<
-        const R0: usize,
-        const R1: usize,
-        F: FnMut(&VecF64<R0>) -> VecF64<ROWS>,
-        G: FnMut(&VecF64<R1>) -> VecF64<ROWS>,
-    >(
-        lhs_dx: &Option<MutTensorDDR<R0>>,
&Option>, - mut left_op: F, - mut right_op: G, - ) -> Option> { - match (lhs_dx, rhs_dx) { - (None, None) => None, - (None, Some(rhs_dij)) => { - let out_dij = MutTensorDDR::from_map(&rhs_dij.view(), |r_dij| right_op(r_dij)); - Some(out_dij) - } - (Some(lhs_dij), None) => { - let out_dij = MutTensorDDR::from_map(&lhs_dij.view(), |l_dij| left_op(l_dij)); - Some(out_dij) - } - (Some(lhs_dij), Some(rhs_dij)) => { - let dyn_mat = - MutTensorDDR::from_map2(&lhs_dij.view(), &rhs_dij.view(), |l_dij, r_dij| { - left_op(l_dij) + right_op(r_dij) - }); - Some(dyn_mat) - } - } - } - - fn binary_vs_dij< - const R0: usize, - F: FnMut(&VecF64) -> VecF64, - G: FnMut(&f64) -> VecF64, - >( - lhs_dx: &Option>, - rhs_dx: &Option>, - mut left_op: F, - mut right_op: G, - ) -> Option> { - match (lhs_dx, rhs_dx) { - (None, None) => None, - (None, Some(rhs_dij)) => { - let out_dij = MutTensorDDR::from_map(&rhs_dij.view(), |r_dij| right_op(r_dij)); - Some(out_dij) - } - (Some(lhs_dij), None) => { - let out_dij = MutTensorDDR::from_map(&lhs_dij.view(), |l_dij| left_op(l_dij)); - Some(out_dij) - } - (Some(lhs_dij), Some(rhs_dij)) => { - let dyn_mat = - MutTensorDDR::from_map2(&lhs_dij.view(), &rhs_dij.view(), |l_dij, r_dij| { - left_op(l_dij) + right_op(r_dij) - }); - Some(dyn_mat) - } - } - } - - fn two_dx( - mut lhs_dx: Option>, - mut rhs_dx: Option>, - ) -> Option> { - if lhs_dx.is_none() && rhs_dx.is_none() { - return None; - } - - if lhs_dx.is_some() && rhs_dx.is_some() { - assert_eq!( - lhs_dx.clone().unwrap().dims(), - rhs_dx.clone().unwrap().dims() - ); - } - - if lhs_dx.is_none() { - lhs_dx = Some(MutTensorDDR::from_shape(rhs_dx.clone().unwrap().dims())) - } else if rhs_dx.is_none() { - rhs_dx = Some(MutTensorDDR::from_shape(lhs_dx.clone().unwrap().dims())) - } - - Some(DijPair { - lhs: lhs_dx.unwrap(), - rhs: rhs_dx.unwrap(), - }) - } -} - -impl Neg for DualV { - type Output = DualV; - - fn neg(self) -> Self::Output { - DualV { - val: -self.val, - dij_val: self - .dij_val - .clone() - .map(|dij_val| MutTensorDDR::from_map(&dij_val.view(), |v| -v)), - } - } -} - -impl Sub for DualV { - type Output = DualV; - - fn sub(self, rhs: Self) -> Self::Output { - DualV { - val: self.val - rhs.val, - dij_val: Self::binary_dij(&self.dij_val, &rhs.dij_val, |l_dij| *l_dij, |r_dij| -r_dij), - } - } -} - -impl Add for DualV { - type Output = DualV; - - fn add(self, rhs: Self) -> Self::Output { - DualV { - val: self.val + rhs.val, - dij_val: Self::binary_dij(&self.dij_val, &rhs.dij_val, |l_dij| *l_dij, |r_dij| *r_dij), - } - } -} - -struct DijPair { - lhs: MutTensorDDR, - rhs: MutTensorDDR, -} - -impl DijPair { - fn shape(&self) -> [usize; 2] { - self.lhs.dims() - } -} - -impl Debug for DualV { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - if self.dij_val.is_some() { - f.debug_struct("Dual") - .field("val", &self.val) - .field("dij_val", &self.dij_val.as_ref().unwrap().elem_view()) - .finish() - } else { - f.debug_struct("Dual").field("val", &self.val).finish() - } - } -} - -impl IsVectorLike for DualV { - fn zero() -> Self { - Self::c(VecF64::zeros()) - } -} - -impl IsVector for DualV { - fn set_c(&mut self, idx: usize, v: f64) { - self.val[idx] = v; - if self.dij_val.is_some() { - let dij = &mut self.dij_val.as_mut().unwrap(); - for i in 0..dij.dims()[0] { - for j in 0..dij.dims()[1] { - dij.mut_view().get_mut([i, j])[idx] = 0.0; - } - } - } - } - - fn norm(&self) -> Dual { - self.clone().dot(self.clone()).sqrt() - } - - fn squared_norm(&self) -> Dual { - self.clone().dot(self.clone()) - } - - fn 
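// [sketch -- editor addition, not crate API] The `binary_dij` merge above,
// reduced to plain Option<f64>. `None` encodes "constant operand" (derivative
// identically zero), so a binary operation combines the two optional
// derivative slots by the chain rule and stays `None` only when both sides
// are constant:
fn binary_d(
    lhs_d: Option<f64>,
    rhs_d: Option<f64>,
    left_op: impl Fn(f64) -> f64,
    right_op: impl Fn(f64) -> f64,
) -> Option<f64> {
    match (lhs_d, rhs_d) {
        (None, None) => None,                                 // const op const
        (None, Some(r)) => Some(right_op(r)),                 // only rhs varies
        (Some(l), None) => Some(left_op(l)),                  // only lhs varies
        (Some(l), Some(r)) => Some(left_op(l) + right_op(r)), // chain-rule sum
    }
}

fn main() {
    // d(u - v) = du - dv: left_op is the identity, right_op is negation.
    assert_eq!(binary_d(Some(1.0), Some(1.0), |l| l, |r| -r), Some(0.0));
    assert_eq!(binary_d(None, Some(1.0), |l| l, |r| -r), Some(-1.0));
    assert_eq!(binary_d(None, None, |l| l, |r| -r), None);
}
// [end sketch]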
get(&self, idx: usize) -> Dual { - Dual { - val: self.val[idx], - dij_val: self - .dij_val - .clone() - .map(|dij_val| MutTensorDD::from_map(&dij_val.view(), |v| v[idx])), - } - } - - fn from_array(duals: [Dual; ROWS]) -> Self { - let mut shape = None; - let mut val_v = VecF64::::zeros(); - for i in 0..duals.len() { - let d = duals.clone()[i].clone(); - - val_v[i] = d.val; - if d.dij_val.is_some() { - shape = Some(d.dij_val.clone().unwrap().dims()); - } - } - - if shape.is_none() { - return DualV { - val: val_v, - dij_val: None, - }; - } - let shape = shape.unwrap(); - - let mut r = MutTensorDDR::::from_shape(shape); - - for i in 0..duals.len() { - let d = duals.clone()[i].clone(); - if d.dij_val.is_some() { - for d0 in 0..shape[0] { - for d1 in 0..shape[1] { - r.mut_view().get_mut([d0, d1])[(i, 0)] = - d.dij_val.clone().unwrap().get([d0, d1]); - } - } - } - } - DualV { - val: val_v, - dij_val: Some(r), - } - } - - fn from_c_array(vals: [f64; ROWS]) -> Self { - DualV { - val: VecF64::from_c_array(vals), - dij_val: None, - } - } - - fn c(val: VecF64) -> Self { - Self { val, dij_val: None } - } - - fn real(&self) -> &VecF64 { - &self.val - } - - fn get_fixed_rows(&self, start: usize) -> DualV { - DualV { - val: self.val.fixed_rows::(start).into(), - dij_val: self.dij_val.clone().map(|dij_val| { - MutTensorDDR::from_map(&dij_val.view(), |v| v.fixed_rows::(start).into()) - }), - } - } - - fn to_mat(self) -> DualM { - DualM:: { - val: self.val, - dij_val: self.dij_val.map(|dij| dij.inner_vec_to_mat()), - } - } - - fn block_vec2( - top_row: DualV, - bot_row: DualV, - ) -> Self { - assert_eq!(R0 + R1, ROWS); - - let maybe_dij = Self::two_dx(top_row.dij_val, bot_row.dij_val); - Self { - val: VecF64::::block_vec2(top_row.val, bot_row.val), - dij_val: match maybe_dij { - Some(dij_val) => { - let mut r = MutTensorDDR::::from_shape(dij_val.shape()); - for d0 in 0..dij_val.shape()[0] { - for d1 in 0..dij_val.shape()[1] { - *r.mut_view().get_mut([d0, d1]) = VecF64::::block_vec2( - dij_val.lhs.get([d0, d1]), - dij_val.rhs.get([d0, d1]), - ); - } - } - Some(r) - } - None => None, - }, - } - } - - fn scaled(&self, s: Dual) -> Self { - DualV { - val: self.val * s.val, - dij_val: Self::binary_vs_dij( - &self.dij_val, - &s.dij_val, - |l_dij| l_dij * s.val, - |r_dij| self.val * *r_dij, - ), - } - } - - fn dot(self, rhs: Self) -> Dual { - let mut sum = Dual::c(0.0); - - for i in 0..ROWS { - sum = sum + self.get(i) * rhs.get(i); - } - - sum - } - - fn normalized(&self) -> Self { - self.clone().scaled(Dual::c(1.0) / self.norm()) - } -} - -mod test { - - #[test] - fn scalar_valued() { - use crate::dual::dual_scalar::Dual; - use crate::dual::dual_vector::DualV; - use crate::maps::scalar_valued_maps::ScalarValuedMapFromVector; - use crate::maps::vector_valued_maps::VectorValuedMapFromVector; - use crate::points::example_points; - use crate::types::scalar::IsScalar; - use crate::types::vector::IsVector; - use crate::types::VecF64; - use sophus_tensor::view::IsTensorLike; - - let points: Vec> = example_points::(); - - for p in points.clone() { - for p1 in points.clone() { - { - fn dot_fn>(x: S::Vector<4>, y: S::Vector<4>) -> S { - x.dot(y) - } - let finite_diff = ScalarValuedMapFromVector::sym_diff_quotient( - |x| dot_fn::(x, p1), - p, - 1e-6, - ); - let auto_grad = ScalarValuedMapFromVector::fw_autodiff( - |x| dot_fn::(x, DualV::<4>::c(p1)), - p, - ); - approx::assert_abs_diff_eq!(finite_diff, auto_grad, epsilon = 0.0001); - } - - fn dot_fn>(x: S::Vector<4>, s: S) -> S::Vector<4> { - x.scaled(s) - } - let finite_diff 
= VectorValuedMapFromVector::sym_diff_quotient( - |x| dot_fn::(x, 0.99), - p, - 1e-6, - ); - let auto_grad = - VectorValuedMapFromVector::fw_autodiff(|x| dot_fn::(x, Dual::c(0.99)), p); - for i in 0..finite_diff.dims()[0] { - approx::assert_abs_diff_eq!( - finite_diff.get([i]), - auto_grad.get([i]), - epsilon = 0.0001 - ); - } - - let finite_diff = VectorValuedMapFromVector::sym_diff_quotient( - |x| dot_fn::(p1, x[0]), - p, - 1e-6, - ); - let auto_grad = VectorValuedMapFromVector::fw_autodiff( - |x| dot_fn::(DualV::c(p1), x.get(0)), - p, - ); - for i in 0..finite_diff.dims()[0] { - approx::assert_abs_diff_eq!( - finite_diff.get([i]), - auto_grad.get([i]), - epsilon = 0.0001 - ); - } - } - } - } -} diff --git a/crates/sophus_calculus/src/maps/curves.rs b/crates/sophus_calculus/src/maps/curves.rs deleted file mode 100644 index 76f2f33..0000000 --- a/crates/sophus_calculus/src/maps/curves.rs +++ /dev/null @@ -1,172 +0,0 @@ -use crate::dual::dual_matrix::DualM; -use crate::dual::dual_scalar::Dual; -use crate::dual::dual_vector::DualV; -use crate::types::MatF64; -use crate::types::VecF64; - -use sophus_tensor::view::IsTensorLike; - -/// A smooth curve in ℝ. -/// -/// This is a function which takes a scalar and returns a scalar: -/// -/// f: ℝ -> ℝ -pub struct ScalarValuedCurve; - -impl ScalarValuedCurve { - /// Finite difference quotient of the scalar-valued curve. - /// - /// The derivative is also a scalar. - /// - /// Since all operations are batched, the function returns a vector of scalars, i.e. a rank-1 - /// tensor. - pub fn sym_diff_quotient(curve: TFn, a: f64, h: f64) -> f64 - where - TFn: Fn(f64) -> f64, - { - (curve(a + h) - curve(a - h)) / (2.0 * h) - } - - /// Auto differentiation of the scalar-valued curve. - pub fn fw_autodiff(curve: TFn, a: f64) -> f64 - where - TFn: Fn(Dual) -> Dual, - { - curve(Dual::v(a)).dij_val.unwrap().get([0, 0]) - } -} - -/// A smooth curve in ℝʳ. -/// -/// This is a function which takes a scalar and returns a vector: -/// -/// f: ℝ -> ℝʳ -pub struct VectorValuedCurve; - -impl VectorValuedCurve { - /// Finite difference quotient of the vector-valued curve. - /// - /// The derivative is also a vector. - /// - /// Since all operations are batched, the function returns a vector of vector, i.e. a rank-2 - /// tensor. - pub fn sym_diff_quotient(curve: TFn, a: f64, h: f64) -> VecF64 - where - TFn: Fn(f64) -> VecF64, - { - (curve(a + h) - curve(a - h)) / (2.0 * h) - } - - /// Auto differentiation of the vector-valued curve. - pub fn fw_autodiff(curve: TFn, a: f64) -> VecF64 - where - TFn: Fn(Dual) -> DualV, - { - curve(Dual::v(a)).dij_val.unwrap().get([0, 0]) - } -} - -/// A smooth curve in ℝʳ x ℝᶜ. -/// -/// This is a function which takes a scalar and returns a matrix: -/// f: ℝ -> ℝʳ x ℝᶜ -pub struct MatrixValuedCurve; - -impl MatrixValuedCurve { - /// Finite difference quotient of the matrix-valued curve. - /// - /// The derivative is also a matrix. - /// - /// Since all operations are batched, the function returns a vector of matrices, i.e. a rank-3 - /// tensor. - pub fn sym_diff_quotient( - curve: TFn, - a: f64, - h: f64, - ) -> MatF64 - where - TFn: Fn(f64) -> MatF64, - { - (curve(a + h) - curve(a - h)) / (2.0 * h) - } - - /// Auto differentiation of the matrix-valued curve. 
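// [sketch -- editor addition, hypothetical helper] The symmetric difference
// quotient behind every `sym_diff_quotient` in this patch:
// (f(a + h) - f(a - h)) / (2h) has O(h^2) truncation error, versus O(h) for
// the one-sided quotient, which is why h = 1e-6 comfortably meets the 1e-4
// tolerances used in these tests.
fn sym_diff_quotient(f: impl Fn(f64) -> f64, a: f64, h: f64) -> f64 {
    (f(a + h) - f(a - h)) / (2.0 * h)
}

fn main() {
    // d/dx x^3 at x = 2 is 3 * 2^2 = 12.
    let d = sym_diff_quotient(|x| x * x * x, 2.0, 1e-6);
    assert!((d - 12.0).abs() < 1e-6);
}
// [end sketch]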
- pub fn fw_autodiff( - curve: TFn, - a: f64, - ) -> MatF64 - where - TFn: Fn(Dual) -> DualM, - { - curve(Dual::v(a)).dij_val.unwrap().get([0, 0]) - } -} - -mod test { - #[cfg(test)] - use crate::types::matrix::IsMatrix; - #[cfg(test)] - use crate::types::scalar::IsScalar; - #[cfg(test)] - use crate::types::vector::IsVector; - - #[test] - fn scalar_valued() { - use super::ScalarValuedCurve; - - for i in 0..10 { - let a = 0.1 * (i as f64); - - // f(x) = x^2 - fn square_fn>(x: S) -> S { - x.clone() * x - } - let finite_diff = ScalarValuedCurve::sym_diff_quotient(square_fn, a, 1e-6); - let auto_grad = ScalarValuedCurve::fw_autodiff(square_fn, a); - approx::assert_abs_diff_eq!(finite_diff, auto_grad, epsilon = 0.0001); - } - } - - #[test] - fn vector_valued() { - use super::VectorValuedCurve; - - for i in 0..10 { - let a = 0.1 * (i as f64); - - // f(x) = [cos(x), sin(x)] - fn trig_fn, V2: IsVector>(x: S) -> V2 { - V2::from_array([x.clone().cos(), x.sin()]) - } - - let finite_diff = VectorValuedCurve::sym_diff_quotient(trig_fn, a, 1e-6); - let auto_grad = VectorValuedCurve::fw_autodiff(trig_fn, a); - approx::assert_abs_diff_eq!(finite_diff, auto_grad, epsilon = 0.0001); - } - } - - #[test] - fn matrix_valued() { - use super::MatrixValuedCurve; - - for i in 0..10 { - let a = 0.1 * (i as f64); - - // f(x) = [[ cos(x), sin(x), 0], - // [-sin(x), cos(x), 0]] - fn fn_x, M23: IsMatrix>(x: S) -> M23 { - let sin = x.clone().sin(); - let cos = x.clone().cos(); - - M23::from_array2([ - [cos.clone(), sin.clone(), S::c(0.0)], - [-sin, cos, S::c(0.0)], - ]) - } - - let finite_diff = MatrixValuedCurve::sym_diff_quotient(fn_x, a, 1e-6); - let auto_grad = MatrixValuedCurve::fw_autodiff(fn_x, a); - approx::assert_abs_diff_eq!(finite_diff, auto_grad, epsilon = 0.0001); - } - } -} diff --git a/crates/sophus_calculus/src/maps/matrix_valued_maps.rs b/crates/sophus_calculus/src/maps/matrix_valued_maps.rs deleted file mode 100644 index d645f7d..0000000 --- a/crates/sophus_calculus/src/maps/matrix_valued_maps.rs +++ /dev/null @@ -1,228 +0,0 @@ -use crate::dual::dual_matrix::DualM; -use crate::dual::dual_vector::DualV; -use crate::types::matrix::IsMatrix; -use crate::types::MatF64; -use crate::types::VecF64; - -use sophus_tensor::element::SMat; -use sophus_tensor::mut_tensor::MutTensorDDRC; -use sophus_tensor::mut_tensor::MutTensorDRC; -use sophus_tensor::mut_view::IsMutTensorLike; - -use std::marker::PhantomData; - -/// Matrix-valued map on a vector space. -/// -/// This is a function which takes a vector and returns a matrix: -/// -/// f: ℝᵐ -> ℝʳ x ℝᶜ -/// -pub struct MatrixValuedMapFromVector; - -impl MatrixValuedMapFromVector { - /// Finite difference quotient of the matrix-valued map. - /// - /// The derivative is a rank-3 tensor with shape (Rₒ x Cₒ x Rᵢ). - /// - /// For efficiency reasons, we return Rᵢ x [Rₒ x Cₒ] - pub fn sym_diff_quotient( - matrix_valued: TFn, - a: VecF64, - eps: f64, - ) -> MutTensorDRC - where - TFn: Fn(VecF64) -> MatF64, - { - let mut out = MutTensorDRC::::from_shape([INROWS]); - for i1 in 0..INROWS { - let mut a_plus = a; - - a_plus[i1] += eps; - - let mut a_minus = a; - a_minus[i1] -= eps; - - let val = (matrix_valued(a_plus) - matrix_valued(a_minus)).scaled(1.0 / (2.0 * eps)); - - *out.mut_view().get_mut([i1]) = val; - } - out - } - - /// Auto differentiation of the matrix-valued map. 
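// [sketch -- editor addition, hypothetical helper with plain arrays, no
// tensor types] What `MatrixValuedMapFromVector::sym_diff_quotient` computes:
// perturbing one input coordinate at a time yields one OUTROWS x OUTCOLS
// matrix of partials per input row, i.e. the rank-3 tensor described above in
// its INROWS x [OUTROWS x OUTCOLS] layout.
fn sym_diff<const OR: usize, const OC: usize, const IR: usize>(
    f: impl Fn([f64; IR]) -> [[f64; OC]; OR],
    a: [f64; IR],
    eps: f64,
) -> [[[f64; OC]; OR]; IR] {
    let mut out = [[[0.0; OC]; OR]; IR];
    for i in 0..IR {
        let (mut ap, mut am) = (a, a);
        ap[i] += eps;
        am[i] -= eps;
        let (fp, fm) = (f(ap), f(am));
        for r in 0..OR {
            for c in 0..OC {
                out[i][r][c] = (fp[r][c] - fm[r][c]) / (2.0 * eps);
            }
        }
    }
    out
}

fn main() {
    // f(x, y) = [[x*y, x], [y, 1]]; the partials w.r.t. x at (2, 3) are
    // [[3, 1], [0, 0]].
    let f = |v: [f64; 2]| [[v[0] * v[1], v[0]], [v[1], 1.0]];
    let j = sym_diff(f, [2.0, 3.0], 1e-6);
    assert!((j[0][0][0] - 3.0).abs() < 1e-6 && (j[0][0][1] - 1.0).abs() < 1e-6);
}
// [end sketch]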
- pub fn fw_autodiff( - matrix_valued: TFn, - a: VecF64, - ) -> MutTensorDRC - where - TFn: Fn(DualV) -> DualM, - { - MutTensorDRC { - mut_array: matrix_valued(DualV::v(a)) - .dij_val - .unwrap() - .mut_array - .into_shape([INROWS]) - .unwrap(), - phantom: PhantomData, - } - } -} - -/// Matrix-valued map on a product space (=matrices). -/// -/// This is a function which takes a matrix and returns a matrix: -/// -/// f: ℝᵐ x ℝⁿ -> ℝʳ x ℝᶜ -/// -pub struct MatrixValuedMapFromMatrix; - -impl MatrixValuedMapFromMatrix { - /// Finite difference quotient of the matrix-valued map. - /// - /// The derivative is a rank-4 tensor with shape (Rₒ x Cₒ x Rᵢ x Cᵢ). - /// - /// For efficiency reasons, we return Rᵢ x Cᵢ x [Rₒ x Cₒ] - pub fn sym_diff_quotient< - TFn, - const OUTROWS: usize, - const OUTCOLS: usize, - const INROWS: usize, - const INCOLS: usize, - >( - vector_field: TFn, - a: MatF64, - eps: f64, - ) -> MutTensorDDRC - where - TFn: Fn(MatF64) -> MatF64, - { - let mut out = MutTensorDDRC::::from_shape_and_val( - [INROWS, INCOLS], - SMat::::zeros(), - ); - for i1 in 0..INROWS { - for i0 in 0..INCOLS { - let mut a_plus = a; - - a_plus[(i1, i0)] += eps; - - let mut a_minus = a; - a_minus[(i1, i0)] -= eps; - - let val = (vector_field(a_plus) - vector_field(a_minus)) / (2.0 * eps); - - *out.mut_view().get_mut([i1, i0]) = val; - } - } - out - } - - /// Auto differentiation of the matrix-valued map. - pub fn fw_autodiff< - TFn, - const OUTROWS: usize, - const OUTCOLS: usize, - const INROWS: usize, - const INCOLS: usize, - >( - matrix_valued: TFn, - a: MatF64, - ) -> MutTensorDDRC - where - TFn: Fn(DualM) -> DualM, - { - matrix_valued(DualM::v(a)).dij_val.unwrap() - } -} - -mod test { - - #[test] - fn test_batched_matrix_valued_map_from_vector() { - use crate::maps::matrix_valued_maps::MatrixValuedMapFromVector; - use crate::types::matrix::IsMatrix; - use crate::types::scalar::IsScalar; - use crate::types::vector::IsVector; - use crate::types::VecF64; - use sophus_tensor::view::IsTensorLike; - - // [[ i ]] - // [[ ]] - // [[ j ]] [[ ]] - // [[ ]] [[ 0 -k j x ]] - // [[ k ]] [[ ]] - // hat [[ ]] = [[ k 0 -i y ]] - // [[ x ]] [[ ]] - // [[ ]] [[ -j i 0 z ]] - // [[ y ]] [[ ]] - // [[ ]] - // [[ z ]] - fn hat_fn, M34: IsMatrix, V6: IsVector>(v: V6) -> M34 { - let i = v.get(0); - let j = v.get(1); - let k = v.get(2); - let ni = -i.clone(); - let nj = -j.clone(); - let nk = -k.clone(); - let x = v.get(3); - let y = v.get(4); - let z = v.get(5); - - let ret: M34 = M34::from_array2([ - [S::c(0.0), nk, j, x], - [k, S::c(0.0), ni, y], - [nj, i, S::c(0.0), z], - ]); - ret - } - - let a = VecF64::<6>::new(0.1, 0.2, 0.4, 0.7, 0.8, 0.9); - - let finite_diff = MatrixValuedMapFromVector::sym_diff_quotient(hat_fn, a, 1e-6); - let auto_grad = MatrixValuedMapFromVector::fw_autodiff(hat_fn, a); - approx::assert_abs_diff_eq!( - finite_diff.view().elem_view(), - auto_grad.view().elem_view(), - epsilon = 0.0001 - ); - } - - #[test] - fn test_batched_matrix_valued_map_from_matrix() { - use crate::maps::matrix_valued_maps::MatrixValuedMapFromMatrix; - use crate::types::matrix::IsMatrix; - use crate::types::scalar::IsScalar; - use crate::types::MatF64; - use sophus_tensor::view::IsTensorLike; - - // [[ a b ]] 1 [[ d -b ]] - // inv [[ ]] = ------- [[ ]] - // [[ c d ]] ad - bc [[ -c a ]] - - fn f, M22: IsMatrix>(m: M22) -> M22 { - let a = m.get((0, 0)); - let b = m.get((0, 1)); - - let c = m.get((1, 0)); - let d = m.get((1, 1)); - - let det = S::c(1.0) / (a.clone() * d.clone() - (b.clone() * c.clone())); - - M22::from_array2([ - 
[det.clone() * d, -det.clone() * b], - [-det.clone() * c, det * a], - ]) - } - let a = MatF64::<2, 2>::new(0.1, 0.2, 0.4, 0.7); - - let finite_diff = MatrixValuedMapFromMatrix::sym_diff_quotient(f, a, 1e-6); - let auto_grad = MatrixValuedMapFromMatrix::fw_autodiff(f, a); - - approx::assert_abs_diff_eq!( - finite_diff.view().elem_view(), - auto_grad.view().elem_view(), - epsilon = 2.0 - ); - } -} diff --git a/crates/sophus_calculus/src/maps/scalar_valued_maps.rs b/crates/sophus_calculus/src/maps/scalar_valued_maps.rs deleted file mode 100644 index 23f3e7f..0000000 --- a/crates/sophus_calculus/src/maps/scalar_valued_maps.rs +++ /dev/null @@ -1,179 +0,0 @@ -use crate::dual::dual_matrix::DualM; -use crate::dual::dual_scalar::Dual; -use crate::dual::dual_vector::DualV; -use crate::types::MatF64; -use crate::types::VecF64; - -use sophus_tensor::mut_tensor::MutTensorDD; -use sophus_tensor::view::IsTensorLike; - -/// Scalar-valued map on a vector space. -/// -/// This is a function which takes a vector and returns a scalar: -/// -/// f: ℝᵐ -> ℝ -/// -/// These functions are also called a scalar fields (on vector spaces). -/// -pub struct ScalarValuedMapFromVector; - -impl ScalarValuedMapFromVector { - /// Finite difference quotient of the scalar-valued map. - /// - /// The derivative is a vector or rank-1 tensor of shape (Rᵢ). - /// - /// Since all operations are batched, it returns a (B x Rᵢ) rank-2 tensor. - pub fn sym_diff_quotient( - scalar_valued: TFn, - a: VecF64, - eps: f64, - ) -> VecF64 - where - TFn: Fn(VecF64) -> f64, - { - let mut out = VecF64::::zeros(); - - for r in 0..INROWS { - let mut a_plus = a; - a_plus[r] += eps; - - let mut a_minus = a; - a_minus[r] -= eps; - - out[r] = (scalar_valued(a_plus) - scalar_valued(a_minus)) / (2.0 * eps); - } - out - } - - /// Auto differentiation of the scalar-valued map. - pub fn fw_autodiff( - scalar_valued: TFn, - a: VecF64, - ) -> VecF64 - where - TFn: Fn(DualV) -> Dual, - { - let jacobian: MutTensorDD = scalar_valued(DualV::v(a)).dij_val.unwrap(); - assert_eq!(jacobian.dims(), [INROWS, 1]); - let mut out = VecF64::zeros(); - - for r in 0..jacobian.dims()[0] { - out[r] = jacobian.get([r, 0]); - } - out - } -} - -/// Scalar-valued map on a product space (= space of matrices). -/// -/// This is a function which takes a matrix and returns a scalar: -/// -/// f: ℝᵐ x ℝⁿ -> ℝ -/// -/// These functions are also called a scalar fields (on product spaces). -/// -pub struct ScalarValuedMapFromMatrix; - -impl ScalarValuedMapFromMatrix { - /// Finite difference quotient of the scalar-valued map. - /// - /// The derivative is a matrix or rank-2 tensor of shape (Rᵢ x Cⱼ). - pub fn sym_diff_quotient( - scalar_valued: TFn, - a: MatF64, - eps: f64, - ) -> MatF64 - where - TFn: Fn(MatF64) -> f64, - { - let mut out = MatF64::::zeros(); - - for r in 0..INROWS { - for c in 0..INCOLS { - let mut a_plus = a; - a_plus[(r, c)] += eps; - let mut a_minus = a; - a_minus[(r, c)] -= eps; - - out[(r, c)] = (scalar_valued(a_plus) - scalar_valued(a_minus)) / (2.0 * eps); - } - } - out - } - - /// Auto differentiation of the scalar-valued map. 
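// [sketch -- editor addition, hypothetical helper] The gradient loop of
// `ScalarValuedMapFromVector::sym_diff_quotient` above, dependency-free: one
// central difference per input coordinate gives the gradient of a scalar
// field f: R^n -> R.
fn gradient<const N: usize>(f: impl Fn([f64; N]) -> f64, a: [f64; N], eps: f64) -> [f64; N] {
    let mut g = [0.0; N];
    for r in 0..N {
        let (mut ap, mut am) = (a, a);
        ap[r] += eps;
        am[r] -= eps;
        g[r] = (f(ap) - f(am)) / (2.0 * eps);
    }
    g
}

fn main() {
    // grad |x|^2 = 2x, so at (0.1, 0.4) the gradient is (0.2, 0.8).
    let g = gradient(|x: [f64; 2]| x[0] * x[0] + x[1] * x[1], [0.1, 0.4], 1e-6);
    assert!((g[0] - 0.2).abs() < 1e-6 && (g[1] - 0.8).abs() < 1e-6);
}
// [end sketch]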
- pub fn fw_autodiff( - scalar_valued: TFn, - a: MatF64, - ) -> MatF64 - where - TFn: Fn(DualM) -> Dual, - { - let jacobian: MutTensorDD = scalar_valued(DualM::v(a)).dij_val.unwrap(); - assert_eq!(jacobian.dims(), [INROWS, INCOLS]); - let mut out = MatF64::zeros(); - - for r in 0..jacobian.dims()[0] { - for c in 0..jacobian.dims()[1] { - out[(r, c)] = jacobian.get([r, c]); - } - } - out - } -} - -mod test { - #[cfg(test)] - use crate::maps::scalar_valued_maps::ScalarValuedMapFromMatrix; - #[cfg(test)] - use crate::maps::scalar_valued_maps::ScalarValuedMapFromVector; - #[cfg(test)] - use crate::types::matrix::IsMatrix; - #[cfg(test)] - use crate::types::scalar::IsScalar; - #[cfg(test)] - use crate::types::vector::IsVector; - #[cfg(test)] - use crate::types::MatF64; - #[cfg(test)] - use crate::types::VecF64; - - #[test] - fn test_scalar_valued_map_from_vector() { - let a = VecF64::<2>::new(0.1, 0.4); - - fn f, V2: IsVector>(x: V2) -> S { - x.norm() - } - - let finite_diff = ScalarValuedMapFromVector::sym_diff_quotient(f, a, 1e-6); - let auto_grad = ScalarValuedMapFromVector::fw_autodiff(f, a); - approx::assert_abs_diff_eq!(finite_diff, auto_grad, epsilon = 0.0001); - } - - #[test] - fn test_batched_scalar_valued_map_from_matrix() { - // [[ a, b ]] - // det [[ c, d ]] = ad - bc - // [[ e, f ]] - - fn determinant_fn, M32: IsMatrix>(mat: M32) -> S { - let a = mat.get((0, 0)); - let b = mat.get((0, 1)); - - let c = mat.get((1, 0)); - let d = mat.get((1, 1)); - - (a * d) - (b * c) - } - - let mut mat = MatF64::<3, 2>::zeros(); - mat[(0, 0)] = 4.6; - mat[(1, 0)] = 1.6; - mat[(1, 1)] = 0.6; - - let finite_diff = ScalarValuedMapFromMatrix::sym_diff_quotient(determinant_fn, mat, 1e-6); - let auto_grad = ScalarValuedMapFromMatrix::fw_autodiff(determinant_fn, mat); - approx::assert_abs_diff_eq!(finite_diff, auto_grad, epsilon = 0.0001); - } -} diff --git a/crates/sophus_calculus/src/maps/vector_valued_maps.rs b/crates/sophus_calculus/src/maps/vector_valued_maps.rs deleted file mode 100644 index cda6f9e..0000000 --- a/crates/sophus_calculus/src/maps/vector_valued_maps.rs +++ /dev/null @@ -1,241 +0,0 @@ -use crate::dual::dual_matrix::DualM; -use crate::dual::dual_vector::DualV; -use crate::types::MatF64; -use crate::types::VecF64; - -use sophus_tensor::mut_tensor::MutTensorDDR; -use sophus_tensor::mut_tensor::MutTensorDR; -use sophus_tensor::mut_view::IsMutTensorLike; -use sophus_tensor::view::IsTensorLike; - -use std::marker::PhantomData; - -/// Vector-valued map on a vector space. -/// -/// This is a function which takes a vector and returns a vector: -/// -/// f: ℝᵐ -> ℝʳ -/// -/// These functions are also called vector fields (on vector space x s). -/// -pub struct VectorValuedMapFromVector; - -impl VectorValuedMapFromVector { - /// Finite difference quotient of the vector-valued map. - /// - /// The derivative is a matrix or rank-2 tensor with shape (Rₒ x Rᵢ). - /// - /// For efficiency reasons, we return the transpose Rᵢ x (Rₒ) - /// - pub fn sym_diff_quotient( - vector_valued: TFn, - a: VecF64, - eps: f64, - ) -> MutTensorDR - where - TFn: Fn(VecF64) -> VecF64, - { - let mut out = MutTensorDR::::from_shape([INROWS]); - - for r in 0..INROWS { - let mut a_plus = a; - a_plus[r] += eps; - - let mut a_minus = a; - a_minus[r] -= eps; - - out.get_mut([r]) - .copy_from(&((vector_valued(a_plus) - vector_valued(a_minus)) / (2.0 * eps))); - } - out - } - - /// Auto differentiation of the vector-valued map. 
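// [sketch -- editor addition, hypothetical helper] The Jacobian that
// `VectorValuedMapFromVector::sym_diff_quotient` assembles: entry [c] is the
// central difference of f along input c, stored inputs-outermost -- the
// transposed-for-efficiency layout noted above.
fn jacobian<const R: usize, const M: usize>(
    f: impl Fn([f64; M]) -> [f64; R],
    a: [f64; M],
    eps: f64,
) -> [[f64; R]; M] {
    let mut jac = [[0.0; R]; M];
    for c in 0..M {
        let (mut ap, mut am) = (a, a);
        ap[c] += eps;
        am[c] -= eps;
        let (fp, fm) = (f(ap), f(am));
        for r in 0..R {
            jac[c][r] = (fp[r] - fm[r]) / (2.0 * eps);
        }
    }
    jac
}

fn main() {
    // Pinhole projection (x, y, z) -> (x/z, y/z): d(x/z)/dz = -x/z^2.
    let proj = |v: [f64; 3]| [v[0] / v[2], v[1] / v[2]];
    let j = jacobian(proj, [0.6, 2.2, 1.1], 1e-6);
    assert!((j[2][0] + 0.6 / (1.1 * 1.1)).abs() < 1e-6);
}
// [end sketch]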
- pub fn fw_autodiff( - vector_valued: TFn, - a: VecF64, - ) -> MutTensorDR - where - TFn: Fn(DualV) -> DualV, - { - let d = vector_valued(DualV::v(a)).dij_val; - if d.is_none() { - return MutTensorDR::from_shape([INROWS]); - } - - MutTensorDR { - mut_array: d.unwrap().mut_array.into_shape([INROWS]).unwrap(), - phantom: PhantomData, - } - } - - /// Finite difference quotient of the vector-valued map. - /// - /// The derivative is a matrix or rank-2 tensor with shape (Rₒ x Rᵢ). - /// - pub fn static_sym_diff_quotient( - vector_valued: TFn, - a: VecF64, - eps: f64, - ) -> MatF64 - where - TFn: Fn(VecF64) -> VecF64, - { - let jac = Self::sym_diff_quotient(vector_valued, a, eps); - let mut sjac = MatF64::::zeros(); - - for r in 0..INROWS { - let v = jac.get([r]); - sjac.fixed_view_mut::(0, r).copy_from(&v); - } - - sjac - } - - /// Auto differentiation of the vector-valued map. - pub fn static_fw_autodiff( - vector_valued: TFn, - a: VecF64, - ) -> MatF64 - where - TFn: Fn(DualV) -> DualV, - { - let jac = Self::fw_autodiff(vector_valued, a); - let mut sjac = MatF64::::zeros(); - - for r in 0..INROWS { - let v = jac.get([r]); - sjac.fixed_view_mut::(0, r).copy_from(&v); - } - - sjac - } -} - -/// Vector-valued map on a product space (= space of matrices). -/// -/// This is a function which takes a matrix and returns a vector: -/// -/// f: ℝᵐ x ℝⁿ -> ℝʳ -/// -/// This type of function is also called a vector field (on product spaces). -/// -pub struct VectorValuedMapFromMatrix; - -impl VectorValuedMapFromMatrix { - /// Finite difference quotient of the vector-valued map. - /// - /// The derivative is a matrix or rank-3 tensor with shape (Rₒ x Rᵢ x Cᵢ). - /// - /// For efficiency reasons, we return Rᵢ x Cᵢ x (Rₒ) - /// - pub fn sym_diff_quotient( - vector_valued: TFn, - a: MatF64, - eps: f64, - ) -> MutTensorDDR - where - TFn: Fn(MatF64) -> VecF64, - { - let mut out = MutTensorDDR::::from_shape([INROWS, INCOLS]); - - for c in 0..INCOLS { - for r in 0..INROWS { - let mut a_plus = a; - - a_plus[(r, c)] += eps; - - let mut a_minus = a; - - a_minus[(r, c)] -= eps; - - let vv = (vector_valued(a_plus) - vector_valued(a_minus)) / (2.0 * eps); - *out.mut_view().get_mut([r, c]) = vv; - } - } - out - } - - /// Auto differentiation of the vector-valued map. 
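// [sketch -- editor addition, hypothetical `Dual64`/`grad2`] Why `fw_autodiff`
// carries one derivative slot per input entry: seeding input i with
// derivative e_i and reading the output's derivative recovers column i of the
// Jacobian. `DualV::v` seeds all inputs in a single pass; a scalar dual gets
// the same answer with one forward pass per input:
#[derive(Clone, Copy)]
struct Dual64 {
    val: f64,
    der: f64,
}

fn grad2(f: impl Fn([Dual64; 2]) -> Dual64, a: [f64; 2]) -> [f64; 2] {
    let mut g = [0.0; 2];
    for i in 0..2 {
        let x = [
            Dual64 { val: a[0], der: if i == 0 { 1.0 } else { 0.0 } },
            Dual64 { val: a[1], der: if i == 1 { 1.0 } else { 0.0 } },
        ];
        g[i] = f(x).der;
    }
    g
}

fn main() {
    // f(x, y) = x * y has gradient (y, x).
    let mul = |v: [Dual64; 2]| Dual64 {
        val: v[0].val * v[1].val,
        der: v[0].der * v[1].val + v[0].val * v[1].der, // product rule
    };
    assert_eq!(grad2(mul, [2.0, 3.0]), [3.0, 2.0]);
}
// [end sketch]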
- pub fn fw_autodiff( - vector_valued: TFn, - a: MatF64, - ) -> MutTensorDDR - where - TFn: Fn(DualM) -> DualV, - { - vector_valued(DualM::v(a)).dij_val.unwrap() - } -} - -mod test { - - #[test] - fn test_batched_vector_valued_map_from_vector() { - use crate::maps::vector_valued_maps::VectorValuedMapFromVector; - use crate::types::scalar::IsScalar; - use crate::types::vector::IsVector; - use crate::types::VecF64; - use sophus_tensor::view::IsTensorLike; - - let a = VecF64::<3>::new(0.6, 2.2, 1.1); - - // [[ x ]] [[ x / z ]] - // proj [[ y ]] = [[ ]] - // [[ z ]] [[ y / z ]] - fn proj_fn, V2: IsVector, V3: IsVector>(v: V3) -> V2 { - let x = v.get(0); - let y = v.get(1); - let z = v.get(2); - - V2::from_array([x / z.clone(), y / z]) - } - - let finite_diff = VectorValuedMapFromVector::sym_diff_quotient(proj_fn, a, 1e-6); - let auto_grad = VectorValuedMapFromVector::fw_autodiff(proj_fn, a); - for i in 0..2 { - approx::assert_abs_diff_eq!(finite_diff.get([i]), auto_grad.get([i]), epsilon = 0.0001); - } - - let sfinite_diff = VectorValuedMapFromVector::static_sym_diff_quotient(proj_fn, a, 1e-6); - let sauto_grad = VectorValuedMapFromVector::static_fw_autodiff(proj_fn, a); - approx::assert_abs_diff_eq!(sfinite_diff, sauto_grad, epsilon = 0.0001); - } - - #[test] - fn test_batched_vector_valued_map_from_matrix() { - use crate::maps::vector_valued_maps::VectorValuedMapFromMatrix; - use crate::types::matrix::IsMatrix; - use crate::types::scalar::IsScalar; - use crate::types::vector::IsVector; - use crate::types::MatF64; - use sophus_tensor::view::IsTensorLike; - - fn f, M32: IsMatrix, V4: IsVector>(x: M32) -> V4 { - let a = x.get((0, 0)); - let b = x.get((0, 1)); - let c = x.get((1, 0)); - let d = x.get((1, 1)); - let e = x.get((2, 0)); - let f = x.get((2, 1)); - - V4::from_array([a + b, c + d, e + f, S::c(1.0)]) - } - - let mut mat = MatF64::<3, 2>::zeros(); - mat[(0, 0)] = -4.6; - mat[(0, 1)] = -1.6; - mat[(1, 0)] = 0.6; - mat[(1, 1)] = 1.6; - mat[(2, 0)] = -1.6; - mat[(2, 1)] = 0.2; - - let finite_diff = VectorValuedMapFromMatrix::sym_diff_quotient(f, mat, 1e-6); - let auto_grad = VectorValuedMapFromMatrix::fw_autodiff(f, mat); - approx::assert_abs_diff_eq!( - finite_diff.elem_view(), - auto_grad.elem_view(), - epsilon = 0.0001 - ); - } -} diff --git a/crates/sophus_calculus/src/points.rs b/crates/sophus_calculus/src/points.rs deleted file mode 100644 index fac4ca9..0000000 --- a/crates/sophus_calculus/src/points.rs +++ /dev/null @@ -1,29 +0,0 @@ -use crate::types::scalar::IsScalar; -use crate::types::vector::IsVector; -use crate::types::vector::IsVectorLike; -use crate::types::VecF64; - -/// Example points -pub fn example_points, const POINT: usize>() -> Vec> { - let points4 = vec![ - VecF64::<4>::from_array([0.1, 0.0, 0.0, 0.0]), - VecF64::<4>::from_array([1.0, 4.0, 1.0, 0.5]), - VecF64::<4>::from_array([0.7, 5.0, 1.1, (-5.0)]), - VecF64::<4>::from_array([1.0, 3.0, 1.0, 0.5]), - VecF64::<4>::from_array([0.7, 5.0, 0.8, (-5.0)]), - VecF64::<4>::from_array([1.0, 3.0, 1.0, 0.5]), - VecF64::<4>::from_array([-0.7, 5.0, 0.1, (-5.0)]), - VecF64::<4>::from_array([2.0, (-3.0), 1.0, 0.5]), - ]; - - let mut out: Vec> = vec![]; - for p4 in points4 { - let mut v = S::Vector::::zero(); - for i in 0..POINT.min(4) { - let val = p4[i]; - v.set_c(i, val); - } - out.push(v) - } - out -} diff --git a/crates/sophus_calculus/src/types.rs b/crates/sophus_calculus/src/types.rs deleted file mode 100644 index 12ba624..0000000 --- a/crates/sophus_calculus/src/types.rs +++ /dev/null @@ -1,24 +0,0 @@ -use 
simba::simd::AutoSimd; - -/// matrices -pub mod matrix; -/// parameters -pub mod params; -/// scalars -pub mod scalar; -/// vectors -pub mod vector; - -/// f64 vector -pub type VecF64 = nalgebra::SMatrix; -/// f64 matrix -pub type MatF64 = nalgebra::SMatrix; - -/// batch of f64 scalars -pub type BatchF64 = nalgebra::SMatrix, 1, 1>; -/// batch of f64 vectors -pub type BatchVecF64 = - nalgebra::SMatrix, ROWS, 1>; -/// batch of f64 matrices -pub type BatchMatF64 = - nalgebra::SMatrix, ROWS, COLS>; diff --git a/crates/sophus_calculus/src/types/matrix.rs b/crates/sophus_calculus/src/types/matrix.rs deleted file mode 100644 index b47d6e5..0000000 --- a/crates/sophus_calculus/src/types/matrix.rs +++ /dev/null @@ -1,181 +0,0 @@ -use crate::types::scalar::IsScalar; -use crate::types::vector::IsVectorLike; -use crate::types::MatF64; -use crate::types::VecF64; - -use std::fmt::Debug; -use std::ops::Mul; - -/// Matrix - either a real (f64) or a dual number matrix -pub trait IsMatrix< - S: IsScalar, - const ROWS: usize, - const COLS: usize, - const BATCH_SIZE: usize, ->: Debug + Clone + Sized + Mul, Output = S::Vector> + IsVectorLike -{ - /// create a constant matrix - fn c(val: MatF64) -> Self; - - /// return the real part - fn real(&self) -> &MatF64; - - /// return scaled matrix - fn scaled(&self, v: S) -> Self; - - /// create an identity matrix - fn identity() -> Self; - - /// create from 2d array - fn from_array2(vals: [[S; COLS]; ROWS]) -> Self; - - /// create from constant 2d array - fn from_c_array2(vals: [[f64; COLS]; ROWS]) -> Self; - - /// get element - fn get(&self, idx: (usize, usize)) -> S; - - /// create 2x1 block matrix - fn block_mat2x1( - top_row: S::Matrix, - bot_row: S::Matrix, - ) -> Self; - - /// create 1x2 block matrix - fn block_mat1x2( - left_col: S::Matrix, - righ_col: S::Matrix, - ) -> Self; - - /// create 2x2 block matrix - fn block_mat2x2( - top_row: (S::Matrix, S::Matrix), - bot_row: (S::Matrix, S::Matrix), - ) -> Self; - - /// matrix multiplication - fn mat_mul(&self, other: S::Matrix) -> S::Matrix; - - /// get fixed submatrix - fn get_fixed_submat( - &self, - start_r: usize, - start_c: usize, - ) -> S::Matrix; - - /// extract column vector - fn get_col_vec(&self, r: usize) -> S::Vector; - - /// extract row vector - fn get_row_vec(&self, r: usize) -> S::Vector; -} - -impl IsVectorLike for MatF64 { - fn zero() -> Self { - MatF64::zeros() - } -} - -impl IsMatrix for MatF64 { - fn c(val: MatF64) -> Self { - val - } - - fn from_array2(vals: [[f64; COLS]; ROWS]) -> MatF64 { - let mut m = MatF64::::zeros(); - - for c in 0..COLS { - for r in 0..ROWS { - m[(r, c)] = vals[r][c]; - } - } - m - } - - fn from_c_array2(vals: [[f64; COLS]; ROWS]) -> Self { - let mut m = MatF64::::zeros(); - for c in 0..COLS { - for r in 0..ROWS { - m[(r, c)] = vals[r][c]; - } - } - m - } - - fn get(&self, idx: (usize, usize)) -> f64 { - self[idx] - } - - fn identity() -> Self { - Self::identity() - } - - fn real(&self) -> &Self { - self - } - - fn mat_mul(&self, other: MatF64) -> MatF64 { - self * other - } - - fn block_mat2x1( - top_row: MatF64, - bot_row: MatF64, - ) -> Self { - assert_eq!(ROWS, R0 + R1); - let mut m = Self::zero(); - - m.fixed_view_mut::(0, 0).copy_from(&top_row); - m.fixed_view_mut::(R0, 0).copy_from(&bot_row); - m - } - - fn block_mat2x2( - top_row: (MatF64, MatF64), - bot_row: (MatF64, MatF64), - ) -> Self { - assert_eq!(ROWS, R0 + R1); - assert_eq!(COLS, C0 + C1); - let mut m = Self::zero(); - - m.fixed_view_mut::(0, 0).copy_from(&top_row.0); - m.fixed_view_mut::(0, 
C0).copy_from(&top_row.1); - - m.fixed_view_mut::(R0, 0).copy_from(&bot_row.0); - m.fixed_view_mut::(R0, C0).copy_from(&bot_row.1); - m - } - - fn block_mat1x2( - left_col: >::Matrix, - righ_col: >::Matrix, - ) -> Self { - assert_eq!(COLS, C0 + C1); - let mut m = Self::zero(); - - m.fixed_view_mut::(0, 0).copy_from(&left_col); - m.fixed_view_mut::(0, C0).copy_from(&righ_col); - - m - } - - fn get_fixed_submat( - &self, - start_r: usize, - start_c: usize, - ) -> MatF64 { - self.fixed_view::(start_r, start_c).into() - } - - fn get_col_vec(&self, c: usize) -> VecF64 { - self.fixed_view::(0, c).into() - } - - fn get_row_vec(&self, r: usize) -> VecF64 { - self.fixed_view::<1, ROWS>(0, r).transpose() - } - - fn scaled(&self, v: f64) -> Self { - self * v - } -} diff --git a/crates/sophus_calculus/src/types/params.rs b/crates/sophus_calculus/src/types/params.rs deleted file mode 100644 index 42640d7..0000000 --- a/crates/sophus_calculus/src/types/params.rs +++ /dev/null @@ -1,23 +0,0 @@ -use crate::types::scalar::IsScalar; - -/// Parameter implementation. -pub trait ParamsImpl, const PARAMS: usize, const BATCH_SIZE: usize> { - /// Is the parameter vector valid? - fn are_params_valid(params: &S::Vector) -> bool; - /// Examples of valid parameter vectors. - fn params_examples() -> Vec>; - /// Examples of invalid parameter vectors. - fn invalid_params_examples() -> Vec>; -} - -/// A trait for types that have parameters. -pub trait HasParams, const PARAMS: usize, const BATCH_SIZE: usize>: - ParamsImpl -{ - /// Create from parameters. - fn from_params(params: &S::Vector) -> Self; - /// Set parameters. - fn set_params(&mut self, params: &S::Vector); - /// Get parameters. - fn params(&self) -> &S::Vector; -} diff --git a/crates/sophus_calculus/src/types/scalar.rs b/crates/sophus_calculus/src/types/scalar.rs deleted file mode 100644 index b25b48d..0000000 --- a/crates/sophus_calculus/src/types/scalar.rs +++ /dev/null @@ -1,144 +0,0 @@ -use crate::types::matrix::IsMatrix; -use crate::types::vector::IsVector; -use crate::types::MatF64; -use crate::types::VecF64; - -use std::fmt::Debug; -use std::ops::Add; -use std::ops::Div; -use std::ops::Mul; -use std::ops::Neg; -use std::ops::Sub; - -/// Scalar - either a real (f64) or a dual number -pub trait IsScalar: - PartialOrd - + PartialEq - + Debug - + Clone - + Add - + Mul - + Div - + Sub - + Sized - + Neg - + num_traits::One - + num_traits::Zero - + From -{ - /// Vector type - type Vector: IsVector; - - /// Matrix type - type Matrix: IsMatrix; - - /// create a constant scalar - fn c(val: f64) -> Self; - - /// return the real part - fn real(&self) -> f64; - - /// absolute value - fn abs(self) -> Self; - - /// cosine - fn cos(self) -> Self; - - /// sine - fn sin(self) -> Self; - - /// tangent - fn tan(self) -> Self; - - /// arccosine - fn acos(self) -> Self; - - /// arcsine - fn asin(self) -> Self; - - /// arctangent - fn atan(self) -> Self; - - /// square root - fn sqrt(self) -> Self; - - /// arctangent2 - fn atan2(self, x: Self) -> Self; - - /// value - fn value(self) -> f64; - - /// return as a vector - fn to_vec(self) -> Self::Vector<1>; - - /// fractional part - fn fract(self) -> Self; - - /// floor - fn floor(&self) -> i64; -} - -impl IsScalar<1> for f64 { - type Vector = VecF64; - type Matrix = MatF64; - - fn abs(self) -> f64 { - f64::abs(self) - } - - fn cos(self) -> f64 { - f64::cos(self) - } - - fn sin(self) -> f64 { - f64::sin(self) - } - - fn sqrt(self) -> f64 { - f64::sqrt(self) - } - - fn c(val: f64) -> f64 { - val - } - - fn value(self) -> f64 { - 
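// [sketch -- editor addition; assumes nalgebra on the path, as in this
// workspace] The block composition pattern behind `block_mat2x1`,
// `block_mat1x2`, and `block_vec2` above, isolated: statically sized mutable
// views stitch the pieces together after a runtime shape check.
use nalgebra::SVector;

fn block_vec2<const R0: usize, const R1: usize, const R: usize>(
    top: SVector<f64, R0>,
    bot: SVector<f64, R1>,
) -> SVector<f64, R> {
    assert_eq!(R0 + R1, R); // same shape assertion as the trait impls above
    let mut v = SVector::<f64, R>::zeros();
    v.fixed_view_mut::<R0, 1>(0, 0).copy_from(&top);
    v.fixed_view_mut::<R1, 1>(R0, 0).copy_from(&bot);
    v
}

fn main() {
    let v: SVector<f64, 5> = block_vec2(
        SVector::<f64, 2>::new(1.0, 2.0),
        SVector::<f64, 3>::new(3.0, 4.0, 5.0),
    );
    assert_eq!(v[4], 5.0);
}
// [end sketch]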
self - } - - fn atan2(self, x: Self) -> Self { - self.atan2(x) - } - - fn real(&self) -> f64 { - self.value() - } - - fn to_vec(self) -> VecF64<1> { - VecF64::<1>::new(self) - } - - fn tan(self) -> Self { - self.tan() - } - - fn acos(self) -> Self { - self.acos() - } - - fn asin(self) -> Self { - self.asin() - } - - fn atan(self) -> Self { - self.atan() - } - - fn fract(self) -> Self { - f64::fract(self) - } - - fn floor(&self) -> i64 { - f64::floor(*self) as i64 - } -} diff --git a/crates/sophus_calculus/src/types/vector.rs b/crates/sophus_calculus/src/types/vector.rs deleted file mode 100644 index 88777c3..0000000 --- a/crates/sophus_calculus/src/types/vector.rs +++ /dev/null @@ -1,149 +0,0 @@ -use super::scalar::IsScalar; -use super::MatF64; -use super::VecF64; - -use std::fmt::Debug; -use std::ops::Add; -use std::ops::Neg; -use std::ops::Sub; - -/// is vector like -pub trait IsVectorLike: - Debug + Clone + Sized + Neg + Add + Sub -{ - /// create a zero vector - fn zero() -> Self; -} - -/// Vector - either a real (f64) or a dual number vector -pub trait IsVector, const ROWS: usize, const BATCH_SIZE: usize>: - IsVectorLike -{ - /// create a constant vector - fn c(val: VecF64) -> Self; - - /// return the real part - fn real(&self) -> &VecF64; - - /// squared norm - fn squared_norm(&self) -> S; - - /// norm - fn norm(&self) -> S; - - /// get ith element - fn get(&self, idx: usize) -> S; - - /// set ith element as constant - fn set_c(&mut self, idx: usize, v: f64); - - /// create a vector from an array - fn from_array(vals: [S; ROWS]) -> Self; - - /// create a constant vector from an array - fn from_c_array(vals: [f64; ROWS]) -> Self; - - /// return scaled vector - fn scaled(&self, v: S) -> Self; - - /// get fixed rows - fn get_fixed_rows(&self, start: usize) -> S::Vector; - - /// return the matrix representation - fn to_mat(self) -> S::Matrix; - - /// create a block vector - fn block_vec2( - top_row: S::Vector, - bot_row: S::Vector, - ) -> Self; - - /// dot product - fn dot(self, rhs: Self) -> S; - - /// return normalized vector - fn normalized(&self) -> Self; -} - -impl IsVector for VecF64 { - fn from_array(vals: [f64; ROWS]) -> VecF64 { - VecF64::::from_row_slice(&vals[..]) - } - - fn from_c_array(vals: [f64; ROWS]) -> Self { - VecF64::::from_row_slice(&vals[..]) - } - - fn get(&self, idx: usize) -> f64 { - self[idx] - } - - fn norm(&self) -> f64 { - self.norm() - } - - fn squared_norm(&self) -> f64 { - self.norm_squared() - } - - fn c(val: VecF64) -> Self { - val - } - - fn real(&self) -> &Self { - self - } - - fn get_fixed_rows(&self, start: usize) -> VecF64 { - self.fixed_rows::(start).into() - } - - fn to_mat(self) -> MatF64 { - self - } - - fn block_vec2( - top_row: VecF64, - bot_row: VecF64, - ) -> Self { - assert_eq!(ROWS, R0 + R1); - let mut m = Self::zero(); - - m.fixed_view_mut::(0, 0).copy_from(&top_row); - m.fixed_view_mut::(R0, 0).copy_from(&bot_row); - m - } - - fn set_c(&mut self, idx: usize, v: f64) { - self[idx] = v; - } - - fn scaled(&self, v: f64) -> Self { - self * v - } - - fn dot(self, rhs: Self) -> f64 { - VecF64::dot(&self, &rhs) - } - - fn normalized(&self) -> Self { - self.normalize() - } -} - -/// cross product -pub fn cross>(lhs: S::Vector<3>, rhs: S::Vector<3>) -> S::Vector<3> { - let l0 = lhs.get(0); - let l1 = lhs.get(1); - let l2 = lhs.get(2); - - let r0 = rhs.get(0); - let r1 = rhs.get(1); - let r2 = rhs.get(2); - - S::Vector::from_array([ - l1.clone() * r2.clone() - l2.clone() * r1.clone(), - l2 * r0.clone() - l0.clone() * r2, - l0 * r1 - l1 * r0, - ]) 
-}
diff --git a/crates/sophus_tensor/Cargo.toml b/crates/sophus_core/Cargo.toml
similarity index 86%
rename from crates/sophus_tensor/Cargo.toml
rename to crates/sophus_core/Cargo.toml
index c3c58d7..c4786f2 100644
--- a/crates/sophus_tensor/Cargo.toml
+++ b/crates/sophus_core/Cargo.toml
@@ -1,6 +1,6 @@
 [package]
 description = "sophus - geometry for robotics and computer vision"
-name = "sophus_tensor"
+name = "sophus_core"
 readme = "../../README.md"

 edition.workspace = true
@@ -11,10 +11,11 @@
 repository.workspace = true
 version.workspace = true
 [dependencies]
+approx.workspace = true
 assertables.workspace = true
 concat-arrays.workspace = true
 nalgebra.workspace = true
 ndarray.workspace = true
 num-traits.workspace = true
-simba.workspace = true
+sleef.workspace = true
 typenum.workspace = true
diff --git a/crates/sophus_calculus/src/lib.rs b/crates/sophus_core/src/calculus.rs
similarity index 65%
rename from crates/sophus_calculus/src/lib.rs
rename to crates/sophus_core/src/calculus.rs
index cc52bfd..95f9e09 100644
--- a/crates/sophus_calculus/src/lib.rs
+++ b/crates/sophus_core/src/calculus.rs
@@ -1,17 +1,13 @@
 #![deny(missing_docs)]
 //! # Calculus module

-/// Dual numbers - for automatic differentiation
+/// dual numbers - for automatic differentiation
 pub mod dual;
 /// manifolds
 pub mod manifold;
 /// curves, scalar-valued, vector-valued, and matrix-valued maps
 pub mod maps;
-/// points
-pub mod points;
 /// intervals and regions
 pub mod region;
 /// splines
 pub mod spline;
-/// scalar, vector, and matrix types
-pub mod types;
diff --git a/crates/sophus_core/src/calculus/dual.rs b/crates/sophus_core/src/calculus/dual.rs
new file mode 100644
index 0000000..ada118e
--- /dev/null
+++ b/crates/sophus_core/src/calculus/dual.rs
@@ -0,0 +1,6 @@
+///! DualScalar matrix.
+pub mod dual_matrix;
+///! DualScalar scalar.
+pub mod dual_scalar;
+///! DualScalar vector.
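// [sketch -- editor addition] The generic `cross` deleted above in
// sophus_calculus/src/types/vector.rs, reduced to plain f64 triples: the same
// component formula, checked on a basis pair and on v x v = 0.
fn cross(l: [f64; 3], r: [f64; 3]) -> [f64; 3] {
    [
        l[1] * r[2] - l[2] * r[1],
        l[2] * r[0] - l[0] * r[2],
        l[0] * r[1] - l[1] * r[0],
    ]
}

fn main() {
    // e_x x e_y = e_z, and the cross product of a vector with itself vanishes.
    assert_eq!(cross([1.0, 0.0, 0.0], [0.0, 1.0, 0.0]), [0.0, 0.0, 1.0]);
    assert_eq!(cross([2.0, 3.0, 4.0], [2.0, 3.0, 4.0]), [0.0, 0.0, 0.0]);
}
// [end sketch]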
+pub mod dual_vector; diff --git a/crates/sophus_core/src/calculus/dual/dual_matrix.rs b/crates/sophus_core/src/calculus/dual/dual_matrix.rs new file mode 100644 index 0000000..d30cb55 --- /dev/null +++ b/crates/sophus_core/src/calculus/dual/dual_matrix.rs @@ -0,0 +1,1461 @@ +use super::dual_scalar::IsDual; +use super::dual_scalar::IsDualScalar; +use super::dual_vector::DualVector; +use crate::calculus::dual::dual_scalar::DualBatchScalar; +use crate::calculus::dual::dual_scalar::DualScalar; +use crate::calculus::dual::dual_vector::DualBatchVector; +use crate::linalg::matrix::IsMatrix; +use crate::linalg::matrix::IsSingleMatrix; +use crate::linalg::scalar::IsCoreScalar; +use crate::linalg::scalar::IsScalar; +use crate::linalg::BatchMatF64; +use crate::linalg::BatchScalarF64; +use crate::linalg::BatchVecF64; +use crate::linalg::MatF64; +use crate::linalg::VecF64; +use crate::tensor::mut_tensor::MutTensorDD; +use crate::tensor::mut_tensor::MutTensorDDR; +use crate::tensor::mut_tensor::MutTensorDDRC; +use crate::tensor::mut_tensor_view::IsMutTensorLike; +use crate::tensor::tensor_view::IsTensorLike; +use approx::AbsDiffEq; +use approx::RelativeEq; +use num_traits::Zero; +use std::fmt::Debug; +use std::ops::Add; +use std::ops::Mul; +use std::ops::Neg; +use std::ops::Sub; +use std::simd::LaneCount; +use std::simd::Mask; +use std::simd::SupportedLaneCount; + +/// DualScalarLike matrix +#[derive(Clone)] +pub struct DualMatrix { + /// value - real matrix + pub val: MatF64, + /// derivative - infinitesimal matrix + pub dij_val: Option>, +} + +/// DualScalarLike matrix +#[derive(Clone)] +pub struct DualBatchMatrix +where + BatchScalarF64: IsCoreScalar, + LaneCount: SupportedLaneCount, +{ + /// value - real matrix + pub val: BatchMatF64, + /// derivative - infinitesimal matrix + pub dij_val: Option, ROWS, COLS>>, +} + +impl IsSingleMatrix + for DualMatrix +{ +} + +/// Trait for scalar dual numbers +pub trait IsDualMatrix< + S: IsDualScalar, + const ROWS: usize, + const COLS: usize, + const BATCH: usize, +>: IsMatrix + IsDual +{ + /// Create a new dual number + fn new(val: S::RealMatrix) -> Self; + + /// Get the derivative + fn dij_val(self) -> Option>; +} + +impl IsDual for DualMatrix {} + +impl IsDualMatrix + for DualMatrix +{ + /// Create a new dual number + fn new(val: MatF64) -> Self { + DualMatrix { val, dij_val: None } + } + + /// Get the derivative + fn dij_val(self) -> Option> { + self.dij_val + } +} + +/// Pair of dual matrices +pub struct DijPairM< + S: IsCoreScalar, + const ROWS: usize, + const COLS: usize, + const ROWS2: usize, + const COLS2: usize, +> { + lhs: MutTensorDDRC, + rhs: MutTensorDDRC, +} + +/// Pair of dual matrices +pub struct DijPairMV { + /// left hand side + pub lhs: MutTensorDDRC, + /// right hand side + pub rhs: MutTensorDDR, +} + +impl< + S: IsCoreScalar, + const ROWS: usize, + const COLS: usize, + const ROWS2: usize, + const COLS2: usize, + > DijPairM +{ + fn shape(&self) -> [usize; 2] { + self.lhs.dims() + } +} + +impl DualMatrix { + pub(crate) fn binary_mm_dij< + const R0: usize, + const R1: usize, + const C0: usize, + const C1: usize, + F: FnMut(&MatF64) -> MatF64, + G: FnMut(&MatF64) -> MatF64, + >( + lhs_dx: &Option>, + rhs_dx: &Option>, + mut left_op: F, + mut right_op: G, + ) -> Option> { + match (lhs_dx, rhs_dx) { + (None, None) => None, + (None, Some(rhs_dij)) => { + let out_dij = MutTensorDDRC::from_map(&rhs_dij.view(), |r_dij| right_op(r_dij)); + Some(out_dij) + } + (Some(lhs_dij), None) => { + let out_dij = MutTensorDDRC::from_map(&lhs_dij.view(), |l_dij| 
left_op(l_dij)); + Some(out_dij) + } + (Some(lhs_dij), Some(rhs_dij)) => { + let dyn_mat = + MutTensorDDRC::from_map2(&lhs_dij.view(), &rhs_dij.view(), |l_dij, r_dij| { + left_op(l_dij) + right_op(r_dij) + }); + Some(dyn_mat) + } + } + } + + fn binary_mv_dij< + const R0: usize, + const R1: usize, + const C0: usize, + F: FnMut(&MatF64) -> VecF64, + G: FnMut(&VecF64) -> VecF64, + >( + lhs_dx: &Option>, + rhs_dx: &Option>, + mut left_op: F, + mut right_op: G, + ) -> Option> { + match (lhs_dx, rhs_dx) { + (None, None) => None, + (None, Some(rhs_dij)) => { + let out_dij = MutTensorDDR::from_map(&rhs_dij.view(), |r_dij| right_op(r_dij)); + Some(out_dij) + } + (Some(lhs_dij), None) => { + let out_dij = MutTensorDDR::from_map(&lhs_dij.view(), |l_dij| left_op(l_dij)); + Some(out_dij) + } + (Some(lhs_dij), Some(rhs_dij)) => { + let dyn_mat = + MutTensorDDR::from_map2(&lhs_dij.view(), &rhs_dij.view(), |l_dij, r_dij| { + left_op(l_dij) + right_op(r_dij) + }); + Some(dyn_mat) + } + } + } + + fn binary_ms_dij< + const R0: usize, + const C0: usize, + F: FnMut(&MatF64) -> MatF64, + G: FnMut(&f64) -> MatF64, + >( + lhs_dx: &Option>, + rhs_dx: &Option>, + mut left_op: F, + mut right_op: G, + ) -> Option> { + match (lhs_dx, rhs_dx) { + (None, None) => None, + (None, Some(rhs_dij)) => { + let out_dij = MutTensorDDRC::from_map(&rhs_dij.view(), |r_dij| right_op(r_dij)); + Some(out_dij) + } + (Some(lhs_dij), None) => { + let out_dij = MutTensorDDRC::from_map(&lhs_dij.view(), |l_dij| left_op(l_dij)); + Some(out_dij) + } + (Some(lhs_dij), Some(rhs_dij)) => { + let dyn_mat = + MutTensorDDRC::from_map2(&lhs_dij.view(), &rhs_dij.view(), |l_dij, r_dij| { + left_op(l_dij) + right_op(r_dij) + }); + Some(dyn_mat) + } + } + } + + /// derivatives + pub fn two_dx( + mut lhs_dx: Option>, + mut rhs_dx: Option>, + ) -> Option> { + if lhs_dx.is_none() && rhs_dx.is_none() { + return None; + } + + if lhs_dx.is_some() && rhs_dx.is_some() { + assert_eq!( + lhs_dx.clone().unwrap().dims(), + rhs_dx.clone().unwrap().dims() + ); + } + + if lhs_dx.is_none() { + lhs_dx = Some(MutTensorDDRC::::from_shape( + rhs_dx.clone().unwrap().dims(), + )) + } else if rhs_dx.is_none() { + rhs_dx = Some(MutTensorDDRC::::from_shape( + lhs_dx.clone().unwrap().dims(), + )) + } + + Some(DijPairM { + lhs: lhs_dx.unwrap(), + rhs: rhs_dx.unwrap(), + }) + } + + /// derivatives + pub fn two_dx_from_vec( + mut lhs_dx: Option>, + mut rhs_dx: Option>, + ) -> Option> { + if lhs_dx.is_none() && rhs_dx.is_none() { + return None; + } + + if lhs_dx.is_some() && rhs_dx.is_some() { + assert_eq!( + lhs_dx.clone().unwrap().dims(), + rhs_dx.clone().unwrap().dims() + ); + } + + if lhs_dx.is_none() { + lhs_dx = Some(MutTensorDDRC::::from_shape( + rhs_dx.clone().unwrap().dims(), + )) + } else if rhs_dx.is_none() { + rhs_dx = Some(MutTensorDDR::::from_shape( + lhs_dx.clone().unwrap().dims(), + )) + } + + Some(DijPairMV:: { + lhs: lhs_dx.unwrap(), + rhs: rhs_dx.unwrap(), + }) + } + + /// Create a dual matrix + pub fn v(val: MatF64) -> Self { + let mut dij_val = MutTensorDDRC::::from_shape([ROWS, COLS]); + for i in 0..ROWS { + for j in 0..COLS { + dij_val.mut_view().get_mut([i, j])[(i, j)] = 1.0; + } + } + + Self { + val, + dij_val: Some(dij_val), + } + } +} + +impl PartialEq for DualMatrix { + fn eq(&self, other: &Self) -> bool { + self.val == other.val && self.dij_val == other.dij_val + } +} + +impl AbsDiffEq for DualMatrix { + type Epsilon = f64; + + fn default_epsilon() -> Self::Epsilon { + f64::default_epsilon() + } + + fn abs_diff_eq(&self, other: &Self, epsilon: 
Self::Epsilon) -> bool { + self.val.abs_diff_eq(&other.val, epsilon) + } +} + +impl RelativeEq for DualMatrix { + fn default_max_relative() -> Self::Epsilon { + f64::default_max_relative() + } + + fn relative_eq( + &self, + other: &Self, + epsilon: Self::Epsilon, + max_relative: Self::Epsilon, + ) -> bool { + self.val.relative_eq(&other.val, epsilon, max_relative) + } +} + +impl IsMatrix + for DualMatrix +{ + fn mat_mul(&self, rhs: DualMatrix) -> DualMatrix { + DualMatrix { + val: self.val * rhs.val, + dij_val: DualMatrix::binary_mm_dij( + &self.dij_val, + &rhs.dij_val, + |l_dij| l_dij * rhs.val, + |r_dij| self.val * r_dij, + ), + } + } + + fn from_scalar(val: DualScalar) -> Self { + DualMatrix { + val: MatF64::::from_scalar(val.val), + dij_val: val.dij_val.map(|dij_val| { + MutTensorDDRC::from_map(&dij_val.view(), |v| MatF64::::from_scalar(*v)) + }), + } + } + + fn from_real_matrix(val: MatF64) -> Self { + Self { val, dij_val: None } + } + + fn scaled(&self, s: DualScalar) -> Self { + DualMatrix { + val: self.val * s.val, + dij_val: DualMatrix::binary_ms_dij( + &self.dij_val, + &s.dij_val, + |l_dij| l_dij * s.val, + |r_dij| self.val * *r_dij, + ), + } + } + + fn identity() -> Self { + DualMatrix::from_real_matrix(MatF64::::identity()) + } + + fn get_elem(&self, idx: [usize; 2]) -> DualScalar { + DualScalar { + val: self.val.get_elem(idx), + dij_val: self + .dij_val + .clone() + .map(|dij_val| MutTensorDD::from_map(&dij_val.view(), |v| v[(idx[0], idx[1])])), + } + } + + fn from_array2(duals: [[DualScalar; COLS]; ROWS]) -> Self { + let mut shape = None; + let mut val_mat = MatF64::::zeros(); + for i in 0..duals.len() { + let d_rows = duals[i].clone(); + for j in 0..d_rows.len() { + let d = d_rows.clone()[j].clone(); + + val_mat[(i, j)] = d.val; + if d.dij_val.is_some() { + shape = Some(d.dij_val.clone().unwrap().dims()); + } + } + } + + if shape.is_none() { + return DualMatrix { + val: val_mat, + dij_val: None, + }; + } + let shape = shape.unwrap(); + + let mut r = MutTensorDDRC::::from_shape(shape); + + for i in 0..duals.len() { + let d_rows = duals[i].clone(); + for j in 0..d_rows.len() { + let d = d_rows.clone()[j].clone(); + if d.dij_val.is_some() { + for d0 in 0..shape[0] { + for d1 in 0..shape[1] { + r.mut_view().get_mut([d0, d1])[(i, j)] = + d.dij_val.clone().unwrap().get([d0, d1]); + } + } + } + } + } + DualMatrix { + val: val_mat, + dij_val: Some(r), + } + } + + fn real_matrix(&self) -> &MatF64 { + &self.val + } + + fn block_mat2x2( + top_row: (DualMatrix, DualMatrix), + bot_row: (DualMatrix, DualMatrix), + ) -> Self { + assert_eq!(R0 + R1, ROWS); + assert_eq!(C0 + C1, COLS); + + Self::block_mat2x1( + DualMatrix::::block_mat1x2(top_row.0, top_row.1), + DualMatrix::::block_mat1x2(bot_row.0, bot_row.1), + ) + } + + fn block_mat2x1( + top_row: DualMatrix, + bot_row: DualMatrix, + ) -> Self { + assert_eq!(R0 + R1, ROWS); + let maybe_dij = Self::two_dx(top_row.dij_val, bot_row.dij_val); + + Self { + val: MatF64::::block_mat2x1(top_row.val, bot_row.val), + dij_val: match maybe_dij { + Some(dij_val) => { + let mut r = MutTensorDDRC::::from_shape(dij_val.shape()); + for d0 in 0..dij_val.shape()[0] { + for d1 in 0..dij_val.shape()[1] { + *r.mut_view().get_mut([d0, d1]) = MatF64::::block_mat2x1( + dij_val.lhs.get([d0, d1]), + dij_val.rhs.get([d0, d1]), + ); + } + } + Some(r) + } + None => None, + }, + } + } + + fn block_mat1x2( + left_col: DualMatrix, + righ_col: DualMatrix, + ) -> Self { + assert_eq!(C0 + C1, COLS); + let maybe_dij = Self::two_dx(left_col.dij_val, righ_col.dij_val); + + 
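// [sketch -- editor addition; nalgebra is already a dependency of this crate]
// Numerical check of the product rule that `mat_mul` encodes through
// `binary_mm_dij`: d(A * B) = dA * B + A * dB, i.e. left_op = |dA| dA * B.val
// and right_op = |dB| A.val * dB above. With dB = 0 the second term drops out.
use nalgebra::Matrix2;

fn main() {
    let a = Matrix2::new(1.0, 2.0, 3.0, 4.0);
    let b = Matrix2::new(0.5, -1.0, 2.0, 0.25);
    let da = Matrix2::new(1.0, 0.0, 0.0, 1.0); // perturbation direction for A
    let h = 1e-6;
    // Central difference of A -> A * B along da vs. the product-rule term.
    let fd = ((a + da * h) * b - (a - da * h) * b) / (2.0 * h);
    assert!((fd - da * b).norm() < 1e-6);
}
// [end sketch]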
Self { + val: MatF64::::block_mat1x2(left_col.val, righ_col.val), + dij_val: match maybe_dij { + Some(dij_val) => { + let mut r = MutTensorDDRC::::from_shape(dij_val.shape()); + for d0 in 0..dij_val.shape()[0] { + for d1 in 0..dij_val.shape()[1] { + *r.mut_view().get_mut([d0, d1]) = MatF64::::block_mat1x2( + dij_val.lhs.get([d0, d1]), + dij_val.rhs.get([d0, d1]), + ); + } + } + Some(r) + } + None => None, + }, + } + } + + fn get_fixed_submat( + &self, + start_r: usize, + start_c: usize, + ) -> DualMatrix { + DualMatrix { + val: self.val.get_fixed_submat(start_r, start_c), + dij_val: self.dij_val.clone().map(|dij_val| { + MutTensorDDRC::from_map(&dij_val.view(), |v| v.get_fixed_submat(start_r, start_c)) + }), + } + } + + fn get_col_vec(&self, start_r: usize) -> DualVector { + DualVector { + val: self.val.get_col_vec(start_r), + dij_val: self + .dij_val + .clone() + .map(|dij_val| MutTensorDDR::from_map(&dij_val.view(), |v| v.get_col_vec(start_r))), + } + } + + fn get_row_vec(&self, c: usize) -> DualVector { + DualVector { + val: self.val.get_row_vec(c), + dij_val: self + .dij_val + .clone() + .map(|dij_val| MutTensorDDR::from_map(&dij_val.view(), |v| v.get_row_vec(c))), + } + } + + fn from_real_array2(vals: [[f64; COLS]; ROWS]) -> Self { + DualMatrix { + val: MatF64::from_real_array2(vals), + dij_val: None, + } + } + + fn from_f64_array2(vals: [[f64; COLS]; ROWS]) -> Self { + DualMatrix { + val: MatF64::from_real_array2(vals), + dij_val: None, + } + } + + fn from_f64(val: f64) -> Self { + DualMatrix { + val: MatF64::::from_f64(val), + dij_val: None, + } + } + + fn set_col_vec(&mut self, c: usize, v: DualVector) { + self.val.set_col_vec(c, v.val); + todo!(); + } + + fn to_dual(self) -> >::DualMatrix { + self + } + + fn select(self, mask: &bool, other: Self) -> Self { + if *mask { + self + } else { + other + } + } + + fn set_elem(&mut self, idx: [usize; 2], val: DualScalar) { + self.val.set_elem(idx, val.val); + if self.dij_val.is_some() { + let dij = &mut self.dij_val.as_mut().unwrap(); + for i in 0..dij.dims()[0] { + for j in 0..dij.dims()[1] { + dij.mut_view().get_mut([i, j])[(idx[0], idx[1])] = + val.dij_val.clone().unwrap().get([i, j]); + } + } + } + } +} + +impl Add for DualMatrix { + type Output = DualMatrix; + + fn add(self, rhs: Self) -> Self::Output { + DualMatrix { + val: self.val + rhs.val, + dij_val: Self::binary_mm_dij( + &self.dij_val, + &rhs.dij_val, + |l_dij| *l_dij, + |r_dij| *r_dij, + ), + } + } +} + +impl Sub for DualMatrix { + type Output = DualMatrix; + + fn sub(self, rhs: Self) -> Self::Output { + DualMatrix { + val: self.val - rhs.val, + dij_val: Self::binary_mm_dij( + &self.dij_val, + &rhs.dij_val, + |l_dij| *l_dij, + |r_dij| -r_dij, + ), + } + } +} + +impl Neg for DualMatrix { + type Output = DualMatrix; + + fn neg(self) -> Self::Output { + DualMatrix { + val: -self.val, + dij_val: self + .dij_val + .clone() + .map(|dij_val| MutTensorDDRC::from_map(&dij_val.view(), |v| -v)), + } + } +} + +impl Zero for DualMatrix { + fn zero() -> Self { + Self::from_real_matrix(MatF64::zeros()) + } + + fn is_zero(&self) -> bool { + self.val.is_zero() + } +} + +impl Mul> for DualMatrix { + type Output = DualVector; + + fn mul(self, rhs: DualVector) -> Self::Output { + Self::Output { + val: self.val * rhs.val, + dij_val: Self::binary_mv_dij( + &self.dij_val, + &rhs.dij_val, + |l_dij| l_dij * rhs.val, + |r_dij| self.val * r_dij, + ), + } + } +} + +impl Debug for DualMatrix { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + if self.dij_val.is_some() { + 
f.debug_struct("DualScalarLike") + .field("val", &self.val) + .field("dij_val", &self.dij_val.as_ref().unwrap().elem_view()) + .finish() + } else { + f.debug_struct("DualScalarLike") + .field("val", &self.val) + .finish() + } + } +} + +impl IsDual + for DualBatchMatrix +where + LaneCount: SupportedLaneCount, +{ +} + +impl + IsDualMatrix, ROWS, COLS, BATCH> for DualBatchMatrix +where + LaneCount: SupportedLaneCount, +{ + /// Create a new dual number + fn new(val: BatchMatF64) -> Self { + DualBatchMatrix { val, dij_val: None } + } + + /// Get the derivative + fn dij_val(self) -> Option, ROWS, COLS>> { + self.dij_val + } +} + +impl DualBatchMatrix +where + LaneCount: SupportedLaneCount, +{ + fn binary_mm_dij< + const R0: usize, + const R1: usize, + const C0: usize, + const C1: usize, + F: FnMut(&BatchMatF64) -> BatchMatF64, + G: FnMut(&BatchMatF64) -> BatchMatF64, + >( + lhs_dx: &Option, R0, C0>>, + rhs_dx: &Option, R1, C1>>, + mut left_op: F, + mut right_op: G, + ) -> Option, ROWS, COLS>> { + match (lhs_dx, rhs_dx) { + (None, None) => None, + (None, Some(rhs_dij)) => { + let out_dij = MutTensorDDRC::from_map(&rhs_dij.view(), |r_dij| right_op(r_dij)); + Some(out_dij) + } + (Some(lhs_dij), None) => { + let out_dij = MutTensorDDRC::from_map(&lhs_dij.view(), |l_dij| left_op(l_dij)); + Some(out_dij) + } + (Some(lhs_dij), Some(rhs_dij)) => { + let dyn_mat = + MutTensorDDRC::from_map2(&lhs_dij.view(), &rhs_dij.view(), |l_dij, r_dij| { + left_op(l_dij) + right_op(r_dij) + }); + Some(dyn_mat) + } + } + } + + fn binary_mv_dij< + const R0: usize, + const R1: usize, + const C0: usize, + F: FnMut(&BatchMatF64) -> BatchVecF64, + G: FnMut(&BatchVecF64) -> BatchVecF64, + >( + lhs_dx: &Option, R0, C0>>, + rhs_dx: &Option, R1>>, + mut left_op: F, + mut right_op: G, + ) -> Option, ROWS>> { + match (lhs_dx, rhs_dx) { + (None, None) => None, + (None, Some(rhs_dij)) => { + let out_dij = MutTensorDDR::from_map(&rhs_dij.view(), |r_dij| right_op(r_dij)); + Some(out_dij) + } + (Some(lhs_dij), None) => { + let out_dij = MutTensorDDR::from_map(&lhs_dij.view(), |l_dij| left_op(l_dij)); + Some(out_dij) + } + (Some(lhs_dij), Some(rhs_dij)) => { + let dyn_mat = + MutTensorDDR::from_map2(&lhs_dij.view(), &rhs_dij.view(), |l_dij, r_dij| { + left_op(l_dij) + right_op(r_dij) + }); + Some(dyn_mat) + } + } + } + + fn binary_ms_dij< + const R0: usize, + const C0: usize, + F: FnMut(&BatchMatF64) -> BatchMatF64, + G: FnMut(&BatchScalarF64) -> BatchMatF64, + >( + lhs_dx: &Option, R0, C0>>, + rhs_dx: &Option>>, + mut left_op: F, + mut right_op: G, + ) -> Option, ROWS, COLS>> { + match (lhs_dx, rhs_dx) { + (None, None) => None, + (None, Some(rhs_dij)) => { + let out_dij = MutTensorDDRC::from_map(&rhs_dij.view(), |r_dij| right_op(r_dij)); + Some(out_dij) + } + (Some(lhs_dij), None) => { + let out_dij = MutTensorDDRC::from_map(&lhs_dij.view(), |l_dij| left_op(l_dij)); + Some(out_dij) + } + (Some(lhs_dij), Some(rhs_dij)) => { + let dyn_mat = + MutTensorDDRC::from_map2(&lhs_dij.view(), &rhs_dij.view(), |l_dij, r_dij| { + left_op(l_dij) + right_op(r_dij) + }); + Some(dyn_mat) + } + } + } + + /// derivatives + pub fn two_dx( + mut lhs_dx: Option, R1, C1>>, + mut rhs_dx: Option, R2, C2>>, + ) -> Option, R1, C1, R2, C2>> { + if lhs_dx.is_none() && rhs_dx.is_none() { + return None; + } + + if lhs_dx.is_some() && rhs_dx.is_some() { + assert_eq!( + lhs_dx.clone().unwrap().dims(), + rhs_dx.clone().unwrap().dims() + ); + } + + if lhs_dx.is_none() { + lhs_dx = Some(MutTensorDDRC::, R1, C1>::from_shape( + rhs_dx.clone().unwrap().dims(), + )) + } 
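// [sketch -- editor addition; needs the nightly toolchain this PR pins, with
// the `portable_simd` feature; `f64x4` stands in for BatchScalarF64<4>] The
// std::simd machinery the batch types above build on: one dual lane-set
// carries BATCH independent values and derivatives, so a single expression
// evaluation differentiates BATCH problem instances at once.
#![feature(portable_simd)]
use std::simd::f64x4;

fn main() {
    let val = f64x4::from_array([1.0, 2.0, 3.0, 4.0]);
    let der = f64x4::splat(1.0); // seed dx/dx = 1 in every lane
    // f(x) = x * x  =>  f'(x) = 2x, for all four lanes in one shot.
    let f_der = der * val + val * der; // lane-wise product rule
    assert_eq!(f_der.to_array(), [2.0, 4.0, 6.0, 8.0]);
}
// [end sketch]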
else if rhs_dx.is_none() { + rhs_dx = Some(MutTensorDDRC::, R2, C2>::from_shape( + lhs_dx.clone().unwrap().dims(), + )) + } + + Some(DijPairM { + lhs: lhs_dx.unwrap(), + rhs: rhs_dx.unwrap(), + }) + } + + /// derivatives + pub fn two_dx_from_vec( + mut lhs_dx: Option, ROWS, COLS>>, + mut rhs_dx: Option, COLS>>, + ) -> Option, ROWS, COLS>> { + if lhs_dx.is_none() && rhs_dx.is_none() { + return None; + } + + if lhs_dx.is_some() && rhs_dx.is_some() { + assert_eq!( + lhs_dx.clone().unwrap().dims(), + rhs_dx.clone().unwrap().dims() + ); + } + + if lhs_dx.is_none() { + lhs_dx = Some( + MutTensorDDRC::, ROWS, COLS>::from_shape( + rhs_dx.clone().unwrap().dims(), + ), + ) + } else if rhs_dx.is_none() { + rhs_dx = Some(MutTensorDDR::, COLS>::from_shape( + lhs_dx.clone().unwrap().dims(), + )) + } + + Some(DijPairMV::, ROWS, COLS> { + lhs: lhs_dx.unwrap(), + rhs: rhs_dx.unwrap(), + }) + } + + /// Create a dual matrix + pub fn v(val: BatchMatF64) -> Self { + let mut dij_val = + MutTensorDDRC::, ROWS, COLS>::from_shape([ROWS, COLS]); + for i in 0..ROWS { + for j in 0..COLS { + dij_val.mut_view().get_mut([i, j])[(i, j)] = BatchScalarF64::::from_f64(1.0); + } + } + + Self { + val, + dij_val: Some(dij_val), + } + } +} + +impl PartialEq + for DualBatchMatrix +where + LaneCount: SupportedLaneCount, +{ + fn eq(&self, other: &Self) -> bool { + self.val == other.val && self.dij_val == other.dij_val + } +} + +impl AbsDiffEq + for DualBatchMatrix +where + LaneCount: SupportedLaneCount, +{ + type Epsilon = f64; + + fn default_epsilon() -> Self::Epsilon { + f64::default_epsilon() + } + + fn abs_diff_eq(&self, other: &Self, epsilon: Self::Epsilon) -> bool { + self.val.abs_diff_eq(&other.val, epsilon) + } +} + +impl RelativeEq + for DualBatchMatrix +where + LaneCount: SupportedLaneCount, +{ + fn default_max_relative() -> Self::Epsilon { + f64::default_max_relative() + } + + fn relative_eq( + &self, + other: &Self, + epsilon: Self::Epsilon, + max_relative: Self::Epsilon, + ) -> bool { + self.val.relative_eq(&other.val, epsilon, max_relative) + } +} + +impl + IsMatrix, ROWS, COLS, BATCH> for DualBatchMatrix +where + LaneCount: SupportedLaneCount, +{ + fn from_f64(val: f64) -> Self { + DualBatchMatrix { + val: BatchMatF64::::from_f64(val), + dij_val: None, + } + } + + fn set_elem(&mut self, idx: [usize; 2], val: DualBatchScalar) { + self.val.set_elem(idx, val.val); + if self.dij_val.is_some() { + let dij = &mut self.dij_val.as_mut().unwrap(); + for i in 0..dij.dims()[0] { + for j in 0..dij.dims()[1] { + dij.mut_view().get_mut([i, j])[(idx[0], idx[1])] = + val.dij_val.clone().unwrap().get([i, j]); + } + } + } + } + + fn from_scalar(val: DualBatchScalar) -> Self { + DualBatchMatrix { + val: BatchMatF64::::from_scalar(val.val), + dij_val: val.dij_val.map(|dij_val| { + MutTensorDDRC::from_map(&dij_val.view(), |v| { + BatchMatF64::::from_scalar(*v) + }) + }), + } + } + + fn mat_mul( + &self, + rhs: DualBatchMatrix, + ) -> DualBatchMatrix { + DualBatchMatrix { + val: self.val * rhs.val, + dij_val: DualBatchMatrix::::binary_mm_dij( + &self.dij_val, + &rhs.dij_val, + |l_dij| l_dij * rhs.val, + |r_dij| self.val * r_dij, + ), + } + } + + fn from_real_matrix(val: BatchMatF64) -> Self { + Self { val, dij_val: None } + } + + fn scaled(&self, s: DualBatchScalar) -> Self { + DualBatchMatrix { + val: self.val * s.val, + dij_val: DualBatchMatrix::::binary_ms_dij( + &self.dij_val, + &s.dij_val, + |l_dij| l_dij * s.val, + |r_dij| self.val * *r_dij, + ), + } + } + + fn identity() -> Self { + 
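+        // The identity matrix is a constant, so it carries no derivative part.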
DualBatchMatrix::from_real_matrix(BatchMatF64::::identity()) + } + + fn get_elem(&self, idx: [usize; 2]) -> DualBatchScalar { + DualBatchScalar:: { + val: self.val.get_elem(idx), + dij_val: self + .dij_val + .clone() + .map(|dij_val| MutTensorDD::from_map(&dij_val.view(), |v| v[(idx[0], idx[1])])), + } + } + + fn from_array2(duals: [[DualBatchScalar; COLS]; ROWS]) -> Self { + let mut shape = None; + let mut val_mat = BatchMatF64::::zeros(); + for i in 0..duals.len() { + let d_rows = duals[i].clone(); + for j in 0..d_rows.len() { + let d = d_rows.clone()[j].clone(); + + val_mat[(i, j)] = d.val; + if d.dij_val.is_some() { + shape = Some(d.dij_val.clone().unwrap().dims()); + } + } + } + + if shape.is_none() { + return DualBatchMatrix { + val: val_mat, + dij_val: None, + }; + } + let shape = shape.unwrap(); + + let mut r = MutTensorDDRC::, ROWS, COLS>::from_shape(shape); + + for i in 0..duals.len() { + let d_rows = duals[i].clone(); + for j in 0..d_rows.len() { + let d = d_rows.clone()[j].clone(); + if d.dij_val.is_some() { + for d0 in 0..shape[0] { + for d1 in 0..shape[1] { + r.mut_view().get_mut([d0, d1])[(i, j)] = + d.dij_val.clone().unwrap().get([d0, d1]); + } + } + } + } + } + DualBatchMatrix { + val: val_mat, + dij_val: Some(r), + } + } + + fn real_matrix(&self) -> &BatchMatF64 { + &self.val + } + + fn block_mat2x2( + top_row: ( + DualBatchMatrix, + DualBatchMatrix, + ), + bot_row: ( + DualBatchMatrix, + DualBatchMatrix, + ), + ) -> Self { + assert_eq!(R0 + R1, ROWS); + assert_eq!(C0 + C1, COLS); + + Self::block_mat2x1( + DualBatchMatrix::::block_mat1x2(top_row.0, top_row.1), + DualBatchMatrix::::block_mat1x2(bot_row.0, bot_row.1), + ) + } + + fn block_mat2x1( + top_row: DualBatchMatrix, + bot_row: DualBatchMatrix, + ) -> Self { + assert_eq!(R0 + R1, ROWS); + let maybe_dij = Self::two_dx(top_row.dij_val, bot_row.dij_val); + + Self { + val: BatchMatF64::::block_mat2x1(top_row.val, bot_row.val), + dij_val: match maybe_dij { + Some(dij_val) => { + let mut r = MutTensorDDRC::, ROWS, COLS>::from_shape( + dij_val.shape(), + ); + for d0 in 0..dij_val.shape()[0] { + for d1 in 0..dij_val.shape()[1] { + *r.mut_view().get_mut([d0, d1]) = + BatchMatF64::::block_mat2x1( + dij_val.lhs.get([d0, d1]), + dij_val.rhs.get([d0, d1]), + ); + } + } + Some(r) + } + None => None, + }, + } + } + + fn block_mat1x2( + left_col: DualBatchMatrix, + righ_col: DualBatchMatrix, + ) -> Self { + assert_eq!(C0 + C1, COLS); + let maybe_dij = Self::two_dx(left_col.dij_val, righ_col.dij_val); + + Self { + val: BatchMatF64::::block_mat1x2(left_col.val, righ_col.val), + dij_val: match maybe_dij { + Some(dij_val) => { + let mut r = MutTensorDDRC::, ROWS, COLS>::from_shape( + dij_val.shape(), + ); + for d0 in 0..dij_val.shape()[0] { + for d1 in 0..dij_val.shape()[1] { + *r.mut_view().get_mut([d0, d1]) = + BatchMatF64::::block_mat1x2( + dij_val.lhs.get([d0, d1]), + dij_val.rhs.get([d0, d1]), + ); + } + } + Some(r) + } + None => None, + }, + } + } + + fn get_fixed_submat( + &self, + start_r: usize, + start_c: usize, + ) -> DualBatchMatrix { + DualBatchMatrix { + val: self.val.get_fixed_submat(start_r, start_c), + dij_val: self.dij_val.clone().map(|dij_val| { + MutTensorDDRC::from_map(&dij_val.view(), |v| v.get_fixed_submat(start_r, start_c)) + }), + } + } + + fn get_col_vec(&self, start_r: usize) -> DualBatchVector { + DualBatchVector { + val: self.val.get_col_vec(start_r), + dij_val: self + .dij_val + .clone() + .map(|dij_val| MutTensorDDR::from_map(&dij_val.view(), |v| v.get_col_vec(start_r))), + } + } + + fn get_row_vec(&self, 
c: usize) -> DualBatchVector { + DualBatchVector { + val: self.val.get_row_vec(c), + dij_val: self + .dij_val + .clone() + .map(|dij_val| MutTensorDDR::from_map(&dij_val.view(), |v| v.get_row_vec(c))), + } + } + + fn from_real_array2(vals: [[BatchScalarF64; COLS]; ROWS]) -> Self { + DualBatchMatrix { + val: BatchMatF64::from_real_array2(vals), + dij_val: None, + } + } + + fn from_f64_array2(vals: [[f64; COLS]; ROWS]) -> Self { + DualBatchMatrix { + val: BatchMatF64::from_f64_array2(vals), + dij_val: None, + } + } + + fn set_col_vec( + &mut self, + c: usize, + v: as IsScalar>::Vector, + ) { + self.val.set_col_vec(c, v.val); + todo!(); + } + + fn to_dual(self) -> as IsScalar>::DualMatrix { + self + } + + fn select(self, mask: &Mask, other: Self) -> Self { + let maybe_dij = Self::two_dx(self.dij_val, other.dij_val); + + DualBatchMatrix { + val: self.val.select(mask, other.val), + dij_val: match maybe_dij { + Some(dij) => { + let mut r = + MutTensorDDRC::, ROWS, COLS>::from_shape(dij.shape()); + for i in 0..dij.shape()[0] { + for j in 0..dij.shape()[1] { + *r.get_mut([i, j]) = + dij.lhs.get([i, j]).select(mask, dij.rhs.get([i, j])); + } + } + Some(r) + } + _ => None, + }, + } + } +} + +impl Add + for DualBatchMatrix +where + LaneCount: SupportedLaneCount, +{ + type Output = DualBatchMatrix; + + fn add(self, rhs: Self) -> Self::Output { + DualBatchMatrix { + val: self.val + rhs.val, + dij_val: Self::binary_mm_dij( + &self.dij_val, + &rhs.dij_val, + |l_dij| *l_dij, + |r_dij| *r_dij, + ), + } + } +} + +impl Sub + for DualBatchMatrix +where + LaneCount: SupportedLaneCount, +{ + type Output = DualBatchMatrix; + + fn sub(self, rhs: Self) -> Self::Output { + DualBatchMatrix { + val: self.val - rhs.val, + dij_val: Self::binary_mm_dij( + &self.dij_val, + &rhs.dij_val, + |l_dij| *l_dij, + |r_dij| -r_dij, + ), + } + } +} + +impl Neg + for DualBatchMatrix +where + LaneCount: SupportedLaneCount, +{ + type Output = DualBatchMatrix; + + fn neg(self) -> Self::Output { + DualBatchMatrix { + val: -self.val, + dij_val: self + .dij_val + .clone() + .map(|dij_val| MutTensorDDRC::from_map(&dij_val.view(), |v| -v)), + } + } +} + +impl Zero + for DualBatchMatrix +where + LaneCount: SupportedLaneCount, +{ + fn zero() -> Self { + Self::from_real_matrix(BatchMatF64::zeros()) + } + + fn is_zero(&self) -> bool { + self.val.is_zero() + } +} + +impl Mul> + for DualBatchMatrix +where + LaneCount: SupportedLaneCount, +{ + type Output = DualBatchVector; + + fn mul(self, rhs: DualBatchVector) -> Self::Output { + Self::Output { + val: self.val * rhs.val, + dij_val: Self::binary_mv_dij( + &self.dij_val, + &rhs.dij_val, + |l_dij| l_dij * rhs.val, + |r_dij| self.val * r_dij, + ), + } + } +} + +impl Debug + for DualBatchMatrix +where + LaneCount: SupportedLaneCount, +{ + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + if self.dij_val.is_some() { + f.debug_struct("DualScalarLike") + .field("val", &self.val) + .field("dij_val", &self.dij_val.as_ref().unwrap().elem_view()) + .finish() + } else { + f.debug_struct("DualScalarLike") + .field("val", &self.val) + .finish() + } + } +} + +#[test] +fn dual_matrix_tests() { + use crate::calculus::dual::dual_scalar::DualBatchScalar; + use crate::calculus::dual::dual_scalar::DualScalar; + use crate::calculus::maps::matrix_valued_maps::MatrixValuedMapFromMatrix; + use crate::linalg::BatchScalarF64; + + #[cfg(test)] + trait Test { + fn run(); + } + + macro_rules! 
def_test_template { + ( $scalar:ty, $dual_scalar: ty, $batch:literal + ) => { + #[cfg(test)] + impl Test for $scalar { + fn run() { + let m_2x4 = <$scalar as IsScalar<$batch>>::Matrix::<2, 4>::from_f64_array2([ + [1.0, 2.0, 3.0, 4.0], + [5.0, 6.0, 7.0, 8.0], + ]); + let m_4x1 = <$scalar as IsScalar<$batch>>::Matrix::<4, 1>::from_f64_array2([ + [1.0], + [2.0], + [3.0], + [4.0], + ]); + + fn mat_mul_fn, const BATCH: usize>( + x: S::Matrix<2, 4>, + y: S::Matrix<4, 1>, + ) -> S::Matrix<2, 1> { + x.mat_mul(y) + } + let finite_diff = + MatrixValuedMapFromMatrix::<$scalar, $batch>::sym_diff_quotient( + |x| mat_mul_fn::<$scalar, $batch>(x, m_4x1), + m_2x4, + 1e-6, + ); + let auto_grad = MatrixValuedMapFromMatrix::<$dual_scalar, $batch>::fw_autodiff( + |x| { + mat_mul_fn::<$dual_scalar, $batch>( + x, + <$dual_scalar as IsScalar<$batch>>::Matrix::from_real_matrix(m_4x1), + ) + }, + m_2x4, + ); + + for i in 0..2 { + for j in 0..1 { + approx::assert_abs_diff_eq!( + finite_diff.get([i, j]), + auto_grad.get([i, j]), + epsilon = 0.0001 + ); + } + } + + let finite_diff = MatrixValuedMapFromMatrix::sym_diff_quotient( + |x| mat_mul_fn::<$scalar, $batch>(m_2x4, x), + m_4x1, + 1e-6, + ); + let auto_grad = MatrixValuedMapFromMatrix::<$dual_scalar, $batch>::fw_autodiff( + |x| { + mat_mul_fn::<$dual_scalar, $batch>( + <$dual_scalar as IsScalar<$batch>>::Matrix::from_real_matrix(m_2x4), + x, + ) + }, + m_4x1, + ); + + for i in 0..2 { + for j in 0..1 { + approx::assert_abs_diff_eq!( + finite_diff.get([i, j]), + auto_grad.get([i, j]), + epsilon = 0.0001 + ); + } + } + + fn mat_mul2_fn, const BATCH: usize>( + x: S::Matrix<4, 4>, + ) -> S::Matrix<4, 4> { + x.mat_mul(x.clone()) + } + + let m_4x4 = <$scalar as IsScalar<$batch>>::Matrix::<4, 4>::from_f64_array2([ + [1.0, 2.0, 3.0, 4.0], + [5.0, 6.0, 7.0, 8.0], + [1.0, 2.0, 3.0, 4.0], + [5.0, 6.0, 7.0, 8.0], + ]); + + let finite_diff = + MatrixValuedMapFromMatrix::<$scalar, $batch>::sym_diff_quotient( + mat_mul2_fn::<$scalar, $batch>, + m_4x4, + 1e-6, + ); + let auto_grad = MatrixValuedMapFromMatrix::<$dual_scalar, $batch>::fw_autodiff( + mat_mul2_fn::<$dual_scalar, $batch>, + m_4x4, + ); + + for i in 0..2 { + for j in 0..1 { + approx::assert_abs_diff_eq!( + finite_diff.get([i, j]), + auto_grad.get([i, j]), + epsilon = 0.0001 + ); + } + } + } + } + }; + } + + def_test_template!(f64, DualScalar, 1); + def_test_template!(BatchScalarF64<2>, DualBatchScalar<2>, 2); + def_test_template!(BatchScalarF64<4>, DualBatchScalar<4>, 4); + + f64::run(); + BatchScalarF64::<2>::run(); + BatchScalarF64::<4>::run(); +} diff --git a/crates/sophus_core/src/calculus/dual/dual_scalar.rs b/crates/sophus_core/src/calculus/dual/dual_scalar.rs new file mode 100644 index 0000000..ebddeba --- /dev/null +++ b/crates/sophus_core/src/calculus/dual/dual_scalar.rs @@ -0,0 +1,1441 @@ +use super::dual_matrix::DualBatchMatrix; +use super::dual_matrix::DualMatrix; +use super::dual_vector::DualBatchVector; +use super::dual_vector::DualVector; +use crate::linalg::scalar::IsCoreScalar; +use crate::linalg::scalar::IsScalar; +use crate::linalg::scalar::IsSingleScalar; +use crate::linalg::scalar::NumberCategory; +use crate::linalg::BatchMatF64; +use crate::linalg::BatchScalarF64; +use crate::linalg::BatchVecF64; +use crate::linalg::MatF64; +use crate::linalg::VecF64; +use crate::tensor::mut_tensor::InnerScalarToVec; +use crate::tensor::mut_tensor::MutTensorDD; +use crate::tensor::tensor_view::IsTensorLike; +use approx::assert_abs_diff_eq; +use approx::AbsDiffEq; +use approx::RelativeEq; +use num_traits::One; +use 
num_traits::Zero;
+use std::fmt::Debug;
+use std::ops::Add;
+use std::ops::AddAssign;
+use std::ops::Div;
+use std::ops::Mul;
+use std::ops::Neg;
+use std::ops::Sub;
+use std::ops::SubAssign;
+use std::simd::LaneCount;
+use std::simd::Mask;
+use std::simd::SupportedLaneCount;
+
+/// Trait for dual numbers
+pub trait IsDual {}
+
+/// Dual number - a real number and an infinitesimal number
+#[derive(Clone)]
+pub struct DualScalar {
+    /// value - real number
+    pub val: f64,
+
+    /// derivative - infinitesimal number
+    pub dij_val: Option<MutTensorDD<f64>>,
+}
+
+impl IsDual for DualScalar {}
+
+/// Dual number - a real number and an infinitesimal number (batch version)
+#[derive(Clone)]
+pub struct DualBatchScalar<const BATCH: usize>
+where
+    BatchScalarF64<BATCH>: IsCoreScalar,
+    LaneCount<BATCH>: SupportedLaneCount,
+{
+    /// value - real number
+    pub val: BatchScalarF64<BATCH>,
+
+    /// derivative - infinitesimal number
+    pub dij_val: Option<MutTensorDD<BatchScalarF64<BATCH>>>,
+}
+
+impl<const BATCH: usize> IsDual for DualBatchScalar<BATCH>
+where
+    BatchScalarF64<BATCH>: IsCoreScalar,
+    LaneCount<BATCH>: SupportedLaneCount,
+{
+}
+
+/// Trait for scalar dual numbers
+pub trait IsDualScalar<const BATCH: usize>: IsScalar<BATCH> + IsDual {
+    /// Create a new dual number
+    fn new(val: Self::RealScalar) -> Self;
+
+    /// Create a vector of dual numbers
+    fn vector_v<const ROWS: usize>(val: Self::RealVector<ROWS>) -> Self::DualVector<ROWS>;
+
+    /// Create a matrix of dual numbers
+    fn matrix_v<const ROWS: usize, const COLS: usize>(
+        val: Self::RealMatrix<ROWS, COLS>,
+    ) -> Self::DualMatrix<ROWS, COLS>;
+
+    /// Get the derivative
+    fn dij_val(self) -> Option<MutTensorDD<Self::RealScalar>>;
+}
+
+impl AbsDiffEq for DualScalar {
+    type Epsilon = f64;
+
+    fn default_epsilon() -> Self::Epsilon {
+        1e-6
+    }
+
+    fn abs_diff_eq(&self, other: &Self, epsilon: Self::Epsilon) -> bool {
+        self.val.abs_diff_eq(&other.val, epsilon)
+    }
+}
+
+impl RelativeEq for DualScalar {
+    fn default_max_relative() -> Self::Epsilon {
+        1e-6
+    }
+
+    fn relative_eq(
+        &self,
+        other: &Self,
+        epsilon: Self::Epsilon,
+        max_relative: Self::Epsilon,
+    ) -> bool {
+        self.val.relative_eq(&other.val, epsilon, max_relative)
+    }
+}
+
+impl IsCoreScalar for DualScalar {
+    fn number_category() -> NumberCategory {
+        NumberCategory::Real
+    }
+}
+
+impl SubAssign for DualScalar {
+    fn sub_assign(&mut self, rhs: Self) {
+        *self = self.clone().sub(&rhs);
+    }
+}
+
+impl IsSingleScalar for DualScalar {
+    type SingleVector<const ROWS: usize> = DualVector<ROWS>;
+    type SingleMatrix<const ROWS: usize, const COLS: usize> = DualMatrix<ROWS, COLS>;
+
+    fn single_real_scalar(&self) -> f64 {
+        self.val
+    }
+
+    fn single_scalar(&self) -> Self {
+        self.clone()
+    }
+
+    fn i64_floor(&self) -> i64 {
+        self.val.floor() as i64
+    }
+}
+
+impl AsRef<DualScalar> for DualScalar {
+    fn as_ref(&self) -> &DualScalar {
+        self
+    }
+}
+
+impl One for DualScalar {
+    fn one() -> Self {
+        <DualScalar as IsScalar<1>>::from_f64(1.0)
+    }
+}
+
+impl Zero for DualScalar {
+    fn zero() -> Self {
+        <DualScalar as IsScalar<1>>::from_f64(0.0)
+    }
+
+    fn is_zero(&self) -> bool {
+        self.val == <DualScalar as IsScalar<1>>::from_f64(0.0).real_part()
+    }
+}
+
+impl IsDualScalar<1> for DualScalar {
+    fn new(val: f64) -> Self {
+        let dij_val = <MutTensorDD<f64>>::from_shape_and_val([1, 1], 1.0);
+        Self {
+            val,
+            dij_val: Some(dij_val),
+        }
+    }
+
+    fn vector_v<const ROWS: usize>(val: Self::RealVector<ROWS>) -> Self::Vector<ROWS> {
+        DualVector::<ROWS>::v(val)
+    }
+
+    fn dij_val(self) -> Option<MutTensorDD<f64>> {
+        self.dij_val
+    }
+
+    fn matrix_v<const ROWS: usize, const COLS: usize>(
+        val: Self::RealMatrix<ROWS, COLS>,
+    ) -> Self::Matrix<ROWS, COLS> {
+        DualMatrix::<ROWS, COLS>::v(val)
+    }
+}
+
+impl DualScalar {
+    /// combine the derivative parts of a binary operation
+    fn binary_dij<F: FnMut(&f64) -> f64, G: FnMut(&f64) -> f64>(
+        lhs_dx: &Option<MutTensorDD<f64>>,
+        rhs_dx: &Option<MutTensorDD<f64>>,
+        mut left_op: F,
+        mut right_op: G,
+    ) -> Option<MutTensorDD<f64>> {
+        match (lhs_dx, rhs_dx) {
+            (None, None) => None,
+            (None, Some(rhs_dij)) => {
+                let out_dij =
+                    <MutTensorDD<f64>>::from_map(&rhs_dij.view(), |r_dij: &f64| right_op(r_dij));
+                Some(out_dij)
+            }
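+            // Only the left operand carries derivatives: map left_op over its tensor.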
+            (Some(lhs_dij), None) => {
+                let out_dij =
+                    <MutTensorDD<f64>>::from_map(&lhs_dij.view(), |l_dij: &f64| left_op(l_dij));
+                Some(out_dij)
+            }
+            (Some(lhs_dij), Some(rhs_dij)) => {
+                let dyn_mat = <MutTensorDD<f64>>::from_map2(
+                    &lhs_dij.view(),
+                    &rhs_dij.view(),
+                    |l_dij: &f64, r_dij: &f64| left_op(l_dij) + right_op(r_dij),
+                );
+                Some(dyn_mat)
+            }
+        }
+    }
+}
+
+impl Neg for DualScalar {
+    type Output = DualScalar;
+
+    fn neg(self) -> Self {
+        Self {
+            val: -self.val,
+            dij_val: match self.dij_val.clone() {
+                Some(dij_val) => {
+                    let dyn_mat = <MutTensorDD<f64>>::from_map(&dij_val.view(), |v: &f64| -(*v));
+
+                    Some(dyn_mat)
+                }
+                None => None,
+            },
+        }
+    }
+}
+
+impl PartialEq for DualScalar {
+    fn eq(&self, other: &Self) -> bool {
+        self.val == other.val
+    }
+}
+
+impl PartialOrd for DualScalar {
+    fn partial_cmp(&self, other: &Self) -> Option<std::cmp::Ordering> {
+        self.val.partial_cmp(&other.val)
+    }
+}
+
+impl From<f64> for DualScalar {
+    fn from(value: f64) -> Self {
+        Self::from_real_scalar(value)
+    }
+}
+
+impl Debug for DualScalar {
+    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+        if self.dij_val.is_some() {
+            f.debug_struct("DualScalar")
+                .field("val", &self.val)
+                .field("dij_val", &self.dij_val.as_ref().unwrap().elem_view())
+                .finish()
+        } else {
+            f.debug_struct("DualScalar")
+                .field("val", &self.val)
+                .finish()
+        }
+    }
+}
+
+impl IsScalar<1> for DualScalar {
+    type Scalar = DualScalar;
+    type RealScalar = f64;
+    type SingleScalar = DualScalar;
+
+    type RealMatrix<const ROWS: usize, const COLS: usize> = MatF64<ROWS, COLS>;
+    type RealVector<const ROWS: usize> = VecF64<ROWS>;
+
+    type Vector<const ROWS: usize> = DualVector<ROWS>;
+    type Matrix<const ROWS: usize, const COLS: usize> = DualMatrix<ROWS, COLS>;
+
+    type Mask = bool;
+
+    fn from_real_scalar(val: f64) -> Self {
+        Self { val, dij_val: None }
+    }
+
+    fn from_real_array(arr: [f64; 1]) -> Self {
+        Self::from_f64(arr[0])
+    }
+
+    fn real_array(&self) -> [f64; 1] {
+        [self.val]
+    }
+
+    fn cos(self) -> DualScalar {
+        Self {
+            val: self.val.cos(),
+            dij_val: match self.dij_val.clone() {
+                Some(dij_val) => {
+                    let dyn_mat = <MutTensorDD<f64>>::from_map(&dij_val.view(), |dij: &f64| {
+                        -(*dij) * self.val.sin()
+                    });
+                    Some(dyn_mat)
+                }
+                None => None,
+            },
+        }
+    }
+
+    fn sin(self) -> DualScalar {
+        Self {
+            val: self.val.sin(),
+            dij_val: match self.dij_val.clone() {
+                Some(dij_val) => {
+                    let dyn_mat = <MutTensorDD<f64>>::from_map(&dij_val.view(), |dij: &f64| {
+                        *dij * self.val.cos()
+                    });
+                    Some(dyn_mat)
+                }
+                None => None,
+            },
+        }
+    }
+
+    fn abs(self) -> Self {
+        Self {
+            val: self.val.abs(),
+            dij_val: match self.dij_val.clone() {
+                Some(dij_val) => {
+                    let dyn_mat = <MutTensorDD<f64>>::from_map(&dij_val.view(), |dij: &f64| {
+                        *dij * self.val.signum()
+                    });
+
+                    Some(dyn_mat)
+                }
+                None => None,
+            },
+        }
+    }
+
+    fn atan2(self, rhs: Self) -> Self {
+        let inv_sq_nrm: f64 = 1.0 / (self.val * self.val + rhs.val * rhs.val);
+        Self {
+            val: self.val.atan2(rhs.val),
+            dij_val: Self::binary_dij(
+                &self.dij_val,
+                &rhs.dij_val,
+                |l_dij| inv_sq_nrm * ((*l_dij) * rhs.val),
+                |r_dij| -inv_sq_nrm * (self.val * (*r_dij)),
+            ),
+        }
+    }
+
+    fn real_part(&self) -> f64 {
+        self.val
+    }
+
+    fn sqrt(self) -> Self {
+        let sqrt = self.val.sqrt();
+        Self {
+            val: sqrt,
+            dij_val: match self.dij_val {
+                Some(dij) => {
+                    let out_dij = <MutTensorDD<f64>>::from_map(&dij.view(), |dij: &f64| {
+                        (*dij) * 1.0 / (2.0 * sqrt)
+                    });
+                    Some(out_dij)
+                }
+                None => None,
+            },
+        }
+    }
+
+    fn to_vec(self) -> DualVector<1> {
+        DualVector::<1> {
+            val: self.val.real_part().to_vec(),
+            dij_val: match self.dij_val {
+                Some(dij) => {
+                    let tmp = dij.inner_scalar_to_vec();
+                    Some(tmp)
+                }
+                None => None,
+            },
+        }
+    }
+
+    fn tan(self) -> Self {
+        Self {
+            val: self.val.tan(),
+            dij_val: match self.dij_val.clone() {
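+                // d/dx tan(x) = 1 / cos^2(x); applied to each stored derivative below.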
+                Some(dij_val) => {
+                    let c = self.val.cos();
+                    let sec_squared = 1.0 / (c * c);
+                    let dyn_mat = <MutTensorDD<f64>>::from_map(&dij_val.view(), |dij: &f64| {
+                        *dij * sec_squared
+                    });
+                    Some(dyn_mat)
+                }
+                None => None,
+            },
+        }
+    }
+
+    fn acos(self) -> Self {
+        Self {
+            val: self.val.acos(),
+            dij_val: match self.dij_val.clone() {
+                Some(dij_val) => {
+                    let dval = -1.0 / (1.0 - self.val * self.val).sqrt();
+                    let dyn_mat =
+                        <MutTensorDD<f64>>::from_map(&dij_val.view(), |dij: &f64| *dij * dval);
+                    Some(dyn_mat)
+                }
+                None => None,
+            },
+        }
+    }
+
+    fn asin(self) -> Self {
+        Self {
+            val: self.val.asin(),
+            dij_val: match self.dij_val.clone() {
+                Some(dij_val) => {
+                    let dval = 1.0 / (1.0 - self.val * self.val).sqrt();
+                    let dyn_mat =
+                        <MutTensorDD<f64>>::from_map(&dij_val.view(), |dij: &f64| *dij * dval);
+                    Some(dyn_mat)
+                }
+                None => None,
+            },
+        }
+    }
+
+    fn atan(self) -> Self {
+        Self {
+            val: self.val.atan(),
+            dij_val: match self.dij_val.clone() {
+                Some(dij_val) => {
+                    let dval = 1.0 / (1.0 + self.val * self.val);
+                    let dyn_mat =
+                        <MutTensorDD<f64>>::from_map(&dij_val.view(), |dij: &f64| *dij * dval);
+                    Some(dyn_mat)
+                }
+                None => None,
+            },
+        }
+    }
+
+    fn fract(self) -> Self {
+        Self {
+            val: self.val.fract(),
+            dij_val: match self.dij_val.clone() {
+                Some(dij_val) => {
+                    let dyn_mat = <MutTensorDD<f64>>::from_map(&dij_val.view(), |dij: &f64| *dij);
+                    Some(dyn_mat)
+                }
+                None => None,
+            },
+        }
+    }
+
+    fn floor(&self) -> f64 {
+        self.val.floor()
+    }
+
+    fn from_f64(val: f64) -> Self {
+        Self { val, dij_val: None }
+    }
+
+    fn scalar_examples() -> Vec<Self> {
+        [1.0, 2.0, 3.0].iter().map(|&v| Self::from_f64(v)).collect()
+    }
+
+    fn extract_single(&self, _i: usize) -> Self::SingleScalar {
+        self.clone()
+    }
+
+    fn signum(&self) -> Self {
+        Self {
+            val: self.val.signum(),
+            dij_val: None,
+        }
+    }
+
+    type DualScalar = Self;
+
+    type DualVector<const ROWS: usize> = DualVector<ROWS>;
+
+    type DualMatrix<const ROWS: usize, const COLS: usize> = DualMatrix<ROWS, COLS>;
+
+    fn less_equal(&self, rhs: &Self) -> Self::Mask {
+        self.val.less_equal(&rhs.val)
+    }
+
+    fn to_dual(self) -> Self::DualScalar {
+        self
+    }
+
+    fn select(self, mask: &Self::Mask, other: Self) -> Self {
+        if *mask {
+            self
+        } else {
+            other
+        }
+    }
+
+    fn greater_equal(&self, rhs: &Self) -> Self::Mask {
+        self.val.greater_equal(&rhs.val)
+    }
+}
+
+impl AddAssign for DualScalar {
+    fn add_assign(&mut self, rhs: Self) {
+        // this is a bit inefficient, better to do it in place
+        *self = self.clone().add(&rhs);
+    }
+}
+
+impl Add for DualScalar {
+    type Output = DualScalar;
+    fn add(self, rhs: Self) -> Self::Output {
+        self.add(&rhs)
+    }
+}
+
+impl Add<&DualScalar> for DualScalar {
+    type Output = DualScalar;
+    fn add(self, rhs: &Self) -> Self::Output {
+        let r = self.val + rhs.val;
+
+        Self {
+            val: r,
+            dij_val: Self::binary_dij(&self.dij_val, &rhs.dij_val, |l_dij| *l_dij, |r_dij| *r_dij),
+        }
+    }
+}
+
+impl Mul for DualScalar {
+    type Output = DualScalar;
+    fn mul(self, rhs: Self) -> Self::Output {
+        self.mul(&rhs)
+    }
+}
+
+impl Mul<&DualScalar> for DualScalar {
+    type Output = DualScalar;
+    fn mul(self, rhs: &Self) -> Self::Output {
+        let r = self.val * rhs.val;
+
+        Self {
+            val: r,
+            dij_val: Self::binary_dij(
+                &self.dij_val,
+                &rhs.dij_val,
+                |l_dij| (*l_dij) * rhs.val,
+                |r_dij| (*r_dij) * self.val,
+            ),
+        }
+    }
+}
+
+impl Div for DualScalar {
+    type Output = DualScalar;
+    fn div(self, rhs: Self) -> Self::Output {
+        self.div(&rhs)
+    }
+}
+
+impl Div<&DualScalar> for DualScalar {
+    type Output = DualScalar;
+    fn div(self, rhs: &Self) -> Self::Output {
+        let rhs_inv = 1.0 / rhs.val;
+        Self {
+            val: self.val * rhs_inv,
+            dij_val: Self::binary_dij(
+                &self.dij_val,
+                &rhs.dij_val,
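+                // Quotient rule: d(l / r) = dl / r - l * dr / r^2, with rhs_inv = 1 / r.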
+                |l_dij| l_dij * rhs_inv,
+                |r_dij| -self.val * r_dij * rhs_inv * rhs_inv,
+            ),
+        }
+    }
+}
+
+impl Sub for DualScalar {
+    type Output = DualScalar;
+    fn sub(self, rhs: Self) -> Self::Output {
+        self.sub(&rhs)
+    }
+}
+
+impl Sub<&DualScalar> for DualScalar {
+    type Output = DualScalar;
+    fn sub(self, rhs: &Self) -> Self::Output {
+        Self {
+            val: self.val - rhs.val,
+            dij_val: Self::binary_dij(&self.dij_val, &rhs.dij_val, |l_dij| *l_dij, |r_dij| -r_dij),
+        }
+    }
+}
+
+impl<const BATCH: usize> IsDualScalar<BATCH> for DualBatchScalar<BATCH>
+where
+    BatchScalarF64<BATCH>: IsCoreScalar,
+    LaneCount<BATCH>: SupportedLaneCount,
+{
+    fn new(val: BatchScalarF64<BATCH>) -> Self {
+        let dij_val = MutTensorDD::from_shape_and_val([1, 1], BatchScalarF64::<BATCH>::ones());
+        Self {
+            val,
+            dij_val: Some(dij_val),
+        }
+    }
+
+    fn dij_val(self) -> Option<MutTensorDD<BatchScalarF64<BATCH>>> {
+        self.dij_val
+    }
+
+    fn vector_v<const ROWS: usize>(val: Self::RealVector<ROWS>) -> Self::Vector<ROWS> {
+        DualBatchVector::<ROWS, BATCH>::v(val)
+    }
+
+    fn matrix_v<const ROWS: usize, const COLS: usize>(
+        val: Self::RealMatrix<ROWS, COLS>,
+    ) -> Self::Matrix<ROWS, COLS> {
+        DualBatchMatrix::<ROWS, COLS, BATCH>::v(val)
+    }
+}
+
+impl<const BATCH: usize> IsCoreScalar for DualBatchScalar<BATCH>
+where
+    BatchScalarF64<BATCH>: IsCoreScalar,
+    LaneCount<BATCH>: SupportedLaneCount,
+{
+    fn number_category() -> NumberCategory {
+        NumberCategory::Real
+    }
+}
+
+impl<const BATCH: usize> AsRef<DualBatchScalar<BATCH>> for DualBatchScalar<BATCH>
+where
+    BatchScalarF64<BATCH>: IsCoreScalar,
+    LaneCount<BATCH>: SupportedLaneCount,
+{
+    fn as_ref(&self) -> &DualBatchScalar<BATCH>
+    where
+        BatchScalarF64<BATCH>: IsCoreScalar,
+        LaneCount<BATCH>: SupportedLaneCount,
+    {
+        self
+    }
+}
+
+impl<const BATCH: usize> One for DualBatchScalar<BATCH>
+where
+    BatchScalarF64<BATCH>: IsCoreScalar,
+    LaneCount<BATCH>: SupportedLaneCount,
+{
+    fn one() -> Self {
+        <DualBatchScalar<BATCH> as IsScalar<BATCH>>::from_f64(1.0)
+    }
+}
+
+impl<const BATCH: usize> Zero for DualBatchScalar<BATCH>
+where
+    BatchScalarF64<BATCH>: IsCoreScalar,
+    LaneCount<BATCH>: SupportedLaneCount,
+{
+    fn zero() -> Self {
+        <DualBatchScalar<BATCH> as IsScalar<BATCH>>::from_f64(0.0)
+    }
+
+    fn is_zero(&self) -> bool {
+        self.val == <DualBatchScalar<BATCH> as IsScalar<BATCH>>::from_f64(0.0).real_part()
+    }
+}
+
+impl<const BATCH: usize> DualBatchScalar<BATCH>
+where
+    BatchScalarF64<BATCH>: IsCoreScalar,
+    LaneCount<BATCH>: SupportedLaneCount,
+{
+    fn binary_dij<
+        F: FnMut(&BatchScalarF64<BATCH>) -> BatchScalarF64<BATCH>,
+        G: FnMut(&BatchScalarF64<BATCH>) -> BatchScalarF64<BATCH>,
+    >(
+        lhs_dx: &Option<MutTensorDD<BatchScalarF64<BATCH>>>,
+        rhs_dx: &Option<MutTensorDD<BatchScalarF64<BATCH>>>,
+        mut left_op: F,
+        mut right_op: G,
+    ) -> Option<MutTensorDD<BatchScalarF64<BATCH>>> {
+        match (lhs_dx, rhs_dx) {
+            (None, None) => None,
+            (None, Some(rhs_dij)) => {
+                let out_dij =
+                    MutTensorDD::from_map(&rhs_dij.view(), |r_dij: &BatchScalarF64<BATCH>| {
+                        right_op(r_dij)
+                    });
+                Some(out_dij)
+            }
+            (Some(lhs_dij), None) => {
+                let out_dij =
+                    MutTensorDD::from_map(&lhs_dij.view(), |l_dij: &BatchScalarF64<BATCH>| {
+                        left_op(l_dij)
+                    });
+                Some(out_dij)
+            }
+            (Some(lhs_dij), Some(rhs_dij)) => {
+                let dyn_mat = MutTensorDD::from_map2(
+                    &lhs_dij.view(),
+                    &rhs_dij.view(),
+                    |l_dij: &BatchScalarF64<BATCH>, r_dij: &BatchScalarF64<BATCH>| {
+                        left_op(l_dij) + right_op(r_dij)
+                    },
+                );
+                Some(dyn_mat)
+            }
+        }
+    }
+}
+
+impl<const BATCH: usize> Neg for DualBatchScalar<BATCH>
+where
+    BatchScalarF64<BATCH>: IsCoreScalar,
+    LaneCount<BATCH>: SupportedLaneCount,
+{
+    type Output = DualBatchScalar<BATCH>;
+
+    fn neg(self) -> Self {
+        Self {
+            val: -self.val,
+            dij_val: match self.dij_val.clone() {
+                Some(dij_val) => {
+                    let dyn_mat =
+                        MutTensorDD::from_map(&dij_val.view(), |v: &BatchScalarF64<BATCH>| -*v);
+
+                    Some(dyn_mat)
+                }
+                None => None,
+            },
+        }
+    }
+}
+
+impl<const BATCH: usize> PartialEq for DualBatchScalar<BATCH>
+where
+    BatchScalarF64<BATCH>: IsCoreScalar,
+    LaneCount<BATCH>: SupportedLaneCount,
+{
+    fn eq(&self, other: &Self) -> bool {
+        self.val == other.val
+    }
+}
+
+impl<const BATCH: usize> AbsDiffEq for DualBatchScalar<BATCH>
+where
+    BatchScalarF64<BATCH>: IsCoreScalar,
+    LaneCount<BATCH>: SupportedLaneCount,
+{
+    type Epsilon = f64;
+
+    fn default_epsilon() -> Self::Epsilon {
+        f64::default_epsilon()
+    }
+
+    fn abs_diff_eq(&self, other:
&Self, epsilon: Self::Epsilon) -> bool { + for i in 0..BATCH { + if !self + .val + .extract_single(i) + .abs_diff_eq(&other.val.extract_single(i), epsilon.extract_single(i)) + { + return false; + } + } + true + } +} + +impl RelativeEq for DualBatchScalar +where + BatchScalarF64: IsCoreScalar, + LaneCount: SupportedLaneCount, +{ + fn default_max_relative() -> Self::Epsilon { + f64::default_max_relative() + } + + fn relative_eq( + &self, + other: &Self, + epsilon: Self::Epsilon, + max_relative: Self::Epsilon, + ) -> bool { + for i in 0..BATCH { + if !self.val.extract_single(i).relative_eq( + &other.val.extract_single(i), + epsilon.extract_single(i), + max_relative.extract_single(i), + ) { + return false; + } + } + true + } +} + +impl From> for DualBatchScalar +where + BatchScalarF64: IsCoreScalar, + LaneCount: SupportedLaneCount, +{ + fn from(value: BatchScalarF64) -> Self { + Self::from_real_scalar(value) + } +} + +impl Debug for DualBatchScalar +where + BatchScalarF64: IsCoreScalar, + LaneCount: SupportedLaneCount, +{ + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + if self.dij_val.is_some() { + f.debug_struct("DualScalar") + .field("val", &self.val) + .field("dij_val", &self.dij_val.as_ref().unwrap().elem_view()) + .finish() + } else { + f.debug_struct("DualScalar") + .field("val", &self.val) + .finish() + } + } +} + +impl IsScalar for DualBatchScalar +where + BatchScalarF64: IsCoreScalar, + LaneCount: SupportedLaneCount, +{ + type Scalar = DualBatchScalar; + type RealScalar = BatchScalarF64; + type SingleScalar = DualScalar; + type DualVector = DualBatchVector; + type DualMatrix = DualBatchMatrix; + type RealVector = BatchVecF64; + type RealMatrix = BatchMatF64; + type Vector = DualBatchVector; + type Matrix = DualBatchMatrix; + + type Mask = Mask; + + fn from_real_scalar(val: BatchScalarF64) -> Self { + Self { val, dij_val: None } + } + + fn scalar_examples() -> Vec { + [1.0, 2.0, 3.0].iter().map(|&v| Self::from_f64(v)).collect() + } + + fn from_real_array(arr: [f64; BATCH]) -> Self { + Self::from_real_scalar(BatchScalarF64::::from_real_array(arr)) + } + + fn real_array(&self) -> [f64; BATCH] { + self.val.real_array() + } + + fn extract_single(&self, i: usize) -> Self::SingleScalar { + Self::SingleScalar { + val: self.val.extract_single(i), + dij_val: match self.dij_val.clone() { + Some(dij_val) => { + let dyn_mat = + MutTensorDD::from_map(&dij_val.view(), |dij: &BatchScalarF64| { + dij.extract_single(i) + }); + Some(dyn_mat) + } + None => None, + }, + } + } + + fn cos(self) -> DualBatchScalar + where + BatchScalarF64: IsCoreScalar, + LaneCount: SupportedLaneCount, + { + Self { + val: self.val.cos(), + dij_val: match self.dij_val.clone() { + Some(dij_val) => { + let dyn_mat = + MutTensorDD::from_map(&dij_val.view(), |dij: &BatchScalarF64| { + -*dij * self.val.sin() + }); + Some(dyn_mat) + } + None => None, + }, + } + } + + fn signum(&self) -> Self { + Self { + val: self.val.signum(), + dij_val: None, + } + } + + fn sin(self) -> DualBatchScalar + where + BatchScalarF64: IsCoreScalar, + LaneCount: SupportedLaneCount, + { + Self { + val: self.val.sin(), + dij_val: match self.dij_val.clone() { + Some(dij_val) => { + let dyn_mat = + MutTensorDD::from_map(&dij_val.view(), |dij: &BatchScalarF64| { + *dij * self.val.cos() + }); + Some(dyn_mat) + } + None => None, + }, + } + } + + fn abs(self) -> Self { + Self { + val: self.val.abs(), + dij_val: match self.dij_val.clone() { + Some(dij_val) => { + let dyn_mat = + MutTensorDD::from_map(&dij_val.view(), |dij: 
&BatchScalarF64| { + *dij * self.val.signum() + }); + Some(dyn_mat) + } + None => None, + }, + } + } + + fn atan2(self, rhs: Self) -> Self { + let inv_sq_nrm: BatchScalarF64 = + BatchScalarF64::ones() / (self.val * self.val + rhs.val * rhs.val); + Self { + val: self.val.atan2(rhs.val), + dij_val: Self::binary_dij( + &self.dij_val, + &rhs.dij_val, + |l_dij| inv_sq_nrm * ((*l_dij) * rhs.val), + |r_dij| -inv_sq_nrm * (self.val * (*r_dij)), + ), + } + } + + fn real_part(&self) -> BatchScalarF64 { + self.val + } + + fn sqrt(self) -> Self { + let sqrt = self.val.sqrt(); + Self { + val: sqrt, + dij_val: match self.dij_val { + Some(dij) => { + let out_dij = + MutTensorDD::from_map(&dij.view(), |dij: &BatchScalarF64| { + *dij * BatchScalarF64::::from_f64(1.0) + / (BatchScalarF64::::from_f64(2.0) * sqrt) + }); + Some(out_dij) + } + None => None, + }, + } + } + + fn to_vec(self) -> DualBatchVector<1, BATCH> { + DualBatchVector::<1, BATCH> { + val: self.val.real_part().to_vec(), + dij_val: match self.dij_val { + Some(dij) => { + let tmp = dij.inner_scalar_to_vec(); + Some(tmp) + } + None => None, + }, + } + } + + fn tan(self) -> Self { + Self { + val: self.val.tan(), + dij_val: match self.dij_val.clone() { + Some(dij_val) => { + let c = self.val.cos(); + let sec_squared = BatchScalarF64::::ones() / (c * c); + let dyn_mat = + MutTensorDD::from_map(&dij_val.view(), |dij: &BatchScalarF64| { + *dij * sec_squared + }); + Some(dyn_mat) + } + None => None, + }, + } + } + + fn acos(self) -> Self { + Self { + val: self.val.acos(), + dij_val: match self.dij_val.clone() { + Some(dij_val) => { + let dval = -BatchScalarF64::::ones() + / (BatchScalarF64::::ones() - self.val * self.val).sqrt(); + let dyn_mat = + MutTensorDD::from_map(&dij_val.view(), |dij: &BatchScalarF64| { + *dij * dval + }); + Some(dyn_mat) + } + None => None, + }, + } + } + + fn asin(self) -> Self { + Self { + val: self.val.asin(), + dij_val: match self.dij_val.clone() { + Some(dij_val) => { + let dval = BatchScalarF64::::ones() + / (BatchScalarF64::::ones() - self.val * self.val).sqrt(); + let dyn_mat = + MutTensorDD::from_map(&dij_val.view(), |dij: &BatchScalarF64| { + *dij * dval + }); + Some(dyn_mat) + } + None => None, + }, + } + } + + fn atan(self) -> Self { + Self { + val: self.val.atan(), + dij_val: match self.dij_val.clone() { + Some(dij_val) => { + let dval = BatchScalarF64::::ones() + / (BatchScalarF64::::ones() + self.val * self.val); + let dyn_mat = + MutTensorDD::from_map(&dij_val.view(), |dij: &BatchScalarF64| { + *dij * dval + }); + Some(dyn_mat) + } + None => None, + }, + } + } + + fn fract(self) -> Self { + Self { + val: self.val.fract(), + dij_val: match self.dij_val.clone() { + Some(dij_val) => { + let dyn_mat = + MutTensorDD::from_map(&dij_val.view(), |dij: &BatchScalarF64| *dij); + Some(dyn_mat) + } + None => None, + }, + } + } + + fn floor(&self) -> BatchScalarF64 { + self.val.floor() + } + + fn from_f64(val: f64) -> Self { + Self::from_real_scalar(BatchScalarF64::::from_f64(val)) + } + + type DualScalar = Self; + + fn scalar(self) -> Self { + self + } + + fn ones() -> Self { + Self::from_f64(1.0) + } + + fn zeros() -> Self { + Self::from_f64(0.0) + } + + fn test_suite() { + let examples = Self::scalar_examples(); + for a in &examples { + let sin_a = a.clone().sin(); + let cos_a = a.clone().cos(); + let val = sin_a.clone() * sin_a + cos_a.clone() * cos_a; + let one = Self::ones(); + + for i in 0..BATCH { + assert_abs_diff_eq!(val.extract_single(i), one.extract_single(i)); + } + } + } + + fn less_equal(&self, rhs: &Self) -> 
Self::Mask { + self.val.less_equal(&rhs.val) + } + + fn to_dual(self) -> Self::DualScalar { + self + } + + fn select(self, mask: &Self::Mask, other: Self) -> Self { + Self { + val: self.val.select(mask, other.val), + dij_val: match (self.dij_val, other.dij_val) { + (Some(lhs), Some(rhs)) => { + let dyn_mat = MutTensorDD::from_map2( + &lhs.view(), + &rhs.view(), + |l: &BatchScalarF64, r: &BatchScalarF64| l.select(mask, *r), + ); + Some(dyn_mat) + } + _ => None, + }, + } + } + + fn greater_equal(&self, rhs: &Self) -> Self::Mask { + self.val.greater_equal(&rhs.val) + } +} + +impl AddAssign> for DualBatchScalar +where + BatchScalarF64: IsCoreScalar, + LaneCount: SupportedLaneCount, +{ + fn add_assign(&mut self, rhs: Self) { + *self = self.clone().add(&rhs); + } +} +impl SubAssign> for DualBatchScalar +where + BatchScalarF64: IsCoreScalar, + LaneCount: SupportedLaneCount, +{ + fn sub_assign(&mut self, rhs: Self) { + *self = self.clone().sub(&rhs); + } +} + +impl Add> for DualBatchScalar +where + BatchScalarF64: IsCoreScalar, + LaneCount: SupportedLaneCount, +{ + type Output = DualBatchScalar; + fn add(self, rhs: Self) -> Self::Output { + self.add(&rhs) + } +} + +impl Add<&DualBatchScalar> for DualBatchScalar +where + BatchScalarF64: IsCoreScalar, + LaneCount: SupportedLaneCount, +{ + type Output = DualBatchScalar; + fn add(self, rhs: &Self) -> Self::Output { + let r = self.val + rhs.val; + + Self { + val: r, + dij_val: Self::binary_dij(&self.dij_val, &rhs.dij_val, |l_dij| *l_dij, |r_dij| *r_dij), + } + } +} + +impl Mul> for DualBatchScalar +where + BatchScalarF64: IsCoreScalar, + LaneCount: SupportedLaneCount, +{ + type Output = DualBatchScalar; + fn mul(self, rhs: Self) -> Self::Output { + self.mul(&rhs) + } +} + +impl Mul<&DualBatchScalar> for DualBatchScalar +where + BatchScalarF64: IsCoreScalar, + LaneCount: SupportedLaneCount, +{ + type Output = DualBatchScalar; + fn mul(self, rhs: &Self) -> Self::Output { + let r = self.val * rhs.val; + + Self { + val: r, + dij_val: Self::binary_dij( + &self.dij_val, + &rhs.dij_val, + |l_dij| (*l_dij) * rhs.val, + |r_dij| (*r_dij) * self.val, + ), + } + } +} + +impl Div> for DualBatchScalar +where + BatchScalarF64: IsCoreScalar, + LaneCount: SupportedLaneCount, +{ + type Output = DualBatchScalar; + fn div(self, rhs: Self) -> Self::Output { + self.div(&rhs) + } +} + +impl Div<&DualBatchScalar> for DualBatchScalar +where + BatchScalarF64: IsCoreScalar, + LaneCount: SupportedLaneCount, +{ + type Output = DualBatchScalar; + fn div(self, rhs: &Self) -> Self::Output { + let rhs_inv = BatchScalarF64::::ones() / rhs.val; + Self { + val: self.val * rhs_inv, + dij_val: Self::binary_dij( + &self.dij_val, + &rhs.dij_val, + |l_dij| *l_dij * rhs_inv, + |r_dij| -self.val * (*r_dij) * rhs_inv * rhs_inv, + ), + } + } +} + +impl Sub> for DualBatchScalar +where + BatchScalarF64: IsCoreScalar, + LaneCount: SupportedLaneCount, +{ + type Output = DualBatchScalar; + fn sub(self, rhs: Self) -> Self::Output { + self.sub(&rhs) + } +} + +impl Sub<&DualBatchScalar> for DualBatchScalar +where + BatchScalarF64: IsCoreScalar, + LaneCount: SupportedLaneCount, +{ + type Output = DualBatchScalar; + fn sub(self, rhs: &Self) -> Self::Output { + Self { + val: self.val - rhs.val, + dij_val: Self::binary_dij( + &self.dij_val, + &rhs.dij_val, + |l_dij| *l_dij, + |r_dij| -(*r_dij), + ), + } + } +} + +#[test] +fn dual_scalar_tests() { + use crate::calculus::maps::curves::ScalarValuedCurve; + + trait DualScalarTest { + fn run_dual_scalar_test(); + } + macro_rules! 
def_dual_scalar_test_template { + ($batch:literal, $scalar: ty, $dual_scalar: ty) => { + impl DualScalarTest for $dual_scalar { + fn run_dual_scalar_test() { + let b = <$scalar>::from_f64(12.0); + for i in 1..10 { + let a: $scalar = <$scalar>::from_f64(0.1 * (i as f64)); + + // f(x) = x^2 + fn square_fn(x: $scalar) -> $scalar { + x.clone() * x + } + fn dual_square_fn(x: $dual_scalar) -> $dual_scalar { + x.clone() * x + } + let finite_diff = ScalarValuedCurve::sym_diff_quotient(square_fn, a, 1e-6); + let auto_grad = ScalarValuedCurve::fw_autodiff(dual_square_fn, a); + approx::assert_abs_diff_eq!(finite_diff, auto_grad, epsilon = 0.0001); + + { + fn add_fn(x: $scalar, y: $scalar) -> $scalar { + x + y + } + fn dual_add_fn(x: $dual_scalar, y: $dual_scalar) -> $dual_scalar { + x + y + } + + let finite_diff = + ScalarValuedCurve::sym_diff_quotient(|x| add_fn(x, b), a, 1e-6); + let auto_grad = ScalarValuedCurve::fw_autodiff( + |x| dual_add_fn(x, <$dual_scalar>::from_real_scalar(b)), + a, + ); + approx::assert_abs_diff_eq!(finite_diff, auto_grad, epsilon = 0.0001); + + let finite_diff = + ScalarValuedCurve::sym_diff_quotient(|x| add_fn(b, x), a, 1e-6); + let auto_grad = ScalarValuedCurve::fw_autodiff( + |x| dual_add_fn(<$dual_scalar>::from_real_scalar(b), x), + a, + ); + approx::assert_abs_diff_eq!(finite_diff, auto_grad, epsilon = 0.0001); + } + + { + fn sub_fn(x: $scalar, y: $scalar) -> $scalar { + x - y + } + fn dual_sub_fn(x: $dual_scalar, y: $dual_scalar) -> $dual_scalar { + x - y + } + let finite_diff = + ScalarValuedCurve::sym_diff_quotient(|x| sub_fn(x, b), a, 1e-6); + let auto_grad = ScalarValuedCurve::fw_autodiff( + |x| dual_sub_fn(x, <$dual_scalar>::from_real_scalar(b)), + a, + ); + approx::assert_abs_diff_eq!(finite_diff, auto_grad, epsilon = 0.0001); + + let finite_diff = + ScalarValuedCurve::sym_diff_quotient(|x| sub_fn(b, x), a, 1e-6); + let auto_grad = ScalarValuedCurve::fw_autodiff( + |x| dual_sub_fn(<$dual_scalar>::from_real_scalar(b), x), + a, + ); + approx::assert_abs_diff_eq!(finite_diff, auto_grad, epsilon = 0.0001); + } + + { + fn mul_fn(x: $scalar, y: $scalar) -> $scalar { + x * y + } + fn dual_mul_fn(x: $dual_scalar, y: $dual_scalar) -> $dual_scalar { + x * y + } + let finite_diff = + ScalarValuedCurve::sym_diff_quotient(|x| mul_fn(x, b), a, 1e-6); + let auto_grad = ScalarValuedCurve::fw_autodiff( + |x| dual_mul_fn(x, <$dual_scalar>::from_real_scalar(b)), + a, + ); + approx::assert_abs_diff_eq!(finite_diff, auto_grad, epsilon = 0.0001); + + let finite_diff = + ScalarValuedCurve::sym_diff_quotient(|x| mul_fn(x, b), a, 1e-6); + let auto_grad = ScalarValuedCurve::fw_autodiff( + |x| dual_mul_fn(x, <$dual_scalar>::from_real_scalar(b)), + a, + ); + approx::assert_abs_diff_eq!(finite_diff, auto_grad, epsilon = 0.0001); + } + + fn div_fn(x: $scalar, y: $scalar) -> $scalar { + x / y + } + fn dual_div_fn(x: $dual_scalar, y: $dual_scalar) -> $dual_scalar { + x / y + } + let finite_diff = + ScalarValuedCurve::sym_diff_quotient(|x| div_fn(x, b), a, 1e-6); + let auto_grad = ScalarValuedCurve::fw_autodiff( + |x| dual_div_fn(x, <$dual_scalar>::from_real_scalar(b)), + a, + ); + approx::assert_abs_diff_eq!(finite_diff, auto_grad, epsilon = 0.0001); + + let finite_diff = + ScalarValuedCurve::sym_diff_quotient(|x| div_fn(x, b), a, 1e-6); + let auto_grad = ScalarValuedCurve::fw_autodiff( + |x| dual_div_fn(x, <$dual_scalar>::from_real_scalar(b)), + a, + ); + approx::assert_abs_diff_eq!(finite_diff, auto_grad, epsilon = 0.0001); + + let finite_diff = + ScalarValuedCurve::sym_diff_quotient(|x| 
div_fn(b, x), a, 1e-6); + let auto_grad = ScalarValuedCurve::fw_autodiff( + |x| dual_div_fn(<$dual_scalar>::from_real_scalar(b), x), + a, + ); + approx::assert_abs_diff_eq!(finite_diff, auto_grad, epsilon = 0.0001); + + let finite_diff = + ScalarValuedCurve::sym_diff_quotient(|x| div_fn(x, b), a, 1e-6); + let auto_grad = ScalarValuedCurve::fw_autodiff( + |x| dual_div_fn(x, <$dual_scalar>::from_real_scalar(b)), + a, + ); + approx::assert_abs_diff_eq!(finite_diff, auto_grad, epsilon = 0.0001); + } + } + } + }; + } + + def_dual_scalar_test_template!(1, f64, DualScalar); + def_dual_scalar_test_template!(2, BatchScalarF64<2>, DualBatchScalar<2>); + def_dual_scalar_test_template!(4, BatchScalarF64<4>, DualBatchScalar<4>); + def_dual_scalar_test_template!(8, BatchScalarF64<8>, DualBatchScalar<8>); + + DualBatchScalar::<2>::run_dual_scalar_test(); + DualBatchScalar::<4>::run_dual_scalar_test(); + DualBatchScalar::<8>::run_dual_scalar_test(); +} diff --git a/crates/sophus_core/src/calculus/dual/dual_vector.rs b/crates/sophus_core/src/calculus/dual/dual_vector.rs new file mode 100644 index 0000000..5ad83b0 --- /dev/null +++ b/crates/sophus_core/src/calculus/dual/dual_vector.rs @@ -0,0 +1,1176 @@ +use approx::AbsDiffEq; +use approx::RelativeEq; + +use super::dual_matrix::DualMatrix; +use super::dual_scalar::DualBatchScalar; +use super::dual_scalar::DualScalar; +use super::dual_scalar::IsDual; +use super::dual_scalar::IsDualScalar; +use crate::calculus::dual::dual_matrix::DualBatchMatrix; +use crate::linalg::matrix::IsMatrix; +use crate::linalg::scalar::IsCoreScalar; +use crate::linalg::scalar::IsScalar; +use crate::linalg::vector::IsSingleVector; +use crate::linalg::vector::IsVector; +use crate::linalg::BatchScalarF64; +use crate::linalg::BatchVecF64; +use crate::linalg::VecF64; +use crate::tensor::mut_tensor::InnerVecToMat; +use crate::tensor::mut_tensor::MutTensorDD; +use crate::tensor::mut_tensor::MutTensorDDR; +use crate::tensor::mut_tensor_view::IsMutTensorLike; +use crate::tensor::tensor_view::IsTensorLike; +use std::fmt::Debug; +use std::ops::Add; +use std::ops::Neg; +use std::ops::Sub; +use std::simd::LaneCount; +use std::simd::Mask; +use std::simd::SupportedLaneCount; + +/// Dual vector +#[derive(Clone)] +pub struct DualVector { + /// value - real vector + pub val: VecF64, + /// derivative - infinitesimal vector + pub dij_val: Option>, +} + +/// Trait for scalar dual numbers +pub trait IsDualVector, const ROWS: usize, const BATCH: usize>: + IsVector + IsDual +{ + /// Create a new dual number + fn new(val: S::RealVector) -> Self; + + /// Get the derivative + fn dij_val(self) -> Option>; +} + +/// Dual vector (batch version) +#[derive(Clone, Debug)] +pub struct DualBatchVector +where + BatchScalarF64: IsCoreScalar, + LaneCount: SupportedLaneCount, +{ + /// value - real vector + pub val: BatchVecF64, + /// derivative - infinitesimal vector + pub dij_val: Option, ROWS>>, +} + +impl IsDual for DualVector {} + +impl IsDualVector for DualVector { + fn new(val: VecF64) -> Self { + DualVector::v(val) + } + + fn dij_val(self) -> Option> { + self.dij_val + } +} + +impl num_traits::Zero for DualVector { + fn zero() -> Self { + DualVector { + val: VecF64::zeros(), + dij_val: None, + } + } + + fn is_zero(&self) -> bool { + self.val == VecF64::::zeros() + } +} + +impl IsSingleVector for DualVector +where + DualVector: IsVector, +{ + fn set_real_scalar(&mut self, idx: usize, v: f64) { + self.val[idx] = v; + } +} + +struct DijPair { + lhs: MutTensorDDR, + rhs: MutTensorDDR, +} + +impl DijPair { + fn 
shape(&self) -> [usize; 2] {
+        self.lhs.dims()
+    }
+}
+
+impl<const ROWS: usize> DualVector<ROWS> {
+    /// create a dual vector
+    pub fn v(val: VecF64<ROWS>) -> Self {
+        let mut dij_val = MutTensorDDR::<f64, ROWS>::from_shape([ROWS, 1]);
+        for i in 0..ROWS {
+            dij_val.mut_view().get_mut([i, 0])[(i, 0)] = 1.0;
+        }
+
+        Self {
+            val,
+            dij_val: Some(dij_val),
+        }
+    }
+
+    fn binary_dij<
+        const R0: usize,
+        const R1: usize,
+        F: FnMut(&VecF64<R0>) -> VecF64<ROWS>,
+        G: FnMut(&VecF64<R1>) -> VecF64<ROWS>,
+    >(
+        lhs_dx: &Option<MutTensorDDR<f64, R0>>,
+        rhs_dx: &Option<MutTensorDDR<f64, R1>>,
+        mut left_op: F,
+        mut right_op: G,
+    ) -> Option<MutTensorDDR<f64, ROWS>> {
+        match (lhs_dx, rhs_dx) {
+            (None, None) => None,
+            (None, Some(rhs_dij)) => {
+                let out_dij = MutTensorDDR::from_map(&rhs_dij.view(), |r_dij| right_op(r_dij));
+                Some(out_dij)
+            }
+            (Some(lhs_dij), None) => {
+                let out_dij = MutTensorDDR::from_map(&lhs_dij.view(), |l_dij| left_op(l_dij));
+                Some(out_dij)
+            }
+            (Some(lhs_dij), Some(rhs_dij)) => {
+                let dyn_mat =
+                    MutTensorDDR::from_map2(&lhs_dij.view(), &rhs_dij.view(), |l_dij, r_dij| {
+                        left_op(l_dij) + right_op(r_dij)
+                    });
+                Some(dyn_mat)
+            }
+        }
+    }
+
+    fn binary_vs_dij<
+        const R0: usize,
+        F: FnMut(&VecF64<R0>) -> VecF64<ROWS>,
+        G: FnMut(&f64) -> VecF64<ROWS>,
+    >(
+        lhs_dx: &Option<MutTensorDDR<f64, R0>>,
+        rhs_dx: &Option<MutTensorDD<f64>>,
+        mut left_op: F,
+        mut right_op: G,
+    ) -> Option<MutTensorDDR<f64, ROWS>> {
+        match (lhs_dx, rhs_dx) {
+            (None, None) => None,
+            (None, Some(rhs_dij)) => {
+                let out_dij = MutTensorDDR::from_map(&rhs_dij.view(), |r_dij| right_op(r_dij));
+                Some(out_dij)
+            }
+            (Some(lhs_dij), None) => {
+                let out_dij = MutTensorDDR::from_map(&lhs_dij.view(), |l_dij| left_op(l_dij));
+                Some(out_dij)
+            }
+            (Some(lhs_dij), Some(rhs_dij)) => {
+                let dyn_mat =
+                    MutTensorDDR::from_map2(&lhs_dij.view(), &rhs_dij.view(), |l_dij, r_dij| {
+                        left_op(l_dij) + right_op(r_dij)
+                    });
+                Some(dyn_mat)
+            }
+        }
+    }
+
+    fn two_dx<const R0: usize, const R1: usize>(
+        mut lhs_dx: Option<MutTensorDDR<f64, R0>>,
+        mut rhs_dx: Option<MutTensorDDR<f64, R1>>,
+    ) -> Option<DijPair<f64, R0, R1>> {
+        if lhs_dx.is_none() && rhs_dx.is_none() {
+            return None;
+        }
+
+        if lhs_dx.is_some() && rhs_dx.is_some() {
+            assert_eq!(
+                lhs_dx.clone().unwrap().dims(),
+                rhs_dx.clone().unwrap().dims()
+            );
+        }
+
+        if lhs_dx.is_none() {
+            lhs_dx = Some(MutTensorDDR::from_shape(rhs_dx.clone().unwrap().dims()))
+        } else if rhs_dx.is_none() {
+            rhs_dx = Some(MutTensorDDR::from_shape(lhs_dx.clone().unwrap().dims()))
+        }
+
+        Some(DijPair {
+            lhs: lhs_dx.unwrap(),
+            rhs: rhs_dx.unwrap(),
+        })
+    }
+}
+
+impl<const ROWS: usize> Neg for DualVector<ROWS> {
+    type Output = DualVector<ROWS>;
+
+    fn neg(self) -> Self::Output {
+        DualVector {
+            val: -self.val,
+            dij_val: self
+                .dij_val
+                .clone()
+                .map(|dij_val| MutTensorDDR::from_map(&dij_val.view(), |v| -v)),
+        }
+    }
+}
+
+impl<const ROWS: usize> Sub for DualVector<ROWS> {
+    type Output = DualVector<ROWS>;
+
+    fn sub(self, rhs: Self) -> Self::Output {
+        DualVector {
+            val: self.val - rhs.val,
+            dij_val: Self::binary_dij(&self.dij_val, &rhs.dij_val, |l_dij| *l_dij, |r_dij| -r_dij),
+        }
+    }
+}
+
+impl<const ROWS: usize> Add for DualVector<ROWS> {
+    type Output = DualVector<ROWS>;
+
+    fn add(self, rhs: Self) -> Self::Output {
+        DualVector {
+            val: self.val + rhs.val,
+            dij_val: Self::binary_dij(&self.dij_val, &rhs.dij_val, |l_dij| *l_dij, |r_dij| *r_dij),
+        }
+    }
+}
+
+impl<const ROWS: usize> Debug for DualVector<ROWS> {
+    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+        if self.dij_val.is_some() {
+            f.debug_struct("DualScalarLike")
+                .field("val", &self.val)
+                .field("dij_val", &self.dij_val.as_ref().unwrap().elem_view())
+                .finish()
+        } else {
+            f.debug_struct("DualScalarLike")
+                .field("val", &self.val)
+                .finish()
+        }
+    }
+}
+
+impl<const ROWS: usize> PartialEq for DualVector<ROWS> {
+    fn eq(&self, other: &Self) -> bool {
+        self.val == other.val && self.dij_val ==
other.dij_val + } +} + +impl AbsDiffEq for DualVector { + type Epsilon = f64; + + fn default_epsilon() -> Self::Epsilon { + f64::default_epsilon() + } + + fn abs_diff_eq(&self, other: &Self, epsilon: Self::Epsilon) -> bool { + self.val.abs_diff_eq(&other.val, epsilon) + } +} + +impl RelativeEq for DualVector { + fn default_max_relative() -> Self::Epsilon { + f64::default_max_relative() + } + + fn relative_eq( + &self, + other: &Self, + epsilon: Self::Epsilon, + max_relative: Self::Epsilon, + ) -> bool { + self.val.relative_eq(&other.val, epsilon, max_relative) + } +} + +impl IsVector for DualVector { + fn from_f64(val: f64) -> Self { + DualVector { + val: VecF64::::from_scalar(val), + dij_val: None, + } + } + + fn set_real_elem(&mut self, idx: usize, v: f64) { + self.val[idx] = v; + if self.dij_val.is_some() { + let dij = &mut self.dij_val.as_mut().unwrap(); + for i in 0..dij.dims()[0] { + for j in 0..dij.dims()[1] { + dij.mut_view().get_mut([i, j])[idx] = 0.0; + } + } + } + } + + fn norm(&self) -> DualScalar { + self.clone().dot(self.clone()).sqrt() + } + + fn squared_norm(&self) -> DualScalar { + self.clone().dot(self.clone()) + } + + fn get_elem(&self, idx: usize) -> DualScalar { + DualScalar { + val: self.val[idx], + dij_val: self + .dij_val + .clone() + .map(|dij_val| MutTensorDD::from_map(&dij_val.view(), |v| v[idx])), + } + } + + fn from_array(duals: [DualScalar; ROWS]) -> Self { + let mut shape = None; + let mut val_v = VecF64::::zeros(); + for i in 0..duals.len() { + let d = duals.clone()[i].clone(); + + val_v[i] = d.val; + if d.dij_val.is_some() { + shape = Some(d.dij_val.clone().unwrap().dims()); + } + } + + if shape.is_none() { + return DualVector { + val: val_v, + dij_val: None, + }; + } + let shape = shape.unwrap(); + + let mut r = MutTensorDDR::::from_shape(shape); + + for i in 0..duals.len() { + let d = duals.clone()[i].clone(); + if d.dij_val.is_some() { + for d0 in 0..shape[0] { + for d1 in 0..shape[1] { + r.mut_view().get_mut([d0, d1])[(i, 0)] = + d.dij_val.clone().unwrap().get([d0, d1]); + } + } + } + } + DualVector { + val: val_v, + dij_val: Some(r), + } + } + + fn from_real_array(vals: [f64; ROWS]) -> Self { + DualVector { + val: VecF64::from_real_array(vals), + dij_val: None, + } + } + + fn from_real_vector(val: VecF64) -> Self { + Self { val, dij_val: None } + } + + fn real_vector(&self) -> &VecF64 { + &self.val + } + + fn get_fixed_rows(&self, start: usize) -> DualVector { + DualVector { + val: self.val.fixed_rows::(start).into(), + dij_val: self.dij_val.clone().map(|dij_val| { + MutTensorDDR::from_map(&dij_val.view(), |v| v.fixed_rows::(start).into()) + }), + } + } + + fn to_mat(self) -> DualMatrix { + DualMatrix { + val: self.val, + dij_val: self.dij_val.map(|dij| dij.inner_vec_to_mat()), + } + } + + fn block_vec2( + top_row: DualVector, + bot_row: DualVector, + ) -> Self { + assert_eq!(R0 + R1, ROWS); + + let maybe_dij = Self::two_dx(top_row.dij_val, bot_row.dij_val); + Self { + val: VecF64::::block_vec2(top_row.val, bot_row.val), + dij_val: match maybe_dij { + Some(dij_val) => { + let mut r = MutTensorDDR::::from_shape(dij_val.shape()); + for d0 in 0..dij_val.shape()[0] { + for d1 in 0..dij_val.shape()[1] { + *r.mut_view().get_mut([d0, d1]) = VecF64::::block_vec2( + dij_val.lhs.get([d0, d1]), + dij_val.rhs.get([d0, d1]), + ); + } + } + Some(r) + } + None => None, + }, + } + } + + fn scaled(&self, s: DualScalar) -> Self { + DualVector { + val: self.val * s.val, + dij_val: Self::binary_vs_dij( + &self.dij_val, + &s.dij_val, + |l_dij| l_dij * s.val, + |r_dij| 
self.val * *r_dij, + ), + } + } + + fn dot(self, rhs: Self) -> DualScalar { + let mut sum = ::from_f64(0.0); + + for i in 0..ROWS { + sum += self.get_elem(i) * rhs.get_elem(i); + } + + sum + } + + fn normalized(&self) -> Self { + self.clone() + .scaled(::from_f64(1.0) / self.norm()) + } + + fn from_f64_array(vals: [f64; ROWS]) -> Self { + DualVector { + val: VecF64::from_f64_array(vals), + dij_val: None, + } + } + + fn from_scalar_array(vals: [DualScalar; ROWS]) -> Self { + let mut shape = None; + let mut val_v = VecF64::::zeros(); + for i in 0..vals.len() { + let d = vals.clone()[i].clone(); + + val_v[i] = d.val; + if d.dij_val.is_some() { + shape = Some(d.dij_val.clone().unwrap().dims()); + } + } + + if shape.is_none() { + return DualVector { + val: val_v, + dij_val: None, + }; + } + let shape = shape.unwrap(); + + let mut r = MutTensorDDR::::from_shape(shape); + + for i in 0..vals.len() { + let d = vals.clone()[i].clone(); + if d.dij_val.is_some() { + for d0 in 0..shape[0] { + for d1 in 0..shape[1] { + r.mut_view().get_mut([d0, d1])[(i, 0)] = + d.dij_val.clone().unwrap().get([d0, d1]); + } + } + } + } + DualVector { + val: val_v, + dij_val: Some(r), + } + } + + fn set_elem(&mut self, idx: usize, v: DualScalar) { + self.val[idx] = v.val; + if self.dij_val.is_some() { + let dij = &mut self.dij_val.as_mut().unwrap(); + for i in 0..dij.dims()[0] { + for j in 0..dij.dims()[1] { + dij.mut_view().get_mut([i, j])[idx] = v.dij_val.clone().unwrap().get([i, j]); + } + } + } + } + + fn vector(self) -> Self { + self.clone() + } + + fn to_dual(self) -> >::DualVector { + self + } + + fn outer( + self, + rhs: DualVector, + ) -> >::Matrix { + let mut out = DualMatrix::::zeros(); + for i in 0..ROWS { + for j in 0..R2 { + out.set_elem([i, j], self.get_elem(i) * rhs.get_elem(j)); + } + } + out + } + + fn select(self, mask: &bool, other: Self) -> Self { + if *mask { + self + } else { + other + } + } + + fn get_fixed_subvec(&self, start_r: usize) -> DualVector { + DualVector { + val: self.val.fixed_rows::(start_r).into(), + dij_val: self.dij_val.clone().map(|dij_val| { + MutTensorDDR::from_map(&dij_val.view(), |v| v.fixed_rows::(start_r).into()) + }), + } + } +} + +impl num_traits::Zero for DualBatchVector +where + LaneCount: SupportedLaneCount, +{ + fn zero() -> Self { + DualBatchVector { + val: BatchVecF64::::zeros(), + dij_val: None, + } + } + + fn is_zero(&self) -> bool { + self.val == BatchVecF64::::zeros() + } +} + +impl IsDual for DualBatchVector where + LaneCount: SupportedLaneCount +{ +} + +impl PartialEq for DualBatchVector +where + LaneCount: SupportedLaneCount, +{ + fn eq(&self, other: &Self) -> bool { + self.val == other.val && self.dij_val == other.dij_val + } +} + +impl AbsDiffEq for DualBatchVector +where + LaneCount: SupportedLaneCount, +{ + type Epsilon = f64; + + fn default_epsilon() -> Self::Epsilon { + f64::default_epsilon() + } + + fn abs_diff_eq(&self, other: &Self, epsilon: Self::Epsilon) -> bool { + self.val.abs_diff_eq(&other.val, epsilon) + } +} + +impl RelativeEq for DualBatchVector +where + LaneCount: SupportedLaneCount, +{ + fn default_max_relative() -> Self::Epsilon { + f64::default_max_relative() + } + + fn relative_eq( + &self, + other: &Self, + epsilon: Self::Epsilon, + max_relative: Self::Epsilon, + ) -> bool { + self.val.relative_eq(&other.val, epsilon, max_relative) + } +} + +impl IsDualVector, ROWS, BATCH> + for DualBatchVector +where + LaneCount: SupportedLaneCount, +{ + fn new(val: BatchVecF64) -> Self { + DualBatchVector::::v(val) + } + + fn dij_val(self) -> Option, 
ROWS>> { + self.dij_val + } +} + +impl DualBatchVector +where + LaneCount: SupportedLaneCount, +{ + /// create a dual vector + pub fn v(val: BatchVecF64) -> Self { + let mut dij_val = MutTensorDDR::, ROWS>::from_shape([ROWS, 1]); + for i in 0..ROWS { + dij_val.mut_view().get_mut([i, 0])[(i, 0)] = BatchScalarF64::::ones(); + } + + Self { + val, + dij_val: Some(dij_val), + } + } + + fn binary_dij< + const R0: usize, + const R1: usize, + F: FnMut(&BatchVecF64) -> BatchVecF64, + G: FnMut(&BatchVecF64) -> BatchVecF64, + >( + lhs_dx: &Option, R0>>, + rhs_dx: &Option, R1>>, + mut left_op: F, + mut right_op: G, + ) -> Option, ROWS>> { + match (lhs_dx, rhs_dx) { + (None, None) => None, + (None, Some(rhs_dij)) => { + let out_dij = MutTensorDDR::from_map(&rhs_dij.view(), |r_dij| right_op(r_dij)); + Some(out_dij) + } + (Some(lhs_dij), None) => { + let out_dij = MutTensorDDR::from_map(&lhs_dij.view(), |l_dij| left_op(l_dij)); + Some(out_dij) + } + (Some(lhs_dij), Some(rhs_dij)) => { + let dyn_mat = + MutTensorDDR::from_map2(&lhs_dij.view(), &rhs_dij.view(), |l_dij, r_dij| { + left_op(l_dij) + right_op(r_dij) + }); + Some(dyn_mat) + } + } + } + + fn binary_vs_dij< + const R0: usize, + F: FnMut(&BatchVecF64) -> BatchVecF64, + G: FnMut(&BatchScalarF64) -> BatchVecF64, + >( + lhs_dx: &Option, R0>>, + rhs_dx: &Option>>, + mut left_op: F, + mut right_op: G, + ) -> Option, ROWS>> { + match (lhs_dx, rhs_dx) { + (None, None) => None, + (None, Some(rhs_dij)) => { + let out_dij = MutTensorDDR::from_map(&rhs_dij.view(), |r_dij| right_op(r_dij)); + Some(out_dij) + } + (Some(lhs_dij), None) => { + let out_dij = MutTensorDDR::from_map(&lhs_dij.view(), |l_dij| left_op(l_dij)); + Some(out_dij) + } + (Some(lhs_dij), Some(rhs_dij)) => { + let dyn_mat = + MutTensorDDR::from_map2(&lhs_dij.view(), &rhs_dij.view(), |l_dij, r_dij| { + left_op(l_dij) + right_op(r_dij) + }); + Some(dyn_mat) + } + } + } + + fn two_dx( + mut lhs_dx: Option, R0>>, + mut rhs_dx: Option, R1>>, + ) -> Option, R0, R1>> { + if lhs_dx.is_none() && rhs_dx.is_none() { + return None; + } + + if lhs_dx.is_some() && rhs_dx.is_some() { + assert_eq!( + lhs_dx.clone().unwrap().dims(), + rhs_dx.clone().unwrap().dims() + ); + } + + if lhs_dx.is_none() { + lhs_dx = Some(MutTensorDDR::from_shape(rhs_dx.clone().unwrap().dims())) + } else if rhs_dx.is_none() { + rhs_dx = Some(MutTensorDDR::from_shape(lhs_dx.clone().unwrap().dims())) + } + + Some(DijPair { + lhs: lhs_dx.unwrap(), + rhs: rhs_dx.unwrap(), + }) + } +} + +impl Neg for DualBatchVector +where + LaneCount: SupportedLaneCount, +{ + type Output = DualBatchVector; + + fn neg(self) -> Self::Output { + DualBatchVector { + val: -self.val, + dij_val: self + .dij_val + .clone() + .map(|dij_val| MutTensorDDR::from_map(&dij_val.view(), |v| -v)), + } + } +} + +impl Sub for DualBatchVector +where + LaneCount: SupportedLaneCount, +{ + type Output = DualBatchVector; + + fn sub(self, rhs: Self) -> Self::Output { + DualBatchVector { + val: self.val - rhs.val, + dij_val: Self::binary_dij(&self.dij_val, &rhs.dij_val, |l_dij| *l_dij, |r_dij| -r_dij), + } + } +} + +impl Add for DualBatchVector +where + LaneCount: SupportedLaneCount, +{ + type Output = DualBatchVector; + + fn add(self, rhs: Self) -> Self::Output { + DualBatchVector { + val: self.val + rhs.val, + dij_val: Self::binary_dij(&self.dij_val, &rhs.dij_val, |l_dij| *l_dij, |r_dij| *r_dij), + } + } +} + +impl IsVector, ROWS, BATCH> + for DualBatchVector +where + BatchScalarF64: IsCoreScalar, + LaneCount: SupportedLaneCount, +{ + fn from_f64(val: f64) -> Self { + 
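+        // A plain f64 constant has no derivative part, hence dij_val: None below.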
DualBatchVector { + val: + as IsVector, ROWS, BATCH>>::from_f64( + val, + ), + dij_val: None, + } + } + + fn outer( + self, + rhs: DualBatchVector, + ) -> DualBatchMatrix { + let mut result = DualBatchMatrix::zeros(); + for i in 0..ROWS { + for j in 0..R2 { + result.set_elem([i, j], self.get_elem(i) * rhs.get_elem(j)); + } + } + result + } + + fn set_real_elem(&mut self, idx: usize, v: BatchScalarF64) { + self.val[idx] = v; + if self.dij_val.is_some() { + let dij = &mut self.dij_val.as_mut().unwrap(); + for i in 0..dij.dims()[0] { + for j in 0..dij.dims()[1] { + dij.mut_view().get_mut([i, j])[idx] = BatchScalarF64::::from_f64(0.0); + } + } + } + } + + fn norm(&self) -> DualBatchScalar { + self.clone().dot(self.clone()).sqrt() + } + + fn squared_norm(&self) -> DualBatchScalar { + self.clone().dot(self.clone()) + } + + fn get_elem(&self, idx: usize) -> DualBatchScalar { + DualBatchScalar { + val: self.val[idx], + dij_val: self + .dij_val + .clone() + .map(|dij_val| MutTensorDD::from_map(&dij_val.view(), |v| v[idx])), + } + } + + fn from_array(duals: [DualBatchScalar; ROWS]) -> Self { + let mut shape = None; + let mut val_v = BatchVecF64::::zeros(); + for i in 0..duals.len() { + let d = duals.clone()[i].clone(); + + val_v[i] = d.val; + if d.dij_val.is_some() { + shape = Some(d.dij_val.clone().unwrap().dims()); + } + } + + if shape.is_none() { + return DualBatchVector { + val: val_v, + dij_val: None, + }; + } + let shape = shape.unwrap(); + + let mut r = MutTensorDDR::, ROWS>::from_shape(shape); + + for i in 0..duals.len() { + let d = duals.clone()[i].clone(); + if d.dij_val.is_some() { + for d0 in 0..shape[0] { + for d1 in 0..shape[1] { + r.mut_view().get_mut([d0, d1])[(i, 0)] = + d.dij_val.clone().unwrap().get([d0, d1]); + } + } + } + } + DualBatchVector { + val: val_v, + dij_val: Some(r), + } + } + + fn from_real_array(vals: [BatchScalarF64; ROWS]) -> Self { + DualBatchVector { + val: BatchVecF64::from_real_array(vals), + dij_val: None, + } + } + + fn from_real_vector(val: BatchVecF64) -> Self { + Self { val, dij_val: None } + } + + fn real_vector(&self) -> &BatchVecF64 { + &self.val + } + + fn get_fixed_rows(&self, start: usize) -> DualBatchVector { + DualBatchVector { + val: self.val.fixed_rows::(start).into(), + dij_val: self.dij_val.clone().map(|dij_val| { + MutTensorDDR::from_map(&dij_val.view(), |v| v.fixed_rows::(start).into()) + }), + } + } + + fn to_mat(self) -> DualBatchMatrix { + DualBatchMatrix { + val: self.val, + dij_val: self.dij_val.map(|dij| dij.inner_vec_to_mat()), + } + } + + fn block_vec2( + top_row: DualBatchVector, + bot_row: DualBatchVector, + ) -> Self { + assert_eq!(R0 + R1, ROWS); + + let maybe_dij = Self::two_dx(top_row.dij_val, bot_row.dij_val); + Self { + val: BatchVecF64::::block_vec2(top_row.val, bot_row.val), + dij_val: match maybe_dij { + Some(dij_val) => { + let mut r = + MutTensorDDR::, ROWS>::from_shape(dij_val.shape()); + for d0 in 0..dij_val.shape()[0] { + for d1 in 0..dij_val.shape()[1] { + *r.mut_view().get_mut([d0, d1]) = + BatchVecF64::::block_vec2( + dij_val.lhs.get([d0, d1]), + dij_val.rhs.get([d0, d1]), + ); + } + } + Some(r) + } + None => None, + }, + } + } + + fn scaled(&self, s: DualBatchScalar) -> Self { + DualBatchVector { + val: self.val * s.val, + dij_val: Self::binary_vs_dij( + &self.dij_val, + &s.dij_val, + |l_dij| l_dij * s.val, + |r_dij| self.val * *r_dij, + ), + } + } + + fn dot(self, rhs: Self) -> DualBatchScalar { + let mut sum = DualBatchScalar::from_f64(0.0); + + for i in 0..ROWS { + sum += self.get_elem(i) * rhs.get_elem(i); + } 
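+        // `*` and `+=` on DualBatchScalar implement the product and sum
+        // rules, so `sum` accumulates both the value Σᵢ xᵢ·yᵢ and its
+        // derivative Σᵢ (yᵢ·dxᵢ + xᵢ·dyᵢ) in a single pass.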
+ + sum + } + + fn normalized(&self) -> Self { + self.clone() + .scaled(DualBatchScalar::::from_f64(1.0) / self.norm()) + } + + fn from_f64_array(vals: [f64; ROWS]) -> Self { + DualBatchVector { + val: BatchVecF64::from_f64_array(vals), + dij_val: None, + } + } + + fn from_scalar_array(vals: [DualBatchScalar; ROWS]) -> Self { + let mut shape = None; + let mut val_v = BatchVecF64::::zeros(); + for i in 0..vals.len() { + let d = vals.clone()[i].clone(); + + val_v[i] = d.val; + if d.dij_val.is_some() { + shape = Some(d.dij_val.clone().unwrap().dims()); + } + } + + if shape.is_none() { + return DualBatchVector { + val: val_v, + dij_val: None, + }; + } + let shape = shape.unwrap(); + + let mut r = MutTensorDDR::, ROWS>::from_shape(shape); + + for i in 0..vals.len() { + let d = vals.clone()[i].clone(); + if d.dij_val.is_some() { + for d0 in 0..shape[0] { + for d1 in 0..shape[1] { + r.mut_view().get_mut([d0, d1])[(i, 0)] = + d.dij_val.clone().unwrap().get([d0, d1]); + } + } + } + } + DualBatchVector { + val: val_v, + dij_val: Some(r), + } + } + + fn set_elem(&mut self, idx: usize, v: DualBatchScalar) { + self.val[idx] = v.val; + if self.dij_val.is_some() { + let dij = &mut self.dij_val.as_mut().unwrap(); + for i in 0..dij.dims()[0] { + for j in 0..dij.dims()[1] { + dij.mut_view().get_mut([i, j])[idx] = v.dij_val.clone().unwrap().get([i, j]); + } + } + } + } + + fn vector(self) -> Self { + self.clone() + } + + fn to_dual(self) -> as IsScalar>::DualVector { + self + } + + fn select(self, mask: &Mask, other: Self) -> Self { + let maybe_dij = Self::two_dx(self.dij_val, other.dij_val); + + Self { + val: IsVector::select(self.val, mask, other.val), + dij_val: match maybe_dij { + Some(dij) => { + let mut r = + MutTensorDDR::, ROWS>::from_shape(dij.shape()); + for i in 0..dij.shape()[0] { + for j in 0..dij.shape()[1] { + *r.get_mut([i, j]) = + IsVector::select(dij.lhs.get([i, j]), mask, dij.rhs.get([i, j])); + } + } + Some(r) + } + _ => None, + }, + } + } + + fn get_fixed_subvec(&self, start_r: usize) -> DualBatchVector { + DualBatchVector { + val: self.val.fixed_rows::(start_r).into(), + dij_val: self.dij_val.clone().map(|dij_val| { + MutTensorDDR::from_map(&dij_val.view(), |v| v.fixed_rows::(start_r).into()) + }), + } + } +} + +#[test] +fn dual_vector_tests() { + use crate::calculus::dual::dual_scalar::DualBatchScalar; + use crate::calculus::dual::dual_scalar::DualScalar; + use crate::calculus::maps::scalar_valued_maps::ScalarValuedMapFromVector; + use crate::calculus::maps::vector_valued_maps::VectorValuedMapFromVector; + use crate::linalg::vector::IsVector; + use crate::linalg::BatchScalarF64; + use crate::points::example_points; + + #[cfg(test)] + trait Test { + fn run(); + } + + macro_rules! 
def_test_template { + ( $scalar:ty, $dual_scalar: ty, $batch:literal + ) => { + #[cfg(test)] + impl Test for $scalar { + fn run() { + let points = example_points::<$scalar, 4, $batch>(); + + for p in points.clone() { + for p1 in points.clone() { + { + fn dot_fn, const BATCH: usize>( + x: S::Vector<4>, + y: S::Vector<4>, + ) -> S { + x.dot(y) + } + let finite_diff = + ScalarValuedMapFromVector::<$scalar, $batch>::sym_diff_quotient( + |x| dot_fn(x, p1), + p, + 1e-6, + ); + let auto_grad = + ScalarValuedMapFromVector::<$dual_scalar, $batch>::fw_autodiff( + |x| { + dot_fn( + x, + <$dual_scalar as IsScalar<$batch>>::Vector::<4>::from_real_vector(p1), + ) + }, + p, + ); + approx::assert_abs_diff_eq!( + finite_diff, + auto_grad, + epsilon = 0.0001 + ); + } + + fn dot_fn, const BATCH: usize>(x: S::Vector<4>, s: S) -> S::Vector<4> { + x.scaled(s) + } + let finite_diff = VectorValuedMapFromVector::<$scalar, $batch>::sym_diff_quotient( + |x| dot_fn::<$scalar, $batch>(x, <$scalar>::from_f64(0.99)), + p, + 1e-6, + ); + let auto_grad = VectorValuedMapFromVector::<$dual_scalar, $batch>::fw_autodiff( + |x| dot_fn::<$dual_scalar, $batch>(x, <$dual_scalar>::from_f64(0.99)), + p, + ); + for i in 0..finite_diff.dims()[0] { + approx::assert_abs_diff_eq!( + finite_diff.get([i]), + auto_grad.get([i]), + epsilon = 0.0001 + ); + } + + let finite_diff = VectorValuedMapFromVector::<$scalar, $batch>::sym_diff_quotient( + |x| dot_fn::<$scalar, $batch>(p1, x[0]), + p, + 1e-6, + ); + let auto_grad = VectorValuedMapFromVector::<$dual_scalar, $batch>::fw_autodiff( + |x| { + dot_fn::<$dual_scalar, $batch>( + <$dual_scalar as IsScalar<$batch>>::Vector::from_real_vector(p1), + x.get_elem(0), + ) + }, + p, + ); + for i in 0..finite_diff.dims()[0] { + approx::assert_abs_diff_eq!( + finite_diff.get([i]), + auto_grad.get([i]), + epsilon = 0.0001 + ); + } + } + } + } + } + }; + } + + def_test_template!(f64, DualScalar, 1); + def_test_template!(BatchScalarF64<2>, DualBatchScalar<2>, 2); + def_test_template!(BatchScalarF64<4>, DualBatchScalar<4>, 4); + + f64::run(); + BatchScalarF64::<2>::run(); + BatchScalarF64::<4>::run(); +} diff --git a/crates/sophus_calculus/src/manifold.rs b/crates/sophus_core/src/calculus/manifold.rs similarity index 100% rename from crates/sophus_calculus/src/manifold.rs rename to crates/sophus_core/src/calculus/manifold.rs diff --git a/crates/sophus_calculus/src/manifold/traits.rs b/crates/sophus_core/src/calculus/manifold/traits.rs similarity index 55% rename from crates/sophus_calculus/src/manifold/traits.rs rename to crates/sophus_core/src/calculus/manifold/traits.rs index ea5e95e..cffeab3 100644 --- a/crates/sophus_calculus/src/manifold/traits.rs +++ b/crates/sophus_core/src/calculus/manifold/traits.rs @@ -1,8 +1,8 @@ -use crate::points::example_points; -use crate::types::params::HasParams; -use crate::types::params::ParamsImpl; -use crate::types::scalar::IsScalar; -use crate::types::VecF64; +use crate::linalg::VecF64; + +use crate::linalg::scalar::IsScalar; +use crate::params::HasParams; +use crate::params::ParamsImpl; /// A tangent implementation. 
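+// For intuition: on the flat manifold VecF64<ROWS> (see the `IsManifold`
+// impl at the bottom of this file), `oplus` is plain vector addition and
+// `ominus` is subtraction, so one undoes the other up to floating-point
+// rounding. A minimal sketch, assuming a 3-vector:
+//
+//     let a = VecF64::<3>::new(1.0, 2.0, 3.0);
+//     let t = VecF64::<3>::new(0.1, 0.2, 0.3);
+//     approx::assert_abs_diff_eq!(a.oplus(&t).ominus(&a), t, epsilon = 1e-12);
+//
+// Non-flat manifolds (e.g. Lie groups) replace these with exp/log-based
+// charts, which is what makes the shared trait useful.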
pub trait TangentImpl, const DOF: usize, const BATCH_SIZE: usize> { @@ -42,44 +42,16 @@ pub trait IsManifold< fn ominus(&self, rhs: &Self) -> S::Vector; } -impl ParamsImpl for VecF64 { - fn are_params_valid(_params: &>::Vector) -> bool { - true - } - - fn params_examples() -> Vec<>::Vector> { - example_points::() - } - - fn invalid_params_examples() -> Vec<>::Vector> { - vec![] - } -} - -impl HasParams for VecF64 { - fn from_params(params: &>::Vector) -> Self { - *params - } - - fn set_params(&mut self, params: &>::Vector) { - *self = *params - } - - fn params(&self) -> &>::Vector { - self - } -} - impl IsManifold for VecF64 { - fn oplus(&self, tangent: &>::Vector) -> Self { + fn oplus(&self, tangent: &VecF64) -> Self { self + tangent } - fn ominus(&self, rhs: &Self) -> >::Vector { + fn ominus(&self, rhs: &Self) -> VecF64 { self - rhs } - fn params(&self) -> &>::Vector { + fn params(&self) -> &VecF64 { self } } diff --git a/crates/sophus_calculus/src/maps.rs b/crates/sophus_core/src/calculus/maps.rs similarity index 100% rename from crates/sophus_calculus/src/maps.rs rename to crates/sophus_core/src/calculus/maps.rs diff --git a/crates/sophus_core/src/calculus/maps/curves.rs b/crates/sophus_core/src/calculus/maps/curves.rs new file mode 100644 index 0000000..2d45a62 --- /dev/null +++ b/crates/sophus_core/src/calculus/maps/curves.rs @@ -0,0 +1,210 @@ +use crate::calculus::dual::dual_matrix::IsDualMatrix; +use crate::calculus::dual::dual_scalar::IsDualScalar; +use crate::calculus::dual::dual_vector::IsDualVector; +use crate::linalg::matrix::IsMatrix; +use crate::linalg::scalar::IsScalar; +use crate::linalg::vector::IsVector; +use crate::linalg::SMat; +use crate::tensor::tensor_view::IsTensorLike; +use nalgebra::SVector; + +/// A smooth curve in ℝ. +/// +/// This is a function which takes a scalar and returns a scalar: +/// +/// f: ℝ -> ℝ +pub struct ScalarValuedCurve, const BATCH: usize> { + phantom: std::marker::PhantomData, +} + +impl, const BATCH: usize> ScalarValuedCurve { + /// Finite difference quotient of the scalar-valued curve. + /// + /// The derivative is also a scalar. + pub fn sym_diff_quotient(curve: TFn, a: S, h: f64) -> S + where + TFn: Fn(S) -> S, + { + let hh = S::from_f64(h); + (curve(a.clone() + hh.clone()) - curve(a - hh)) / S::from_f64(2.0 * h) + } +} + +impl, const BATCH: usize> ScalarValuedCurve { + /// Auto differentiation of the scalar-valued curve. + pub fn fw_autodiff(curve: TFn, a: D::RealScalar) -> D::RealScalar + where + TFn: Fn(D) -> D, + { + curve(D::new(a)).dij_val().clone().unwrap().get([0, 0]) + } +} + +/// A smooth curve in ℝʳ. +/// +/// This is a function which takes a scalar and returns a vector: +/// +/// f: ℝ -> ℝʳ +pub struct VectorValuedCurve, const BATCH: usize> { + phantom: std::marker::PhantomData, +} + +impl, const BATCH: usize> VectorValuedCurve { + /// Finite difference quotient of the vector-valued curve. + /// + /// The derivative is also a vector. + pub fn sym_diff_quotient(curve: TFn, a: S, h: f64) -> S::Vector + where + TFn: Fn(S) -> S::Vector, + { + let hh = S::from_f64(h); + (curve(a.clone() + hh.clone()) - curve(a - hh)).scaled(S::from_f64(1.0 / (2.0 * h))) + } +} + +impl, const BATCH: usize> VectorValuedCurve { + /// Auto differentiation of the vector-valued curve. + pub fn fw_autodiff( + curve: TFn, + a: D::RealScalar, + ) -> SVector + where + TFn: Fn(D) -> D::Vector, + D::Vector: IsDualVector, + { + curve(D::new(a)).dij_val().unwrap().get([0, 0]) + } +} + +/// A smooth curve in ℝʳ x ℝᶜ. 
+/// +/// This is a function which takes a scalar and returns a matrix: +/// f: ℝ -> ℝʳ x ℝᶜ +pub struct MatrixValuedCurve, const BATCH: usize> { + phantom: std::marker::PhantomData, +} + +impl, const BATCH: usize> MatrixValuedCurve { + /// Finite difference quotient of the matrix-valued curve. + /// + /// The derivative is also a matrix. + pub fn sym_diff_quotient( + curve: TFn, + a: S, + h: f64, + ) -> S::Matrix + where + TFn: Fn(S) -> S::Matrix, + { + let hh = S::from_f64(h); + (curve(a.clone() + hh.clone()) - curve(a - hh)).scaled(S::from_f64(1.0 / (2.0 * h))) + } +} + +impl, const BATCH: usize> MatrixValuedCurve { + /// Auto differentiation of the matrix-valued curve. + pub fn fw_autodiff( + curve: TFn, + a: D::RealScalar, + ) -> SMat<>::RealScalar, ROWS, COLS> + where + TFn: Fn(D) -> D::Matrix, + D::Matrix: IsDualMatrix, + { + curve(D::new(a)).dij_val().unwrap().get([0, 0]) + } +} + +#[test] +fn curve_test() { + use crate::calculus::dual::dual_scalar::DualBatchScalar; + use crate::calculus::dual::dual_scalar::DualScalar; + use crate::linalg::scalar::IsScalar; + use crate::linalg::BatchScalarF64; + + trait CurveTest { + fn run_curve_test(); + } + + macro_rules! def_curve_test_template { + ($batch:literal, $scalar: ty, $dual_scalar: ty + ) => { + impl CurveTest for $dual_scalar { + fn run_curve_test() { + use crate::linalg::matrix::IsMatrix; + use crate::linalg::vector::IsVector; + + for i in 0..10 { + let a = <$scalar>::from_f64(0.1 * (i as f64)); + + // f(x) = x^2 + fn square_fn, const BATCH: usize>(x: S) -> S { + x.clone() * x + } + let finite_diff = ScalarValuedCurve::<$scalar, $batch>::sym_diff_quotient( + square_fn, + a.clone(), + 1e-6, + ); + let auto_grad = + ScalarValuedCurve::<$dual_scalar, $batch>::fw_autodiff(square_fn, a); + approx::assert_abs_diff_eq!(finite_diff, auto_grad, epsilon = 0.0001); + } + + for i in 0..10 { + let a = <$scalar>::from_f64(0.1 * (i as f64)); + + // f(x) = [cos(x), sin(x)] + fn trig_fn, const BATCH: usize>(x: S) -> S::Vector<2> { + S::Vector::<2>::from_array([x.clone().cos(), x.sin()]) + } + + let finite_diff = VectorValuedCurve::<$scalar, $batch>::sym_diff_quotient( + trig_fn, + a.clone(), + 1e-6, + ); + let auto_grad = + VectorValuedCurve::<$dual_scalar, $batch>::fw_autodiff(trig_fn, a); + approx::assert_abs_diff_eq!(finite_diff, auto_grad, epsilon = 0.0001); + } + + for i in 0..10 { + let a = <$scalar>::from_f64(0.1 * (i as f64)); + + // f(x) = [[ cos(x), sin(x), 0], + // [-sin(x), cos(x), 0]] + fn fn_x, const BATCH: usize>(x: S) -> S::Matrix<2, 3> { + let sin = x.clone().sin(); + let cos = x.clone().cos(); + + S::Matrix::from_array2([ + [cos.clone(), sin.clone(), S::from_f64(0.0)], + [-sin, cos, S::from_f64(0.0)], + ]) + } + + let finite_diff = MatrixValuedCurve::<$scalar, $batch>::sym_diff_quotient( + fn_x, + a.clone(), + 1e-6, + ); + let auto_grad = + MatrixValuedCurve::<$dual_scalar, $batch>::fw_autodiff(fn_x, a); + approx::assert_abs_diff_eq!(finite_diff, auto_grad, epsilon = 0.0001); + } + } + } + }; + } + + def_curve_test_template!(1, f64, DualScalar); + def_curve_test_template!(2, BatchScalarF64<2>, DualBatchScalar<2>); + def_curve_test_template!(4, BatchScalarF64<4>, DualBatchScalar<4>); + def_curve_test_template!(8, BatchScalarF64<8>, DualBatchScalar<8>); + + DualScalar::run_curve_test(); + DualBatchScalar::<2>::run_curve_test(); + DualBatchScalar::<4>::run_curve_test(); + DualBatchScalar::<8>::run_curve_test(); +} diff --git a/crates/sophus_core/src/calculus/maps/matrix_valued_maps.rs 
b/crates/sophus_core/src/calculus/maps/matrix_valued_maps.rs new file mode 100644 index 0000000..184cfa5 --- /dev/null +++ b/crates/sophus_core/src/calculus/maps/matrix_valued_maps.rs @@ -0,0 +1,300 @@ +use crate::calculus::dual::dual_matrix::IsDualMatrix; +use crate::calculus::dual::dual_scalar::IsDualScalar; +use crate::linalg::matrix::IsMatrix; +use crate::linalg::matrix::IsRealMatrix; +use crate::linalg::scalar::IsRealScalar; +use crate::linalg::scalar::IsScalar; +use crate::linalg::SMat; +use crate::tensor::mut_tensor::MutTensorDDRC; +use crate::tensor::mut_tensor::MutTensorDRC; +use crate::tensor::mut_tensor_view::IsMutTensorLike; +use nalgebra::SMatrix; + +use std::marker::PhantomData; + +/// Matrix-valued map on a vector space. +/// +/// This is a function which takes a vector and returns a matrix: +/// +/// f: ℝᵐ -> ℝʳ x ℝᶜ +/// +pub struct MatrixValuedMapFromVector, const BATCH: usize> { + phantom: std::marker::PhantomData, +} + +impl, const BATCH: usize> + MatrixValuedMapFromVector +{ + /// Finite difference quotient of the matrix-valued map. + /// + /// The derivative is a rank-3 tensor with shape (Rₒ x Cₒ x Rᵢ). + /// + /// For efficiency reasons, we return Rᵢ x [Rₒ x Cₒ] + pub fn sym_diff_quotient( + matrix_valued: TFn, + a: S::RealVector, + eps: f64, + ) -> MutTensorDRC + where + TFn: Fn(S::RealVector) -> SMatrix, + SMatrix: IsRealMatrix, + { + let mut out = MutTensorDRC::::from_shape([INROWS]); + let eps_v = S::RealScalar::from_f64(eps); + + for i1 in 0..INROWS { + let mut a_plus = a; + + a_plus[i1] += eps_v; + + let mut a_minus = a; + a_minus[i1] -= eps_v; + + let val = (matrix_valued(a_plus) - matrix_valued(a_minus)) + .scaled(S::from_f64(1.0 / (2.0 * eps))); + + *out.mut_view().get_mut([i1]) = val; + } + out + } +} + +impl, const BATCH: usize> + MatrixValuedMapFromVector +{ + /// Auto differentiation of the matrix-valued map. + pub fn fw_autodiff( + matrix_valued: TFn, + a: D::RealVector, + ) -> MutTensorDRC + where + TFn: Fn(D::DualVector) -> D::DualMatrix, + { + MutTensorDRC { + mut_array: matrix_valued(D::vector_v(a)) + .dij_val() + .unwrap() + .mut_array + .into_shape([INROWS]) + .unwrap(), + phantom: PhantomData, + } + } +} + +/// Matrix-valued map on a product space (=matrices). +/// +/// This is a function which takes a matrix and returns a matrix: +/// +/// f: ℝᵐ x ℝⁿ -> ℝʳ x ℝᶜ +/// +pub struct MatrixValuedMapFromMatrix, const BATCH: usize> { + phantom: std::marker::PhantomData, +} + +impl, const BATCH: usize> + MatrixValuedMapFromMatrix +{ + /// Finite difference quotient of the matrix-valued map. + /// + /// The derivative is a rank-4 tensor with shape (Rₒ x Cₒ x Rᵢ x Cᵢ). 
+ /// + /// For efficiency reasons, we return Rᵢ x Cᵢ x [Rₒ x Cₒ] + pub fn sym_diff_quotient< + TFn, + const OUTROWS: usize, + const OUTCOLS: usize, + const INROWS: usize, + const INCOLS: usize, + >( + vector_field: TFn, + a: S::RealMatrix, + eps: f64, + ) -> MutTensorDDRC + where + TFn: Fn(S::RealMatrix) -> SMatrix, + SMatrix: IsRealMatrix, + { + let mut out = MutTensorDDRC::::from_shape_and_val( + [INROWS, INCOLS], + SMat::::zeros(), + ); + let eps_v = S::RealScalar::from_f64(eps); + for i1 in 0..INROWS { + for i0 in 0..INCOLS { + let mut a_plus = a; + + a_plus[(i1, i0)] += eps_v; + + let mut a_minus = a; + a_minus[(i1, i0)] -= eps_v; + + let val = (vector_field(a_plus) - vector_field(a_minus)) + .scaled(S::from_f64(1.0 / (2.0 * eps))); + + *out.mut_view().get_mut([i1, i0]) = val; + } + } + out + } +} + +impl, const BATCH: usize> + MatrixValuedMapFromMatrix +{ + /// Auto differentiation of the matrix-valued map. + pub fn fw_autodiff< + TFn, + const OUTROWS: usize, + const OUTCOLS: usize, + const INROWS: usize, + const INCOLS: usize, + >( + matrix_valued: TFn, + a: D::RealMatrix, + ) -> MutTensorDDRC + where + TFn: Fn(D::DualMatrix) -> D::DualMatrix, + { + matrix_valued(D::matrix_v(a)).dij_val().unwrap() + } +} + +#[test] +fn matrix_valued_map_from_vector_tests() { + use crate::calculus::dual::dual_scalar::DualBatchScalar; + use crate::calculus::dual::dual_scalar::DualScalar; + use crate::calculus::maps::matrix_valued_maps::MatrixValuedMapFromVector; + use crate::linalg::matrix::IsMatrix; + use crate::linalg::scalar::IsScalar; + use crate::linalg::vector::IsVector; + use crate::linalg::BatchScalarF64; + use crate::tensor::tensor_view::IsTensorLike; + + #[cfg(test)] + trait Test { + fn run(); + } + + macro_rules! def_test_template { + ( $scalar:ty, $dual_scalar: ty, $batch:literal + ) => { + #[cfg(test)] + impl Test for $scalar { + fn run() { + { + // [[ i ]] + // [[ ]] + // [[ j ]] [[ ]] + // [[ ]] [[ 0 -k j x ]] + // [[ k ]] [[ ]] + // hat [[ ]] = [[ k 0 -i y ]] + // [[ x ]] [[ ]] + // [[ ]] [[ -j i 0 z ]] + // [[ y ]] [[ ]] + // [[ ]] + // [[ z ]] + fn hat_fn, const BATCH: usize>( + v: S::Vector<6>, + ) -> S::Matrix<3, 4> { + let i = v.get_elem(0); + let j = v.get_elem(1); + let k = v.get_elem(2); + let ni = -i.clone(); + let nj = -j.clone(); + let nk = -k.clone(); + let x = v.get_elem(3); + let y = v.get_elem(4); + let z = v.get_elem(5); + + S::Matrix::<3, 4>::from_array2([ + [S::from_f64(0.0), nk, j, x], + [k, S::from_f64(0.0), ni, y], + [nj, i, S::from_f64(0.0), z], + ]) + } + + let a = <$scalar as IsScalar<$batch>>::Vector::<6>::new( + <$scalar>::from_f64(0.1), + <$scalar>::from_f64(0.2), + <$scalar>::from_f64(0.4), + <$scalar>::from_f64(0.7), + <$scalar>::from_f64(0.8), + <$scalar>::from_f64(0.9), + ); + + let finite_diff = + MatrixValuedMapFromVector::<$scalar, $batch>::sym_diff_quotient( + hat_fn::<$scalar, $batch>, + a, + 1e-6, + ); + let auto_grad = + MatrixValuedMapFromVector::<$dual_scalar, $batch>::fw_autodiff( + hat_fn::<$dual_scalar, $batch>, + a, + ); + approx::assert_abs_diff_eq!( + finite_diff.view().elem_view(), + auto_grad.view().elem_view(), + epsilon = 0.0001 + ); + } + + // [[ a b ]] 1 [[ d -b ]] + // inv [[ ]] = ------- [[ ]] + // [[ c d ]] ad - bc [[ -c a ]] + + fn f, const BATCH: usize>( + m: S::Matrix<2, 2>, + ) -> S::Matrix<2, 2> { + let a = m.get_elem([0, 0]); + let b = m.get_elem([0, 1]); + + let c = m.get_elem([1, 0]); + let d = m.get_elem([1, 1]); + + let det = + S::from_f64(1.0) / (a.clone() * d.clone() - (b.clone() * c.clone())); + + 
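+                        // `det` here is really the reciprocal 1/(ad - bc):
+                        // the adjugate entries below are scaled by it to
+                        // form the inverse. The derivative blows up as
+                        // ad - bc -> 0, which is presumably why the
+                        // assertion below uses a loose tolerance.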
S::Matrix::from_array2([
+                        [det.clone() * d, -det.clone() * b],
+                        [-det.clone() * c, det * a],
+                    ])
+                }
+                let a = <$scalar as IsScalar<$batch>>::Matrix::<2, 2>::new(
+                    <$scalar>::from_f64(0.1),
+                    <$scalar>::from_f64(0.2),
+                    <$scalar>::from_f64(0.4),
+                    <$scalar>::from_f64(0.7),
+                );
+
+                let finite_diff =
+                    MatrixValuedMapFromMatrix::<$scalar, $batch>::sym_diff_quotient(
+                        f::<$scalar, $batch>,
+                        a,
+                        1e-6,
+                    );
+                let auto_grad = MatrixValuedMapFromMatrix::<$dual_scalar, $batch>::fw_autodiff(
+                    f::<$dual_scalar, $batch>,
+                    a,
+                );
+
+                approx::assert_abs_diff_eq!(
+                    finite_diff.view().elem_view(),
+                    auto_grad.view().elem_view(),
+                    epsilon = 2.0
+                );
+            }
+        }
+    };
+}
+
+    def_test_template!(f64, DualScalar, 1);
+    def_test_template!(BatchScalarF64<2>, DualBatchScalar<2>, 2);
+    def_test_template!(BatchScalarF64<4>, DualBatchScalar<4>, 4);
+
+    f64::run();
+    BatchScalarF64::<2>::run();
+    BatchScalarF64::<4>::run();
+}
diff --git a/crates/sophus_core/src/calculus/maps/scalar_valued_maps.rs b/crates/sophus_core/src/calculus/maps/scalar_valued_maps.rs
new file mode 100644
index 0000000..65424b0
--- /dev/null
+++ b/crates/sophus_core/src/calculus/maps/scalar_valued_maps.rs
@@ -0,0 +1,221 @@
+use crate::calculus::dual::dual_scalar::IsDualScalar;
+use crate::linalg::matrix::IsMatrix;
+use crate::linalg::scalar::IsRealScalar;
+use crate::linalg::scalar::IsScalar;
+use crate::linalg::vector::IsVector;
+use crate::tensor::mut_tensor::MutTensorDD;
+use crate::tensor::tensor_view::IsTensorLike;
+
+/// Scalar-valued map on a vector space.
+///
+/// This is a function which takes a vector and returns a scalar:
+///
+/// f: ℝᵐ -> ℝ
+///
+/// These functions are also called scalar fields (on vector spaces).
+///
+pub struct ScalarValuedMapFromVector<S: IsScalar<BATCH>, const BATCH: usize> {
+    phantom: std::marker::PhantomData<S>,
+}
+
+impl<S: IsRealScalar<BATCH>, const BATCH: usize> ScalarValuedMapFromVector<S, BATCH> {
+    /// Finite difference quotient of the scalar-valued map.
+    ///
+    /// The derivative is a vector or rank-1 tensor of shape (Rᵢ).
+    pub fn sym_diff_quotient<TFn, const INROWS: usize>(
+        scalar_valued: TFn,
+        a: S::RealVector<INROWS>,
+        eps: f64,
+    ) -> S::RealVector<INROWS>
+    where
+        TFn: Fn(S::RealVector<INROWS>) -> S::RealScalar,
+    {
+        let mut out = S::RealVector::<INROWS>::zeros();
+
+        for r in 0..INROWS {
+            let mut a_plus = a;
+            a_plus[r] += S::RealScalar::from_f64(eps);
+
+            let mut a_minus = a;
+            a_minus[r] -= S::RealScalar::from_f64(eps);
+
+            out.set_elem(
+                r,
+                (scalar_valued(a_plus) - scalar_valued(a_minus))
+                    / S::RealScalar::from_f64(2.0 * eps),
+            );
+        }
+        out
+    }
+}
+
+impl<D: IsDualScalar<BATCH>, const BATCH: usize> ScalarValuedMapFromVector<D, BATCH> {
+    /// Auto differentiation of the scalar-valued map.
+    pub fn fw_autodiff<TFn, const INROWS: usize>(
+        scalar_valued: TFn,
+        a: D::RealVector<INROWS>,
+    ) -> D::RealVector<INROWS>
+    where
+        TFn: Fn(D::DualVector<INROWS>) -> D,
+    {
+        let jacobian: MutTensorDD<D::RealScalar> =
+            scalar_valued(D::vector_v(a)).dij_val().unwrap().clone();
+        assert_eq!(jacobian.dims(), [INROWS, 1]);
+        let mut out = D::RealVector::<INROWS>::zeros();
+
+        for r in 0..jacobian.dims()[0] {
+            out[r] = jacobian.get([r, 0]);
+        }
+        out
+    }
+}
+
+/// Scalar-valued map on a product space (= space of matrices).
+///
+/// This is a function which takes a matrix and returns a scalar:
+///
+/// f: ℝᵐ x ℝⁿ -> ℝ
+pub struct ScalarValuedMapFromMatrix<S: IsScalar<BATCH>, const BATCH: usize> {
+    phantom: std::marker::PhantomData<S>,
+}
+
+impl<S: IsRealScalar<BATCH>, const BATCH: usize> ScalarValuedMapFromMatrix<S, BATCH> {
+    /// Finite difference quotient of the scalar-valued map.
+    ///
+    /// The derivative is a matrix or rank-2 tensor of shape (Rᵢ x Cⱼ).
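+    // The symmetric difference quotient below approximates each partial
+    // derivative as
+    //
+    //     df/da[r][c] ≈ (f(a + ε·e_rc) - f(a - ε·e_rc)) / (2ε),
+    //
+    // where e_rc is 1 at entry (r, c) and 0 elsewhere. The symmetric form
+    // has O(ε²) truncation error, which is why ε = 1e-6 with a 1e-4
+    // tolerance suffices in the tests at the end of this file.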
+ pub fn sym_diff_quotient( + scalar_valued: TFn, + a: S::RealMatrix, + eps: f64, + ) -> S::RealMatrix + where + TFn: Fn(S::RealMatrix) -> S::RealScalar, + { + let mut out = S::RealMatrix::::zeros(); + + for r in 0..INROWS { + for c in 0..INCOLS { + let mut a_plus = a; + a_plus[(r, c)] += S::RealScalar::from_f64(eps); + let mut a_minus = a; + a_minus[(r, c)] -= S::RealScalar::from_f64(eps); + + out[(r, c)] = (scalar_valued(a_plus) - scalar_valued(a_minus)) + / S::RealScalar::from_f64(2.0 * eps); + } + } + out + } +} + +impl, const BATCH: usize> ScalarValuedMapFromMatrix { + /// Auto differentiation of the scalar-valued map. + pub fn fw_autodiff( + scalar_valued: TFn, + a: D::RealMatrix, + ) -> D::RealMatrix + where + TFn: Fn(D::DualMatrix) -> D, + { + let jacobian: MutTensorDD = + scalar_valued(D::matrix_v(a)).dij_val().unwrap().clone(); + assert_eq!(jacobian.dims(), [INROWS, INCOLS]); + let mut out = D::RealMatrix::::zeros(); + + for r in 0..jacobian.dims()[0] { + for c in 0..jacobian.dims()[1] { + out[(r, c)] = jacobian.get([r, c]); + } + } + out + } +} + +#[test] +fn scalar_valued_map_tests() { + use crate::calculus::dual::dual_scalar::DualBatchScalar; + use crate::calculus::dual::dual_scalar::DualScalar; + use crate::linalg::BatchScalarF64; + + #[cfg(test)] + trait Test { + fn run(); + } + + macro_rules! def_scalar_valued_map_test_template { + ($batch:literal, $scalar: ty, $dual_scalar: ty + ) => { + #[cfg(test)] + impl Test for $scalar { + fn run() { + use crate::linalg::matrix::IsMatrix; + use crate::linalg::vector::IsVector; + + let a = <$scalar as IsScalar<$batch>>::Vector::<2>::new( + <$scalar>::from_f64(0.1), + <$scalar>::from_f64(0.4), + ); + + fn f, const BATCH: usize>(x: S::Vector<2>) -> S { + x.norm() + } + + let finite_diff = + ScalarValuedMapFromVector::<$scalar, $batch>::sym_diff_quotient(f, a, 1e-6); + let auto_grad = + ScalarValuedMapFromVector::<$dual_scalar, $batch>::fw_autodiff(f, a); + approx::assert_abs_diff_eq!(finite_diff, auto_grad, epsilon = 0.0001); + + // [[ a, b ]] + // det [[ c, d ]] = ad - bc + // [[ e, f ]] + + fn determinant_fn, const BATCH: usize>( + mat: S::Matrix<3, 2>, + ) -> S { + let a = mat.get_elem([0, 0]); + let b = mat.get_elem([0, 1]); + + let c = mat.get_elem([1, 0]); + let d = mat.get_elem([1, 1]); + + (a * d) - (b * c) + } + + let mut mat = <$scalar as IsScalar<$batch>>::Matrix::<3, 2>::zeros(); + mat[(0, 0)] = <$scalar>::from_f64(4.6); + mat[(1, 0)] = <$scalar>::from_f64(1.6); + mat[(1, 1)] = <$scalar>::from_f64(0.6); + + let finite_diff = + ScalarValuedMapFromMatrix::<$scalar, $batch>::sym_diff_quotient( + determinant_fn, + mat, + 1e-6, + ); + let auto_grad = ScalarValuedMapFromMatrix::<$dual_scalar, $batch>::fw_autodiff( + determinant_fn, + mat, + ); + approx::assert_abs_diff_eq!(finite_diff, auto_grad, epsilon = 0.0001); + } + } + }; + } + + def_scalar_valued_map_test_template!(1, f64, DualScalar); + def_scalar_valued_map_test_template!(2, BatchScalarF64<2>, DualBatchScalar<2>); + def_scalar_valued_map_test_template!(4, BatchScalarF64<4>, DualBatchScalar<4>); + def_scalar_valued_map_test_template!(8, BatchScalarF64<8>, DualBatchScalar<8>); + def_scalar_valued_map_test_template!(16, BatchScalarF64<16>, DualBatchScalar<16>); + def_scalar_valued_map_test_template!(32, BatchScalarF64<32>, DualBatchScalar<32>); + def_scalar_valued_map_test_template!(64, BatchScalarF64<64>, DualBatchScalar<64>); + + f64::run(); + BatchScalarF64::<2>::run(); + BatchScalarF64::<4>::run(); + BatchScalarF64::<8>::run(); + BatchScalarF64::<16>::run(); + 
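+    // The remaining widths below repeat the identical checks; together with
+    // the lane counts above, this covers every power-of-two lane count that
+    // std::simd's SupportedLaneCount currently allows (1 through 64).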
BatchScalarF64::<32>::run(); + BatchScalarF64::<64>::run(); +} diff --git a/crates/sophus_core/src/calculus/maps/vector_valued_maps.rs b/crates/sophus_core/src/calculus/maps/vector_valued_maps.rs new file mode 100644 index 0000000..4497f3d --- /dev/null +++ b/crates/sophus_core/src/calculus/maps/vector_valued_maps.rs @@ -0,0 +1,322 @@ +use crate::calculus::dual::dual_scalar::IsDualScalar; +use crate::calculus::dual::dual_vector::IsDualVector; +use crate::linalg::matrix::IsMatrix; +use crate::linalg::scalar::IsRealScalar; +use crate::linalg::scalar::IsScalar; +use crate::linalg::vector::IsVector; +use crate::linalg::SVec; +use crate::tensor::mut_tensor::MutTensorDDR; +use crate::tensor::mut_tensor::MutTensorDR; +use crate::tensor::mut_tensor_view::IsMutTensorLike; +use crate::tensor::tensor_view::IsTensorLike; +use std::marker::PhantomData; + +/// Vector-valued map on a vector space. +/// +/// This is a function which takes a vector and returns a vector: +/// +/// f: ℝᵐ -> ℝʳ +/// +/// These functions are also called vector fields (on vector space). +/// +pub struct VectorValuedMapFromVector, const BATCH: usize> { + phantom: std::marker::PhantomData, +} + +impl, const BATCH: usize> + VectorValuedMapFromVector +{ + /// Finite difference quotient of the vector-valued map. + /// + /// The derivative is a matrix or rank-2 tensor with shape (Rₒ x Rᵢ). + /// + /// For efficiency reasons, we return the transpose Rᵢ x (Rₒ) + /// + pub fn sym_diff_quotient( + vector_valued: TFn, + a: S::RealVector, + eps: f64, + ) -> MutTensorDR + where + TFn: Fn(S::RealVector) -> SVec, + SVec: IsVector, + { + let mut out = MutTensorDR::::from_shape([INROWS]); + let eps_v = S::RealScalar::from_f64(eps); + + for r in 0..INROWS { + let mut a_plus = a; + a_plus[r] += eps_v; + + let mut a_minus = a; + a_minus[r] -= eps_v; + let d = (vector_valued(a_plus) - vector_valued(a_minus)) + .scaled(S::from_f64(1.0 / (2.0 * eps))); + + out.get_mut([r]).copy_from(&d); + } + out + } + + /// Finite difference quotient of the vector-valued map. + /// + /// The derivative is a matrix or rank-2 tensor with shape (Rₒ x Rᵢ). + /// + pub fn static_sym_diff_quotient( + vector_valued: TFn, + a: S::RealVector, + eps: f64, + ) -> S::RealMatrix + where + TFn: Fn(S::RealVector) -> SVec, + SVec: IsVector, + { + let jac = Self::sym_diff_quotient(vector_valued, a, eps); + let mut sjac = S::RealMatrix::::zeros(); + + for r in 0..INROWS { + let v = jac.get([r]); + for c in 0..OUTROWS { + sjac[(c, r)] = v[c]; + } + } + + sjac + + // todo!() + } +} + +impl, const BATCH: usize> + VectorValuedMapFromVector +{ + /// Auto differentiation of the vector-valued map. + pub fn fw_autodiff( + vector_valued: TFn, + a: D::RealVector, + ) -> MutTensorDR + where + TFn: Fn(D::DualVector) -> D::DualVector, + { + let v = vector_valued(D::vector_v(a)); + let d = v.dij_val(); + if d.is_none() { + return MutTensorDR::from_shape([INROWS]); + } + + MutTensorDR { + mut_array: d.unwrap().mut_array.into_shape([INROWS]).unwrap(), + phantom: PhantomData, + } + } + + /// Auto differentiation of the vector-valued map. + pub fn static_fw_autodiff( + vector_valued: TFn, + a: D::RealVector, + ) -> D::RealMatrix + where + TFn: Fn(D::DualVector) -> D::DualVector, + { + let jac = Self::fw_autodiff(vector_valued, a); + let mut sjac = D::RealMatrix::::zeros(); + + for r in 0..INROWS { + let v = jac.get([r]); + for c in 0..OUTROWS { + sjac[(c, r)] = v[c]; + } + } + + sjac + } +} + +/// Vector-valued map on a product space (= space of matrices). 
+/// +/// This is a function which takes a matrix and returns a vector: +/// +/// f: ℝᵐ x ℝⁿ -> ℝʳ +/// +/// This type of function is also called a vector field (on product spaces). +/// +pub struct VectorValuedMapFromMatrix, const BATCH: usize> { + phantom: std::marker::PhantomData, +} + +impl, const BATCH: usize> VectorValuedMapFromMatrix { + /// Finite difference quotient of the vector-valued map. + /// + /// The derivative is a matrix or rank-3 tensor with shape (Rₒ x Rᵢ x Cᵢ). + /// + /// For efficiency reasons, we return Rᵢ x Cᵢ x (Rₒ) + /// + pub fn sym_diff_quotient( + vector_valued: TFn, + a: S::RealMatrix, + eps: f64, + ) -> MutTensorDDR + where + TFn: Fn(S::RealMatrix) -> SVec, + SVec: IsVector, + { + let mut out = MutTensorDDR::::from_shape([INROWS, INCOLS]); + let eps_b = S::RealScalar::from_f64(eps); + + for c in 0..INCOLS { + for r in 0..INROWS { + let mut a_plus = a; + + a_plus[(r, c)] += eps_b; + + let mut a_minus = a; + + a_minus[(r, c)] -= eps_b; + + let vv = (vector_valued(a_plus) - vector_valued(a_minus)) + .scaled(S::from_f64(1.0 / (2.0 * eps))); + *out.mut_view().get_mut([r, c]) = vv; + } + } + out + } +} + +impl, const BATCH: usize> + VectorValuedMapFromMatrix +{ + /// Auto differentiation of the vector-valued map. + pub fn fw_autodiff( + vector_valued: TFn, + a: D::RealMatrix, + ) -> MutTensorDDR + where + TFn: Fn(D::DualMatrix) -> D::DualVector, + { + vector_valued(D::matrix_v(a)).dij_val().unwrap() + } +} + +#[test] +fn vector_valued_map_from_vector_tests() { + use crate::calculus::dual::dual_scalar::DualBatchScalar; + use crate::calculus::dual::dual_scalar::DualScalar; + use crate::calculus::maps::vector_valued_maps::VectorValuedMapFromMatrix; + use crate::calculus::maps::vector_valued_maps::VectorValuedMapFromVector; + use crate::linalg::matrix::IsMatrix; + use crate::linalg::vector::IsVector; + use crate::linalg::BatchScalarF64; + use crate::tensor::tensor_view::IsTensorLike; + + #[cfg(test)] + trait Test { + fn run(); + } + + macro_rules! 
def_test_template { + ( $scalar:ty, $dual_scalar: ty, $batch:literal + ) => { + #[cfg(test)] + impl Test for $scalar { + fn run() { + { + let a = <$scalar as IsScalar<$batch>>::RealVector::<3>::new( + <$scalar>::from_f64(0.6), + <$scalar>::from_f64(2.2), + <$scalar>::from_f64(1.1), + ); + + // [[ x ]] [[ x / z ]] + // proj [[ y ]] = [[ ]] + // [[ z ]] [[ y / z ]] + fn proj_fn, const BATCH: usize>( + v: S::Vector<3>, + ) -> S::Vector<2> { + let x = IsVector::get_elem(&v, 0); + let y = IsVector::get_elem(&v, 1); + let z = IsVector::get_elem(&v, 2); + S::Vector::<2>::from_array([x / z.clone(), y / z]) + } + + let finite_diff = + VectorValuedMapFromVector::<$scalar, $batch>::sym_diff_quotient( + proj_fn::<$scalar, $batch>, + a, + 1e-6, + ); + let auto_grad = + VectorValuedMapFromVector::<$dual_scalar, $batch>::fw_autodiff( + proj_fn::<$dual_scalar, $batch>, + a, + ); + for i in 0..2 { + approx::assert_abs_diff_eq!( + finite_diff.get([i]), + auto_grad.get([i]), + epsilon = 0.0001 + ); + } + + let sfinite_diff = + VectorValuedMapFromVector::<$scalar, $batch>::static_sym_diff_quotient( + proj_fn::<$scalar, $batch>, + a, + 1e-6, + ); + let sauto_grad = + VectorValuedMapFromVector::<$dual_scalar, $batch>::static_fw_autodiff( + proj_fn::<$dual_scalar, $batch>, + a, + ); + approx::assert_abs_diff_eq!(sfinite_diff, sauto_grad, epsilon = 0.0001); + } + + fn f, const BATCH: usize>( + x: S::Matrix<3, 2>, + ) -> S::Vector<4> { + let a = x.get_elem([0, 0]); + let b = x.get_elem([0, 1]); + let c = x.get_elem([1, 0]); + let d = x.get_elem([1, 1]); + let e = x.get_elem([2, 0]); + let f = x.get_elem([2, 1]); + + S::Vector::<4>::from_array([a + b, c + d, e + f, S::from_f64(1.0)]) + } + + let mut mat = <$scalar as IsScalar<$batch>>::RealMatrix::<3, 2>::zeros(); + mat[(0, 0)] = <$scalar>::from_f64(-4.6); + mat[(0, 1)] = <$scalar>::from_f64(-1.6); + mat[(1, 0)] = <$scalar>::from_f64(0.6); + mat[(1, 1)] = <$scalar>::from_f64(1.6); + mat[(2, 0)] = <$scalar>::from_f64(-1.6); + mat[(2, 1)] = <$scalar>::from_f64(0.2); + + let finite_diff = + VectorValuedMapFromMatrix::<$scalar, $batch>::sym_diff_quotient( + f::<$scalar, $batch>, + mat, + 1e-6, + ); + let auto_grad = VectorValuedMapFromMatrix::<$dual_scalar, $batch>::fw_autodiff( + f::<$dual_scalar, $batch>, + mat, + ); + approx::assert_abs_diff_eq!( + finite_diff.elem_view(), + auto_grad.elem_view(), + epsilon = 0.0001 + ); + } + } + }; + } + + def_test_template!(f64, DualScalar, 1); + def_test_template!(BatchScalarF64<2>, DualBatchScalar<2>, 2); + def_test_template!(BatchScalarF64<4>, DualBatchScalar<4>, 4); + + f64::run(); + BatchScalarF64::<2>::run(); + BatchScalarF64::<4>::run(); +} diff --git a/crates/sophus_calculus/src/region.rs b/crates/sophus_core/src/calculus/region.rs similarity index 100% rename from crates/sophus_calculus/src/region.rs rename to crates/sophus_core/src/calculus/region.rs diff --git a/crates/sophus_calculus/src/spline.rs b/crates/sophus_core/src/calculus/spline.rs similarity index 80% rename from crates/sophus_calculus/src/spline.rs rename to crates/sophus_core/src/calculus/spline.rs index 109e97f..5e60aa1 100644 --- a/crates/sophus_calculus/src/spline.rs +++ b/crates/sophus_core/src/calculus/spline.rs @@ -1,26 +1,21 @@ /// Cubic B-Spline details pub mod spline_segment; -use crate::spline::spline_segment::CubicBSplineSegment; -use crate::spline::spline_segment::SegmentCase; -use crate::types::scalar::IsScalar; -use crate::types::vector::IsVectorLike; - -use assertables::assert_ge; -use assertables::assert_ge_as_result; -use 
assertables::assert_le; -use assertables::assert_le_as_result; -use nalgebra::Scalar; +use crate::calculus::spline::spline_segment::CubicBSplineSegment; +use crate::calculus::spline::spline_segment::SegmentCase; +use crate::linalg::bool_mask::BoolMask; +use crate::linalg::matrix::IsMatrix; +use crate::linalg::scalar::IsSingleScalar; /// Cubic B-Spline implementation -pub struct CubicBSplineImpl, const DIMS: usize> { +pub struct CubicBSplineImpl { /// Control points - pub control_points: Vec>, + pub control_points: Vec>, /// delta between control points pub delta_t: S, } -impl, const DIMS: usize> CubicBSplineImpl { +impl CubicBSplineImpl { /// indices involved pub fn idx_involved(&self, segment_idx: usize) -> Vec { let num = self.num_segments(); @@ -34,7 +29,7 @@ impl, const DIMS: usize> CubicBSplineImpl { } /// Interpolate - pub fn interpolate(&self, segment_idx: usize, u: S) -> S::Vector { + pub fn interpolate(&self, segment_idx: usize, u: S) -> S::SingleVector { let num = self.num_segments(); assert!(segment_idx < num); @@ -73,7 +68,7 @@ impl, const DIMS: usize> CubicBSplineImpl { segment_idx: usize, u: S, control_point_idx: usize, - ) -> S::Matrix { + ) -> S::SingleMatrix { let num = self.num_segments(); assert!(segment_idx < num); @@ -99,7 +94,7 @@ impl, const DIMS: usize> CubicBSplineImpl { ], }; - let mut dxi = S::Matrix::::zero(); + let mut dxi: S::SingleMatrix = S::SingleMatrix::::zeros(); if idx_prev == control_point_idx { dxi = dxi + spline_segment.dxi_interpolate(u.clone(), 0); } @@ -118,7 +113,7 @@ impl, const DIMS: usize> CubicBSplineImpl { /// Index and u #[derive(Clone, Debug, Copy)] -pub struct IndexAndU { +pub struct IndexAndU { /// segment index pub segment_idx: usize, /// u @@ -126,7 +121,7 @@ pub struct IndexAndU { } /// Cubic B-Spline -pub struct CubicBSpline, const DIMS: usize> { +pub struct CubicBSpline { /// Cubic B-Spline implementation pub spline_impl: CubicBSplineImpl, /// start time t0 @@ -135,16 +130,16 @@ pub struct CubicBSpline, const DIMS: usize> { /// Cubic B-Spline parameters #[derive(Clone, Debug, Copy)] -pub struct CubicBSplineParams + 'static> { +pub struct CubicBSplineParams { /// delta between control points pub delta_t: S, /// start time t0 pub t0: S, } -impl + 'static, const DIMS: usize> CubicBSpline { +impl CubicBSpline { /// create a new cubic B-Spline - pub fn new(control_points: Vec>, params: CubicBSplineParams) -> Self { + pub fn new(control_points: Vec>, params: CubicBSplineParams) -> Self { Self { spline_impl: CubicBSplineImpl { control_points, @@ -155,14 +150,14 @@ impl + 'static, const DIMS: usize> CubicBSpline { } /// interpolate - pub fn interpolate(&self, t: S) -> S::Vector { + pub fn interpolate(&self, t: S) -> S::SingleVector { let index_and_u = self.index_and_u(t); self.spline_impl .interpolate(index_and_u.segment_idx, index_and_u.u) } /// derivative of the interpolation - pub fn dxi_interpolate(&self, t: S, control_point_idx: usize) -> S::Matrix { + pub fn dxi_interpolate(&self, t: S, control_point_idx: usize) -> S::SingleMatrix { let index_and_u = self.index_and_u(t); self.spline_impl .dxi_interpolate(index_and_u.segment_idx, index_and_u.u, control_point_idx) @@ -176,20 +171,20 @@ impl + 'static, const DIMS: usize> CubicBSpline { /// index and u pub fn index_and_u(&self, t: S) -> IndexAndU { - assert_ge!(t.real(), self.t0.real()); - assert_le!(t.real(), self.t_max().real()); + assert!(t.greater_equal(&self.t0).all()); + assert!(t.less_equal(&self.t_max()).all()); - let normalized_t = self.normalized_t(t.clone()); + let normalized_t: 
S = self.normalized_t(t.clone()); let mut idx_and_u = IndexAndU:: { - segment_idx: normalized_t.floor() as usize, + segment_idx: normalized_t.i64_floor() as usize, u: normalized_t.clone().fract(), }; println!("{:?}", idx_and_u); let eps = 0.00001; - if idx_and_u.u.real() > eps { + if idx_and_u.u.single_real_scalar() > eps { println!("case A"); return idx_and_u; } @@ -203,7 +198,7 @@ impl + 'static, const DIMS: usize> CubicBSpline { println!("case C"); idx_and_u.segment_idx -= 1; - idx_and_u.u = idx_and_u.u + S::c(1.0); + idx_and_u.u += S::from_f64(1.0); idx_and_u } @@ -220,7 +215,7 @@ impl + 'static, const DIMS: usize> CubicBSpline { /// t_max pub fn t_max(&self) -> S { - self.t0.clone() + S::c(self.num_segments() as f64) * self.spline_impl.delta_t.clone() + self.t0.clone() + S::from_f64(self.num_segments() as f64) * self.spline_impl.delta_t.clone() } } @@ -228,11 +223,11 @@ mod test { #[test] fn test() { - use super::super::points::example_points; use super::super::spline::CubicBSpline; use super::super::spline::CubicBSplineParams; + use crate::points::example_points; - let points = example_points::(); + let points = example_points::(); for (t0, delta_t) in [(0.0, 1.0)] { let params = CubicBSplineParams { delta_t, t0 }; diff --git a/crates/sophus_calculus/src/spline/spline_segment.rs b/crates/sophus_core/src/calculus/spline/spline_segment.rs similarity index 60% rename from crates/sophus_calculus/src/spline/spline_segment.rs rename to crates/sophus_core/src/calculus/spline/spline_segment.rs index f9697fb..5e488fb 100644 --- a/crates/sophus_calculus/src/spline/spline_segment.rs +++ b/crates/sophus_core/src/calculus/spline/spline_segment.rs @@ -1,8 +1,6 @@ -use crate::types::matrix::IsMatrix; -use crate::types::scalar::IsScalar; -use crate::types::vector::IsVector; -use crate::types::vector::IsVectorLike; - +use crate::linalg::matrix::IsMatrix; +use crate::linalg::scalar::IsSingleScalar; +use crate::linalg::vector::IsVector; use std::marker::PhantomData; /// cubic basis function @@ -10,10 +8,10 @@ pub struct CubicBasisFunction { phantom: PhantomData, } -impl> CubicBasisFunction { +impl CubicBasisFunction { /// C matrix - pub fn c() -> S::Matrix<3, 4> { - S::Matrix::<3, 4>::from_c_array2([ + pub fn c() -> S::SingleMatrix<3, 4> { + S::SingleMatrix::<3, 4>::from_f64_array2([ [5.0 / 6.0, 3.0 / 6.0, -3.0 / 6.0, 1.0 / 6.0], [1.0 / 6.0, 3.0 / 6.0, 3.0 / 6.0, -2.0 / 6.0], [0.0, 0.0, 0.0, 1.0 / 6.0], @@ -21,49 +19,63 @@ impl> CubicBasisFunction { } /// B(u) matrix - pub fn b(u: S) -> S::Vector<3> { + pub fn b(u: S) -> S::SingleVector<3> { let u_sq = u.clone() * u.clone(); - Self::c() * S::Vector::<4>::from_array([1.0.into(), u.clone(), u_sq.clone(), u_sq * u]) + let m34 = Self::c(); + let v4 = + S::SingleVector::<4>::from_array([S::from_f64(1.0), u.clone(), u_sq.clone(), u_sq * u]); + + m34 * v4 } /// derivative of B(u) matrix with respect to u - pub fn du_b(u: S, delta_t: S) -> S::Vector<3> { + pub fn du_b(u: S, delta_t: S) -> S::SingleVector<3> { let u_sq = u.clone() * u.clone(); - Self::c().scaled(S::c(1.0) / delta_t) - * S::Vector::<4>::from_array([S::c(0.0), S::c(1.0), S::c(2.0) * u, S::c(3.0) * u_sq]) + Self::c().scaled(S::from_f64(1.0) / delta_t) + * S::SingleVector::<4>::from_array([ + S::from_f64(0.0), + S::from_f64(1.0), + S::from_f64(2.0) * u, + S::from_f64(3.0) * u_sq, + ]) } /// second derivative of B(u) matrix with respect to u - pub fn du2_b(u: S, delta_t: S) -> S::Vector<3> { - Self::c().scaled(S::c(1.0) / (delta_t.clone() * delta_t)) - * S::Vector::<4>::from_array([S::c(0.0), 
S::c(0.0), S::c(2.0), S::c(6.0) * u]) + pub fn du2_b(u: S, delta_t: S) -> S::SingleVector<3> { + Self::c().scaled(S::from_f64(1.0) / (delta_t.clone() * delta_t)) + * S::SingleVector::<4>::from_array([ + S::from_f64(0.0), + S::from_f64(0.0), + S::from_f64(2.0), + S::from_f64(6.0) * u, + ]) } } /// cubic B-spline function -pub struct CubicBSplineFn, const DIMS: usize> { +pub struct CubicBSplineFn { phantom: PhantomData, } -impl, const DIMS: usize> CubicBSplineFn { +impl CubicBSplineFn { fn interpolate( - control_point: S::Vector, - control_points: [S::Vector; 3], + control_point: S::SingleVector, + control_points: [S::SingleVector; 3], u: S, - ) -> S::Vector { + ) -> S::SingleVector { let b = CubicBasisFunction::::b(u); control_point - + control_points[0].scaled(b.get(0)) - + control_points[1].scaled(b.get(1)) - + control_points[2].scaled(b.get(2)) + + control_points[0].scaled(b.get_elem(0)) + + control_points[1].scaled(b.get_elem(1)) + + control_points[2].scaled(b.get_elem(2)) } - fn dxi_interpolate(u: S, quadruple_idx: usize) -> S::Matrix { + fn dxi_interpolate(u: S, quadruple_idx: usize) -> S::SingleMatrix { let b = CubicBasisFunction::::b(u.clone()); if quadruple_idx == 0 { - S::Matrix::::identity() + S::SingleMatrix::::identity() } else { - S::Matrix::::identity().scaled(b.get(quadruple_idx - 1)) + S::SingleMatrix::::identity().scaled(b.get_elem(quadruple_idx - 1)) } } } @@ -81,19 +93,19 @@ pub enum SegmentCase { /// Cubic B-spline segment #[derive(Clone, Debug)] -pub struct CubicBSplineSegment, const DIMS: usize> { +pub struct CubicBSplineSegment { pub(crate) case: SegmentCase, - pub(crate) control_points: [S::Vector; 4], + pub(crate) control_points: [S::SingleVector; 4], } -impl, const DIMS: usize> CubicBSplineSegment { +impl CubicBSplineSegment { /// Interpolate - pub fn interpolate(&self, u: S) -> S::Vector { + pub fn interpolate(&self, u: S) -> S::SingleVector { match self.case { SegmentCase::First => CubicBSplineFn::interpolate( self.control_points[1].clone(), [ - S::Vector::::zero(), + S::SingleVector::::zeros(), self.control_points[2].clone() - self.control_points[1].clone(), self.control_points[3].clone() - self.control_points[2].clone(), ], @@ -113,7 +125,7 @@ impl, const DIMS: usize> CubicBSplineSegment { [ self.control_points[1].clone() - self.control_points[0].clone(), self.control_points[2].clone() - self.control_points[1].clone(), - S::Vector::::zero(), + S::SingleVector::::zeros(), ], u, ), @@ -121,11 +133,11 @@ impl, const DIMS: usize> CubicBSplineSegment { } /// Derivative of the interpolation with respect to u - pub fn dxi_interpolate(&self, u: S, quadruple_idx: usize) -> S::Matrix { + pub fn dxi_interpolate(&self, u: S, quadruple_idx: usize) -> S::SingleMatrix { match self.case { SegmentCase::First => { if quadruple_idx == 0 { - S::Matrix::::zero() + S::SingleMatrix::::zeros() } else if quadruple_idx == 1 { CubicBSplineFn::dxi_interpolate(u.clone(), 0) - CubicBSplineFn::dxi_interpolate(u.clone(), 2) @@ -160,7 +172,7 @@ impl, const DIMS: usize> CubicBSplineSegment { } else if quadruple_idx == 2 { CubicBSplineFn::dxi_interpolate(u.clone(), 2) } else { - S::Matrix::::zero() + S::SingleMatrix::::zeros() } } } @@ -171,17 +183,17 @@ mod test { #[test] fn test_spline_basis_fn() { - use crate::dual::dual_scalar::Dual; - use crate::dual::dual_vector::DualV; - use crate::maps::vector_valued_maps::VectorValuedMapFromVector; + use crate::calculus::dual::dual_scalar::DualScalar; + use crate::calculus::dual::dual_vector::DualVector; + use 
crate::calculus::maps::vector_valued_maps::VectorValuedMapFromVector; + use crate::calculus::spline::spline_segment::CubicBSplineFn; + use crate::linalg::scalar::IsScalar; + use crate::linalg::vector::IsVector; + use crate::linalg::VecF64; use crate::points::example_points; - use crate::spline::spline_segment::CubicBSplineFn; - use crate::types::scalar::IsScalar; - use crate::types::vector::IsVector; - use crate::types::vector::IsVectorLike; - use crate::types::VecF64; + use num_traits::Zero; - let points = &example_points::(); + let points = &example_points::(); assert!(points.len() >= 8); let mut u = 0.0; @@ -192,38 +204,44 @@ mod test { for p_idx in 0..points.len() - 4 { let first_control_point = points[p_idx]; - let first_control_point_dual = DualV::c(points[p_idx]); + let first_control_point_dual = DualVector::from_real_vector(points[p_idx]); let mut segment_control_points = [VecF64::<3>::zeros(); 3]; - let mut segment_control_points_dual = - [DualV::<3>::zero(), DualV::<3>::zero(), DualV::<3>::zero()]; + let mut segment_control_points_dual = [ + DualVector::<3>::zero(), + DualVector::<3>::zero(), + DualVector::<3>::zero(), + ]; for i in 0..3 { segment_control_points[i] = points[p_idx + 1]; - segment_control_points_dual[i] = DualV::c(segment_control_points[i]); + segment_control_points_dual[i] = + DualVector::from_real_vector(segment_control_points[i]); } - let f0 = |x| -> DualV<3> { - CubicBSplineFn::::interpolate( + let f0 = |x| -> DualVector<3> { + CubicBSplineFn::::interpolate( x, segment_control_points_dual.clone(), - Dual::c(u), + DualScalar::from_real_scalar(u), ) }; - let auto_dx0 = - VectorValuedMapFromVector::static_fw_autodiff(f0, first_control_point); + let auto_dx0 = VectorValuedMapFromVector::::static_fw_autodiff( + f0, + first_control_point, + ); let analytic_dx0 = CubicBSplineFn::::dxi_interpolate(u, 0); approx::assert_abs_diff_eq!(auto_dx0, analytic_dx0, epsilon = 0.0001); for i in 0..3 { - let fi = |x| -> DualV<3> { + let fi = |x| -> DualVector<3> { let mut seg = segment_control_points_dual.clone(); seg[i] = x; - CubicBSplineFn::::interpolate( + CubicBSplineFn::::interpolate( first_control_point_dual.clone(), seg, - Dual::c(u), + DualScalar::from_real_scalar(u), ) }; - let auto_dxi = VectorValuedMapFromVector::static_fw_autodiff( + let auto_dxi = VectorValuedMapFromVector::::static_fw_autodiff( fi, segment_control_points[i], ); @@ -237,32 +255,33 @@ mod test { #[test] fn test_spline_segment() { - use crate::dual::dual_scalar::Dual; - use crate::dual::dual_vector::DualV; - use crate::maps::vector_valued_maps::VectorValuedMapFromVector; + use crate::calculus::dual::dual_scalar::DualScalar; + use crate::calculus::dual::dual_vector::DualVector; + use crate::calculus::maps::vector_valued_maps::VectorValuedMapFromVector; + use crate::calculus::spline::spline_segment::CubicBSplineSegment; + use crate::calculus::spline::spline_segment::SegmentCase; + use crate::linalg::scalar::IsScalar; + use crate::linalg::vector::IsVector; + use crate::linalg::VecF64; use crate::points::example_points; - use crate::spline::spline_segment::CubicBSplineSegment; - use crate::spline::spline_segment::SegmentCase; - use crate::types::scalar::IsScalar; - use crate::types::vector::IsVector; - use crate::types::vector::IsVectorLike; - use crate::types::VecF64; + use num_traits::Zero; - let points = &example_points::(); + let points = &example_points::(); assert!(points.len() >= 8); for p_idx in 0..points.len() - 4 { let mut segment_control_points = [VecF64::<3>::zeros(); 4]; let mut 
segment_control_points_dual = [ - DualV::<3>::zero(), - DualV::<3>::zero(), - DualV::<3>::zero(), - DualV::<3>::zero(), + DualVector::<3>::zero(), + DualVector::<3>::zero(), + DualVector::<3>::zero(), + DualVector::<3>::zero(), ]; for i in 0..4 { segment_control_points[i] = points[p_idx]; - segment_control_points_dual[i] = DualV::c(segment_control_points[i]); + segment_control_points_dual[i] = + DualVector::from_real_vector(segment_control_points[i]); } for case in [SegmentCase::First, SegmentCase::Normal, SegmentCase::Last] { @@ -288,17 +307,21 @@ mod test { f, points[0], 0.0001, ); - let f = |v: DualV<3>| { - let mut base_dual = CubicBSplineSegment:: { + let f = |v: DualVector<3>| { + let mut base_dual = CubicBSplineSegment:: { case, control_points: segment_control_points_dual.clone(), }; base_dual.control_points[i] = v; - base_dual.interpolate(Dual::c(u)) + base_dual.interpolate(DualScalar::from_real_scalar(u)) }; - let auto_dx = VectorValuedMapFromVector::static_fw_autodiff(f, points[i]); + let auto_dx = + VectorValuedMapFromVector::::static_fw_autodiff( + f, + segment_control_points[i], + ); approx::assert_abs_diff_eq!(analytic_dx, num_dx, epsilon = 0.0001); approx::assert_abs_diff_eq!(analytic_dx, auto_dx, epsilon = 0.0001); diff --git a/crates/sophus_core/src/lib.rs b/crates/sophus_core/src/lib.rs new file mode 100644 index 0000000..c46f464 --- /dev/null +++ b/crates/sophus_core/src/lib.rs @@ -0,0 +1,12 @@ +#![feature(portable_simd)] + +/// calculus +pub mod calculus; +/// linear algebra types +pub mod linalg; +/// params +pub mod params; +/// points +pub mod points; +/// tensors +pub mod tensor; diff --git a/crates/sophus_core/src/linalg.rs b/crates/sophus_core/src/linalg.rs new file mode 100644 index 0000000..cd5c702 --- /dev/null +++ b/crates/sophus_core/src/linalg.rs @@ -0,0 +1,94 @@ +pub mod bool_mask; +pub mod matrix; +pub mod scalar; +pub mod vector; +use std::ops::Add; +use std::simd::cmp::SimdPartialEq; +use std::simd::num::SimdFloat; +use std::simd::LaneCount; +use std::simd::Mask; +use std::simd::Simd; +use std::simd::SimdElement; +use std::simd::SupportedLaneCount; + +/// Static vector +pub type SVec = nalgebra::SVector; +/// Static matrix +pub type SMat = + nalgebra::SMatrix; + +/// Batch scalar +#[derive(Clone, Debug, PartialEq, Copy)] +pub struct BatchScalar( + Simd, +) +where + LaneCount: SupportedLaneCount; + +pub type BatchVec = + nalgebra::SVector, ROWS>; + +pub type BatchMat = + nalgebra::SMatrix, ROWS, COLS>; + +/// f32 vector +pub type VecF32 = nalgebra::SVector; +/// f64 vector +pub type VecF64 = nalgebra::SMatrix; +/// f64 matrix +pub type MatF32 = nalgebra::SMatrix; +/// f64 matrix +pub type MatF64 = nalgebra::SMatrix; + +/// batch of f64 scalars +pub type BatchScalarF64 = BatchScalar; +/// batch of f64 vectors +pub type BatchVecF64 = BatchVec; +/// batch of f64 matrices +pub type BatchMatF64 = + BatchMat; + +impl Add for BatchScalar +where + LaneCount: SupportedLaneCount, + Simd: Add>, +{ + type Output = Self; + + fn add(self, rhs: Self) -> Self::Output { + Self(self.0 + rhs.0) + } +} + +impl num_traits::Zero + for BatchScalar +where + LaneCount: SupportedLaneCount, + Simd: SimdFloat, + Simd: + SimdPartialEq> + Add>, +{ + fn zero() -> Self { + Self(Simd::::splat(S::zero())) + } + + fn is_zero(&self) -> bool { + let b = self.0.simd_eq(Simd::::splat(S::zero())); + b.all() + } +} + +#[test] +fn test_core() { + use crate::linalg::scalar::IsScalar; + use approx::assert_abs_diff_eq; + + let vec = SVec::::new(1.0, 2.0, 3.0); + assert_abs_diff_eq!(vec, 
SVec::::new(1.0, 2.0, 3.0)); + + let batch_scalar = BatchScalar::(Simd::::from_array([1.0, 2.0, 3.0, 4.0])); + assert_abs_diff_eq!( + batch_scalar, + BatchScalarF64::<4>::from_real_array([1.0, 2.0, 3.0, 4.0]) + ); +} diff --git a/crates/sophus_core/src/linalg/bool_mask.rs b/crates/sophus_core/src/linalg/bool_mask.rs new file mode 100644 index 0000000..7c2404a --- /dev/null +++ b/crates/sophus_core/src/linalg/bool_mask.rs @@ -0,0 +1,71 @@ +use std::simd::LaneCount; +use std::simd::Mask; +use std::simd::MaskElement; +use std::simd::SupportedLaneCount; + +pub trait BoolMask { + fn all_true() -> Self; + fn all_false() -> Self; + fn all(&self) -> bool; + fn any(&self) -> bool; + fn count(&self) -> usize; + fn lanes(&self) -> usize; +} + +impl BoolMask for bool { + fn all_true() -> bool { + true + } + fn all_false() -> bool { + false + } + + fn all(&self) -> bool { + *self + } + + fn any(&self) -> bool { + *self + } + + fn count(&self) -> usize { + match *self { + true => 1, + false => 0, + } + } + + fn lanes(&self) -> usize { + 1 + } +} + +impl BoolMask for Mask +where + T: MaskElement, + LaneCount: SupportedLaneCount, +{ + fn all_true() -> Self { + Mask::from_array([true; BATCH]) + } + + fn all_false() -> Self { + Mask::from_array([false; BATCH]) + } + + fn all(&self) -> bool { + Mask::all(*self) + } + + fn any(&self) -> bool { + Mask::any(*self) + } + + fn count(&self) -> usize { + self.to_array().iter().filter(|x| **x).count() + } + + fn lanes(&self) -> usize { + BATCH + } +} diff --git a/crates/sophus_core/src/linalg/matrix.rs b/crates/sophus_core/src/linalg/matrix.rs new file mode 100644 index 0000000..12f7157 --- /dev/null +++ b/crates/sophus_core/src/linalg/matrix.rs @@ -0,0 +1,426 @@ +use super::scalar::IsRealScalar; +use super::scalar::IsSingleScalar; +use crate::calculus::dual::dual_matrix::DualBatchMatrix; +use crate::calculus::dual::dual_matrix::DualMatrix; +use crate::linalg::scalar::IsScalar; +use crate::linalg::BatchMatF64; +use crate::linalg::BatchScalarF64; +use crate::linalg::BatchVecF64; +use crate::linalg::MatF64; +use crate::linalg::VecF64; +use approx::AbsDiffEq; +use approx::RelativeEq; +use num_traits::Zero; +use std::fmt::Debug; +use std::ops::Add; +use std::ops::Index; +use std::ops::IndexMut; +use std::ops::Mul; +use std::ops::Neg; +use std::ops::Sub; +use std::simd::LaneCount; +use std::simd::Mask; +use std::simd::SupportedLaneCount; + +/// Matrix - either a real (f64) or a dual number matrix +pub trait IsMatrix< + S: IsScalar, + const ROWS: usize, + const COLS: usize, + const BATCH_SIZE: usize, +>: + Debug + + Clone + + Sized + + Mul, Output = S::Vector> + + Neg + + Add + + Sub + + Neg + + AbsDiffEq + + RelativeEq +{ + /// create 1x2 block matrix + fn block_mat1x2( + left_col: S::Matrix, + righ_col: S::Matrix, + ) -> Self; + + fn set_elem(&mut self, idx: [usize; 2], val: S); + + /// create 2x1 block matrix + fn block_mat2x1( + top_row: S::Matrix, + bot_row: S::Matrix, + ) -> Self; + + /// create 2x2 block matrix + fn block_mat2x2( + top_row: (S::Matrix, S::Matrix), + bot_row: (S::Matrix, S::Matrix), + ) -> Self; + + fn select(self, mask: &S::Mask, other: Self) -> Self; + + /// create from 2d array + fn from_array2(vals: [[S; COLS]; ROWS]) -> Self; + + /// create from constant 2d array + fn from_real_array2(vals: [[S::RealScalar; COLS]; ROWS]) -> Self; + + /// create from constant 2d array + fn from_f64_array2(vals: [[f64; COLS]; ROWS]) -> Self; + + /// create a constant matrix + fn from_real_matrix(val: S::RealMatrix) -> Self; + + fn from_scalar(val: S) -> Self; + + 
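+    // Both `from_scalar` above and `from_f64` further down appear to splat a
+    // single value into every entry of the matrix; compare the `ones`/`zeros`
+    // defaults at the end of this trait, which are defined via `from_f64`.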
/// extract column vector + fn get_col_vec(&self, c: usize) -> S::Vector; + + /// extract row vector + fn get_row_vec(&self, r: usize) -> S::Vector; + + /// get element + fn get_elem(&self, idx: [usize; 2]) -> S; + + /// get fixed submatrix + fn get_fixed_submat( + &self, + start_r: usize, + start_c: usize, + ) -> S::Matrix; + + fn to_dual(self) -> S::DualMatrix; + + /// create an identity matrix + fn identity() -> Self; + + /// matrix multiplication + fn mat_mul(&self, other: S::Matrix) -> S::Matrix; + + /// return the real part + fn real_matrix(&self) -> &S::RealMatrix; + + /// return scaled matrix + fn scaled(&self, v: S) -> Self; + + /// create a constant scalar + fn from_f64(val: f64) -> Self; + + fn set_col_vec(&mut self, c: usize, v: S::Vector); + + /// ones + fn ones() -> Self { + Self::from_f64(1.0) + } + + /// zeros + fn zeros() -> Self { + Self::from_f64(0.0) + } +} + +/// is real vector like +pub trait IsRealMatrix< + S: IsRealScalar + IsScalar, + const ROWS: usize, + const COLS: usize, + const BATCH_SIZE: usize, +>: + IsMatrix + + Index<(usize, usize), Output = S> + + IndexMut<(usize, usize), Output = S> + + Copy +{ +} + +/// Matrix - either a real (f64) or a dual number matrix +pub trait IsSingleMatrix: + IsMatrix + Mul, Output = S::SingleVector> +{ +} + +impl IsRealMatrix for MatF64 {} + +impl IsSingleMatrix for MatF64 {} + +impl IsMatrix for MatF64 { + fn from_real_matrix(val: MatF64) -> Self { + val + } + + fn from_scalar(val: f64) -> Self { + Self::from_f64(val) + } + + fn from_array2(vals: [[f64; COLS]; ROWS]) -> MatF64 { + let mut m = MatF64::::zeros(); + + for c in 0..COLS { + for r in 0..ROWS { + m[(r, c)] = vals[r][c]; + } + } + m + } + + fn from_real_array2(vals: [[f64; COLS]; ROWS]) -> Self { + let mut m = MatF64::::zeros(); + for c in 0..COLS { + for r in 0..ROWS { + m[(r, c)] = vals[r][c]; + } + } + m + } + + fn get_elem(&self, idx: [usize; 2]) -> f64 { + self[(idx[0], idx[1])] + } + + fn identity() -> Self { + Self::identity() + } + + fn real_matrix(&self) -> &Self { + self + } + + fn mat_mul(&self, other: MatF64) -> MatF64 { + self * other + } + + fn block_mat2x1( + top_row: MatF64, + bot_row: MatF64, + ) -> Self { + assert_eq!(ROWS, R0 + R1); + let mut m = Self::zero(); + + m.fixed_view_mut::(0, 0).copy_from(&top_row); + m.fixed_view_mut::(R0, 0).copy_from(&bot_row); + m + } + + fn block_mat2x2( + top_row: (MatF64, MatF64), + bot_row: (MatF64, MatF64), + ) -> Self { + assert_eq!(ROWS, R0 + R1); + assert_eq!(COLS, C0 + C1); + let mut m = Self::zero(); + + m.fixed_view_mut::(0, 0).copy_from(&top_row.0); + m.fixed_view_mut::(0, C0).copy_from(&top_row.1); + + m.fixed_view_mut::(R0, 0).copy_from(&bot_row.0); + m.fixed_view_mut::(R0, C0).copy_from(&bot_row.1); + m + } + + fn block_mat1x2( + left_col: MatF64, + righ_col: MatF64, + ) -> Self { + assert_eq!(COLS, C0 + C1); + let mut m = Self::zero(); + + m.fixed_view_mut::(0, 0).copy_from(&left_col); + m.fixed_view_mut::(0, C0).copy_from(&righ_col); + + m + } + + fn get_fixed_submat( + &self, + start_r: usize, + start_c: usize, + ) -> MatF64 { + self.fixed_view::(start_r, start_c).into() + } + + fn get_col_vec(&self, c: usize) -> VecF64 { + self.fixed_view::(0, c).into() + } + + fn get_row_vec(&self, r: usize) -> VecF64 { + self.fixed_view::<1, COLS>(r, 0).transpose() + } + + fn scaled(&self, v: f64) -> Self { + self * v + } + + fn from_f64_array2(vals: [[f64; COLS]; ROWS]) -> Self { + let mut m = MatF64::::zeros(); + for c in 0..COLS { + for r in 0..ROWS { + m[(r, c)] = vals[r][c]; + } + } + m + } + + fn from_f64(val: 
f64) -> Self { + MatF64::::from_element(val) + } + + fn set_col_vec(&mut self, c: usize, v: VecF64) { + self.fixed_columns_mut::<1>(c).copy_from(&v); + } + + fn to_dual(self) -> >::DualMatrix { + DualMatrix::from_real_matrix(self) + } + + fn select(self, mask: &bool, other: Self) -> Self { + if *mask { + self + } else { + other + } + } + + fn set_elem(&mut self, idx: [usize; 2], val: f64) { + self[(idx[0], idx[1])] = val; + } +} + +impl + IsMatrix, ROWS, COLS, BATCH> for BatchMatF64 +where + LaneCount: SupportedLaneCount, +{ + fn from_scalar(val: BatchScalarF64) -> Self { + Self::from_element(val) + } + + fn from_real_matrix(val: Self) -> Self { + val + } + + fn real_matrix(&self) -> &Self { + self + } + + fn scaled(&self, v: BatchScalarF64) -> Self { + self * v + } + + fn identity() -> Self { + nalgebra::SMatrix::, ROWS, COLS>::identity() + } + + fn from_array2(vals: [[BatchScalarF64; COLS]; ROWS]) -> Self { + Self::from_fn(|r, c| vals[r][c]) + } + + fn from_real_array2(vals: [[BatchScalarF64; COLS]; ROWS]) -> Self { + Self::from_fn(|r, c| vals[r][c]) + } + + fn from_f64_array2(vals: [[f64; COLS]; ROWS]) -> Self { + Self::from_fn(|r, c| BatchScalarF64::::from_f64(vals[r][c])) + } + + fn get_elem(&self, idx: [usize; 2]) -> BatchScalarF64 { + self[(idx[0], idx[1])] + } + + fn block_mat2x1( + top_row: BatchMatF64, + bot_row: BatchMatF64, + ) -> Self { + Self::from_fn(|r, c| { + if r < R0 { + top_row[(r, c)] + } else { + bot_row[(r - R0, c)] + } + }) + } + + fn block_mat1x2( + left_col: BatchMatF64, + righ_col: BatchMatF64, + ) -> Self { + Self::from_fn(|r, c| { + if c < C0 { + left_col[(r, c)] + } else { + righ_col[(r, c - C0)] + } + }) + } + + fn block_mat2x2( + top_row: (BatchMatF64, BatchMatF64), + bot_row: (BatchMatF64, BatchMatF64), + ) -> Self { + Self::from_fn(|r, c| { + if r < R0 { + if c < C0 { + top_row.0[(r, c)] + } else { + top_row.1[(r, c - C0)] + } + } else if c < C0 { + bot_row.0[(r - R0, c)] + } else { + bot_row.1[(r - R0, c - C0)] + } + }) + } + + fn mat_mul( + &self, + other: BatchMatF64, + ) -> BatchMatF64 { + self * other + } + + fn set_col_vec(&mut self, c: usize, v: BatchVecF64) { + self.fixed_columns_mut::<1>(c).copy_from(&v); + } + + fn get_fixed_submat( + &self, + start_r: usize, + start_c: usize, + ) -> BatchMatF64 { + self.fixed_view::(start_r, start_c).into() + } + + fn get_col_vec(&self, c: usize) -> BatchVecF64 { + self.fixed_view::(0, c).into() + } + + fn get_row_vec(&self, r: usize) -> BatchVecF64 { + self.fixed_view::<1, COLS>(r, 0).transpose() + } + + fn from_f64(val: f64) -> Self { + Self::from_element(BatchScalarF64::::from_f64(val)) + } + + fn to_dual(self) -> as IsScalar>::DualMatrix { + DualBatchMatrix::from_real_matrix(self) + } + + fn select(self, mask: &Mask, other: Self) -> Self { + self.zip_map(&other, |a, b| a.select(mask, b)) + } + + fn set_elem(&mut self, idx: [usize; 2], val: BatchScalarF64) { + self[(idx[0], idx[1])] = val; + } +} + +impl + IsRealMatrix, ROWS, COLS, BATCH> for BatchMatF64 +where + LaneCount: SupportedLaneCount, +{ +} diff --git a/crates/sophus_core/src/linalg/scalar.rs b/crates/sophus_core/src/linalg/scalar.rs new file mode 100644 index 0000000..1e072cb --- /dev/null +++ b/crates/sophus_core/src/linalg/scalar.rs @@ -0,0 +1,693 @@ +use super::bool_mask::BoolMask; +use super::matrix::IsRealMatrix; +use super::vector::IsRealVector; +use crate::calculus::dual::dual_matrix::DualBatchMatrix; +use crate::calculus::dual::dual_matrix::DualMatrix; +use crate::calculus::dual::dual_matrix::IsDualMatrix; +use 
crate::calculus::dual::dual_scalar::DualBatchScalar; +use crate::calculus::dual::dual_scalar::DualScalar; +use crate::calculus::dual::dual_scalar::IsDualScalar; +use crate::calculus::dual::dual_vector::DualBatchVector; +use crate::calculus::dual::dual_vector::DualVector; +use crate::calculus::dual::dual_vector::IsDualVector; +use crate::linalg::matrix::IsMatrix; +use crate::linalg::matrix::IsSingleMatrix; +use crate::linalg::vector::IsSingleVector; +use crate::linalg::vector::IsVector; +use crate::linalg::BatchMatF64; +use crate::linalg::BatchScalar; +use crate::linalg::BatchScalarF64; +use crate::linalg::BatchVecF64; +use crate::linalg::MatF64; +use crate::linalg::VecF64; +use approx::assert_abs_diff_eq; +use approx::AbsDiffEq; +use approx::RelativeEq; +use nalgebra::SimdValue; +use std::fmt::Debug; +use std::ops::Add; +use std::ops::AddAssign; +use std::ops::Div; +use std::ops::Mul; +use std::ops::MulAssign; +use std::ops::Neg; +use std::ops::Sub; +use std::ops::SubAssign; +use std::simd::cmp::SimdPartialOrd; +use std::simd::num::SimdFloat; +use std::simd::LaneCount; +use std::simd::Mask; +use std::simd::Simd; +use std::simd::SimdElement; +use std::simd::StdFloat; +use std::simd::SupportedLaneCount; + +/// Number category +#[derive(Debug, Copy, Clone, PartialEq, Eq)] +pub enum NumberCategory { + /// Real number such as f32 or f64 + Real, + /// Unsigned integer such as u8, u16, u32, or u64 + Unsigned, + /// Signed integer such as i8, i16, i32, or i64 + Signed, +} + +/// Trait for scalar and batch scalar linalg +pub trait IsCoreScalar: + Clone + Debug + nalgebra::Scalar + num_traits::Zero + std::ops::AddAssign +{ + /// Get the number category + fn number_category() -> NumberCategory; +} + +macro_rules! def_is_tensor_scalar_single { + ($scalar:ty, $cat:expr) => { + impl IsCoreScalar for $scalar { + fn number_category() -> NumberCategory { + $cat + } + } + }; +} + +def_is_tensor_scalar_single!(u8, NumberCategory::Unsigned); +def_is_tensor_scalar_single!(u16, NumberCategory::Unsigned); +def_is_tensor_scalar_single!(u32, NumberCategory::Unsigned); +def_is_tensor_scalar_single!(u64, NumberCategory::Unsigned); +def_is_tensor_scalar_single!(i8, NumberCategory::Signed); +def_is_tensor_scalar_single!(i16, NumberCategory::Signed); +def_is_tensor_scalar_single!(i32, NumberCategory::Signed); +def_is_tensor_scalar_single!(i64, NumberCategory::Signed); +def_is_tensor_scalar_single!(f32, NumberCategory::Real); +def_is_tensor_scalar_single!(f64, NumberCategory::Real); + +impl IsCoreScalar + for BatchScalar +where + LaneCount: SupportedLaneCount, + Simd: SimdFloat, + BatchScalar: + Clone + Debug + nalgebra::Scalar + num_traits::Zero + std::ops::AddAssign, +{ + fn number_category() -> NumberCategory { + NumberCategory::Real + } +} + +/// Scalar - either a real (f64) or a dual number +pub trait IsScalar: + PartialEq + + Debug + + Clone + + std::ops::Div + + Add + + Mul + + Sub + + AddAssign + + SubAssign + + Sized + + Neg + + AbsDiffEq + + RelativeEq + + IsCoreScalar +{ + /// Scalar type + type Scalar: IsScalar; + + /// Single scalar type + type SingleScalar: IsSingleScalar; + + /// Real scalar type + type RealScalar: IsRealScalar; + + /// Dual scalar type + type DualScalar: IsDualScalar; + + type Mask: BoolMask; + + /// Vector type + type RealMatrix: IsRealMatrix< + Self::RealScalar, + ROWS, + COLS, + BATCH_SIZE, + >; + + /// Vector type + type Vector: IsVector; + + /// Real vector type + type RealVector: IsRealVector; + + /// Dual vector type + type DualVector: IsDualVector; + + /// Matrix type + type 
Matrix: IsMatrix; + + /// Dual matrix type + type DualMatrix: IsDualMatrix< + Self::DualScalar, + ROWS, + COLS, + BATCH_SIZE, + >; + + fn select(self, mask: &Self::Mask, other: Self) -> Self; + + fn less_equal(&self, rhs: &Self) -> Self::Mask; + + fn greater_equal(&self, rhs: &Self) -> Self::Mask; + + /// absolute value + fn abs(self) -> Self; + + /// arccosine + fn acos(self) -> Self; + + /// arcsine + fn asin(self) -> Self; + + /// arctangent + fn atan(self) -> Self; + + /// arctangent2 + fn atan2(self, x: Self) -> Self; + + /// signum + fn signum(&self) -> Self; + + /// cosine + fn cos(self) -> Self; + + /// floor + fn floor(&self) -> Self::RealScalar; + + /// fractional part + fn fract(self) -> Self; + + /// create a constant scalar + fn from_real_scalar(val: Self::RealScalar) -> Self; + + /// create a constant scalar + fn from_f64(val: f64) -> Self; + + /// return the real part + fn real_part(&self) -> Self::RealScalar; + + fn to_dual(self) -> Self::DualScalar; + + /// sine + fn sin(self) -> Self; + + /// square root + fn sqrt(self) -> Self; + + /// tangent + fn tan(self) -> Self; + + /// return as a vector + fn to_vec(self) -> Self::Vector<1>; + + /// value + fn scalar(self) -> Self { + self + } + + fn from_real_array(arr: [f64; BATCH_SIZE]) -> Self; + + fn real_array(&self) -> [f64; BATCH_SIZE]; + + /// value + fn scalar_examples() -> Vec; + + /// get item + fn extract_single(&self, i: usize) -> Self::SingleScalar; + + /// ones + fn ones() -> Self { + Self::from_f64(1.0) + } + + /// zeros + fn zeros() -> Self { + Self::from_f64(0.0) + } + + /// test suite + fn test_suite() { + let examples = Self::scalar_examples(); + for a in &examples { + let sin_a = a.clone().sin(); + let cos_a = a.clone().cos(); + let val = sin_a.clone() * sin_a + cos_a.clone() * cos_a; + let one = Self::ones(); + + for i in 0..BATCH_SIZE { + assert_abs_diff_eq!(val.extract_single(i), one.extract_single(i)); + } + } + } +} + +/// Real scalar +pub trait IsRealScalar: IsScalar + Copy {} + +/// Scalar +pub trait IsSingleScalar: IsScalar<1> + PartialEq + Div { + /// Scalar vector type + type SingleVector: IsSingleVector; + + /// Matrix type + type SingleMatrix: IsSingleMatrix; + + /// returns single real scalar + fn single_real_scalar(&self) -> f64; + + /// returns single real scalar + fn single_scalar(&self) -> Self; + + /// get element + fn i64_floor(&self) -> i64; +} + +/// Batch scalar +pub trait IsBatchScalar: IsScalar {} + +impl IsRealScalar<1> for f64 {} + +impl IsScalar<1> for f64 { + type Scalar = f64; + type RealScalar = f64; + type SingleScalar = f64; + type DualScalar = DualScalar; + type Vector = VecF64; + type Matrix = MatF64; + type RealVector = VecF64; + type RealMatrix = MatF64; + + type Mask = bool; + + fn less_equal(&self, rhs: &Self) -> Self::Mask { + self <= rhs + } + + fn greater_equal(&self, rhs: &Self) -> Self::Mask { + self >= rhs + } + + fn scalar_examples() -> Vec { + vec![1.0, 2.0, 3.0] + } + + fn abs(self) -> f64 { + f64::abs(self) + } + + fn cos(self) -> f64 { + f64::cos(self) + } + + fn sin(self) -> f64 { + f64::sin(self) + } + + fn sqrt(self) -> f64 { + f64::sqrt(self) + } + + fn from_f64(val: f64) -> f64 { + val + } + + fn from_real_scalar(val: f64) -> f64 { + val + } + + fn atan2(self, x: Self) -> Self { + self.atan2(x) + } + + fn from_real_array(arr: [Self::RealScalar; 1]) -> Self { + arr[0] + } + + fn real_array(&self) -> [Self::RealScalar; 1] { + [*self] + } + + fn real_part(&self) -> f64 { + *self + } + + fn to_vec(self) -> VecF64<1> { + VecF64::<1>::new(self) + } + + fn 
tan(self) -> Self { + self.tan() + } + + fn acos(self) -> Self { + self.acos() + } + + fn asin(self) -> Self { + self.asin() + } + + fn atan(self) -> Self { + self.atan() + } + + fn fract(self) -> Self { + f64::fract(self) + } + + fn floor(&self) -> f64 { + f64::floor(*self) + } + + fn extract_single(&self, i: usize) -> f64 { + self.extract(i) + } + + fn signum(&self) -> Self { + f64::signum(*self) + } + + type DualVector = DualVector; + + type DualMatrix = DualMatrix; + + fn to_dual(self) -> Self::DualScalar { + DualScalar::from_f64(self) + } + + fn select(self, mask: &Self::Mask, other: Self) -> Self { + if *mask { + self + } else { + other + } + } +} + +impl IsSingleScalar for f64 { + type SingleMatrix = MatF64; + + type SingleVector = VecF64; + + fn single_real_scalar(&self) -> f64 { + *self + } + + fn single_scalar(&self) -> Self { + *self + } + + fn i64_floor(&self) -> i64 { + self.floor() as i64 + } +} + +impl AbsDiffEq for BatchScalarF64 +where + LaneCount: SupportedLaneCount, +{ + type Epsilon = f64; + + fn default_epsilon() -> f64 { + f64::default_epsilon() + } + + fn abs_diff_eq(&self, other: &Self, epsilon: f64) -> bool { + for i in 0..BATCH { + if !self.0[i].abs_diff_eq(&other.0[i], epsilon) { + return false; + } + } + true + } +} + +impl RelativeEq for BatchScalarF64 +where + LaneCount: SupportedLaneCount, +{ + fn default_max_relative() -> Self::Epsilon { + f64::default_max_relative() + } + + fn relative_eq( + &self, + other: &Self, + epsilon: Self::Epsilon, + max_relative: Self::Epsilon, + ) -> bool { + for i in 0..BATCH { + if !self.0[i].relative_eq(&other.0[i], epsilon, max_relative) { + return false; + } + } + true + } +} + +impl AddAssign for BatchScalarF64 +where + LaneCount: SupportedLaneCount, +{ + fn add_assign(&mut self, rhs: Self) { + self.0 += rhs.0; + } +} + +impl SubAssign for BatchScalarF64 +where + LaneCount: SupportedLaneCount, +{ + fn sub_assign(&mut self, rhs: Self) { + self.0 -= rhs.0; + } +} + +impl MulAssign for BatchScalarF64 +where + LaneCount: SupportedLaneCount, +{ + fn mul_assign(&mut self, rhs: Self) { + self.0 *= rhs.0; + } +} + +impl Neg for BatchScalarF64 +where + LaneCount: SupportedLaneCount, +{ + type Output = Self; + + fn neg(self) -> Self { + BatchScalarF64 { 0: -self.0 } + } +} + +impl Sub for BatchScalarF64 +where + LaneCount: SupportedLaneCount, +{ + type Output = Self; + + fn sub(self, rhs: Self) -> Self { + BatchScalarF64 { 0: self.0 - rhs.0 } + } +} + +impl Mul for BatchScalarF64 +where + LaneCount: SupportedLaneCount, +{ + type Output = Self; + + fn mul(self, rhs: Self) -> Self { + BatchScalarF64 { 0: self.0 * rhs.0 } + } +} + +impl Div for BatchScalarF64 +where + LaneCount: SupportedLaneCount, +{ + type Output = Self; + + fn div(self, rhs: Self) -> Self { + BatchScalarF64 { 0: self.0 / rhs.0 } + } +} + +impl num_traits::One for BatchScalarF64 +where + LaneCount: SupportedLaneCount, +{ + fn one() -> Self { + Self(Simd::::splat(1.0)) + } +} +impl IsRealScalar for BatchScalarF64 where + LaneCount: SupportedLaneCount +{ +} + +impl IsBatchScalar for BatchScalarF64 where + LaneCount: SupportedLaneCount +{ +} + +impl IsScalar for BatchScalarF64 +where + LaneCount: SupportedLaneCount, +{ + type Scalar = BatchScalarF64; + type RealScalar = Self; + type SingleScalar = f64; + type DualScalar = DualBatchScalar; + + type RealVector = BatchVecF64; + + type Vector = BatchVecF64; + type Matrix = BatchMatF64; + type RealMatrix = BatchMatF64; + + type Mask = Mask; + + fn less_equal(&self, rhs: &Self) -> Self::Mask { + self.0.simd_le(rhs.0) + } + + fn 
greater_equal(&self, rhs: &Self) -> Self::Mask { + self.0.simd_ge(rhs.0) + } + + fn scalar_examples() -> Vec> { + vec![ + BatchScalarF64::::from_f64(1.0), + BatchScalarF64::::from_f64(2.0), + BatchScalarF64::::from_f64(3.0), + ] + } + + fn extract_single(&self, i: usize) -> f64 { + self.0[i] + } + + fn from_real_scalar(val: BatchScalarF64) -> Self { + val + } + + fn real_part(&self) -> Self { + *self + } + + fn from_f64(val: f64) -> Self { + BatchScalarF64 { + 0: Simd::::splat(val), + } + } + + fn abs(self) -> Self { + BatchScalarF64 { + 0: SimdFloat::abs(self.0), + } + } + + fn from_real_array(arr: [f64; BATCH]) -> Self { + BatchScalarF64 { + 0: Simd::::from_array(arr), + } + } + + fn real_array(&self) -> [f64; BATCH] { + self.0.to_array() + } + + fn cos(self) -> Self { + BatchScalarF64 { 0: self.0.cos() } + } + + fn sin(self) -> Self { + BatchScalarF64 { 0: self.0.sin() } + } + + fn tan(self) -> Self { + BatchScalarF64 { + 0: sleef::Sleef::tan(self.0), + } + } + + fn acos(self) -> Self { + BatchScalarF64 { + 0: sleef::Sleef::acos(self.0), + } + } + + fn asin(self) -> Self { + BatchScalarF64 { + 0: sleef::Sleef::asin(self.0), + } + } + + fn atan(self) -> Self { + BatchScalarF64 { + 0: sleef::Sleef::atan(self.0), + } + } + + fn sqrt(self) -> Self { + BatchScalarF64 { 0: self.0.sqrt() } + } + + fn atan2(self, x: Self) -> Self { + BatchScalarF64 { + 0: sleef::Sleef::atan2(self.0, x.0), + } + } + + fn to_vec(self) -> Self::Vector<1> { + BatchVecF64::<1, BATCH>::from_scalar(self) + } + + fn fract(self) -> Self { + BatchScalarF64 { 0: self.0.fract() } + } + + fn floor(&self) -> BatchScalarF64 { + BatchScalarF64 { 0: self.0.floor() } + } + + fn signum(&self) -> Self { + BatchScalarF64 { 0: self.0.signum() } + } + + type DualVector = DualBatchVector; + + type DualMatrix = DualBatchMatrix; + + fn to_dual(self) -> Self::DualScalar { + DualBatchScalar::from_real_scalar(self) + } + + fn select(self, mask: &Self::Mask, other: Self) -> Self { + BatchScalarF64 { + 0: mask.select(self.0, other.0), + } + } +} + +#[test] +fn scalar_prop_tests() { + f64::test_suite(); + BatchScalarF64::<2>::test_suite(); + BatchScalarF64::<4>::test_suite(); + BatchScalarF64::<8>::test_suite(); +} diff --git a/crates/sophus_core/src/linalg/vector.rs b/crates/sophus_core/src/linalg/vector.rs new file mode 100644 index 0000000..09cad77 --- /dev/null +++ b/crates/sophus_core/src/linalg/vector.rs @@ -0,0 +1,393 @@ +use approx::AbsDiffEq; +use approx::RelativeEq; + +use super::scalar::IsRealScalar; +use super::scalar::IsScalar; +use super::scalar::IsSingleScalar; +use crate::calculus::dual::dual_vector::DualBatchVector; +use crate::calculus::dual::dual_vector::DualVector; +use crate::linalg::BatchMatF64; +use crate::linalg::BatchScalarF64; +use crate::linalg::BatchVecF64; +use crate::linalg::MatF64; +use crate::linalg::VecF64; +use std::fmt::Debug; +use std::ops::Add; +use std::ops::Index; +use std::ops::IndexMut; +use std::ops::Neg; +use std::ops::Sub; +use std::simd::LaneCount; +use std::simd::Mask; +use std::simd::SupportedLaneCount; + +/// Vector - either a real (f64) or a dual number vector +pub trait IsVector, const ROWS: usize, const BATCH_SIZE: usize>: + Clone + + Neg + + Add + + Sub + + Neg + + Debug + + AbsDiffEq + + RelativeEq +{ + fn vector(self) -> Self; + + /// create a block vector + fn block_vec2( + top_row: S::Vector, + bot_row: S::Vector, + ) -> Self; + + fn to_dual( + self, + ) -> <>::DualScalar as IsScalar>::Vector; + + /// dot product + fn dot(self, rhs: Self) -> S; + + fn outer(self, rhs: S::Vector) -> 
S::Matrix; + + /// create a vector from an array + fn from_array(vals: [S; ROWS]) -> Self; + + fn select(self, mask: &S::Mask, other: Self) -> Self; + + /// create a constant vector from an array + fn from_real_array(vals: [S::RealScalar; ROWS]) -> Self; + + /// create a constant vector + fn from_real_vector(val: S::RealVector) -> Self; + + /// create a constant scalar + fn from_f64(val: f64) -> Self; + + /// create a constant vector from an array + fn from_f64_array(vals: [f64; ROWS]) -> Self; + + /// create a constant vector from an array + fn from_scalar_array(vals: [S; ROWS]) -> Self; + + /// get ith element + fn get_elem(&self, idx: usize) -> S; + + /// get fixed rows + fn get_fixed_rows(&self, start: usize) -> S::Vector; + + /// norm + fn norm(&self) -> S; + + /// return normalized vector + fn normalized(&self) -> Self; + + /// return the real part + fn real_vector(&self) -> &S::RealVector; + + /// return scaled vector + fn scaled(&self, v: S) -> Self; + + /// set ith element as constant + fn set_elem(&mut self, idx: usize, v: S); + + /// set ith element as constant + fn set_real_elem(&mut self, idx: usize, v: S::RealScalar); + + /// squared norm + fn squared_norm(&self) -> S; + + /// return the matrix representation + fn to_mat(self) -> S::Matrix; + + /// ones + fn ones() -> Self { + Self::from_f64(1.0) + } + + /// zeros + fn zeros() -> Self { + Self::from_f64(0.0) + } + + /// get fixed submatrix + fn get_fixed_subvec(&self, start_r: usize) -> S::Vector; +} + +/// is real vector like +pub trait IsRealVector< + S: IsRealScalar + IsScalar, + const ROWS: usize, + const BATCH_SIZE: usize, +>: + IsVector + Index + IndexMut + Copy +{ +} + +/// Batch scalar +pub trait IsBatchVector: IsScalar { + /// get item + fn extract_single(&self, i: usize) -> Self::SingleScalar; +} + +/// is scalar vector +pub trait IsSingleVector: IsVector { + /// set real scalar + fn set_real_scalar(&mut self, idx: usize, v: f64); +} + +impl IsSingleVector for VecF64 { + fn set_real_scalar(&mut self, idx: usize, v: f64) { + self[idx] = v; + } +} + +impl IsRealVector for VecF64 {} + +impl IsVector for VecF64 { + fn vector(self) -> Self { + self + } + + fn block_vec2( + top_row: VecF64, + bot_row: VecF64, + ) -> Self { + assert_eq!(ROWS, R0 + R1); + let mut m = Self::zeros(); + + m.fixed_view_mut::(0, 0).copy_from(&top_row); + m.fixed_view_mut::(R0, 0).copy_from(&bot_row); + m + } + + fn from_array(vals: [f64; ROWS]) -> VecF64 { + VecF64::::from_row_slice(&vals[..]) + } + + fn from_real_array(vals: [f64; ROWS]) -> Self { + VecF64::::from_row_slice(&vals[..]) + } + + fn from_real_vector(val: VecF64) -> Self { + val + } + + fn from_f64_array(vals: [f64; ROWS]) -> Self { + VecF64::::from_row_slice(&vals[..]) + } + + fn from_scalar_array(vals: [f64; ROWS]) -> Self { + VecF64::::from_row_slice(&vals[..]) + } + + fn get_elem(&self, idx: usize) -> f64 { + self[idx] + } + + fn get_fixed_rows(&self, start: usize) -> VecF64 { + self.fixed_rows::(start).into() + } + + fn norm(&self) -> f64 { + self.norm() + } + + fn real_vector(&self) -> &Self { + self + } + + fn set_elem(&mut self, idx: usize, v: f64) { + self[idx] = v; + } + + fn set_real_elem(&mut self, idx: usize, v: f64) { + self[idx] = v; + } + + fn squared_norm(&self) -> f64 { + self.norm_squared() + } + + fn to_mat(self) -> MatF64 { + self + } + + fn scaled(&self, v: f64) -> Self { + self * v + } + + fn dot(self, rhs: Self) -> f64 { + VecF64::dot(&self, &rhs) + } + + fn normalized(&self) -> Self { + self.normalize() + } + + fn from_f64(val: f64) -> Self { + 
VecF64::::from_element(val) + } + + fn to_dual(self) -> >::DualVector { + DualVector::from_real_vector(self) + } + + fn outer(self, rhs: VecF64) -> MatF64 { + self * rhs.transpose() + } + + fn select(self, mask: &bool, other: Self) -> Self { + if *mask { + self + } else { + other + } + } + + fn get_fixed_subvec(&self, start_r: usize) -> VecF64 { + self.fixed_rows::(start_r).into() + } +} + +/// cross product +pub fn cross, const BATCH: usize>( + lhs: S::Vector<3>, + rhs: S::Vector<3>, +) -> S::Vector<3> { + let l0 = lhs.get_elem(0); + let l1 = lhs.get_elem(1); + let l2 = lhs.get_elem(2); + + let r0 = rhs.get_elem(0); + let r1 = rhs.get_elem(1); + let r2 = rhs.get_elem(2); + + S::Vector::from_array([ + l1.clone() * r2.clone() - l2.clone() * r1.clone(), + l2 * r0.clone() - l0.clone() * r2, + l0 * r1 - l1 * r0, + ]) +} + +impl IsVector, ROWS, BATCH> + for BatchVecF64 +where + LaneCount: SupportedLaneCount, +{ + fn block_vec2( + top_row: BatchVecF64, + bot_row: BatchVecF64, + ) -> Self { + assert_eq!(ROWS, R0 + R1); + let mut m = Self::zeros(); + + m.fixed_view_mut::(0, 0).copy_from(&top_row); + m.fixed_view_mut::(R0, 0).copy_from(&bot_row); + m + } + + fn vector(self) -> Self { + self + } + + fn dot(self, rhs: Self) -> BatchScalarF64 { + (self.transpose() * &rhs)[0] + } + + fn from_array(vals: [BatchScalarF64; ROWS]) -> Self { + Self::from_fn(|i, _| vals[i]) + } + + fn from_real_array(vals: [BatchScalarF64; ROWS]) -> Self { + Self::from_fn(|i, _| vals[i]) + } + + fn from_f64_array(vals: [f64; ROWS]) -> Self { + Self::from_fn(|i, _| BatchScalarF64::::from_f64(vals[i])) + } + + fn from_scalar_array(vals: [BatchScalarF64; ROWS]) -> Self { + Self::from_fn(|i, _| vals[i]) + } + + fn from_real_vector(val: BatchVecF64) -> Self { + val + } + + fn get_elem(&self, idx: usize) -> BatchScalarF64 { + self[idx] + } + + fn get_fixed_rows(&self, start: usize) -> BatchVecF64 { + self.fixed_rows::(start).into() + } + + fn norm(&self) -> BatchScalarF64 { + self.squared_norm().sqrt() + } + + fn normalized(&self) -> Self { + let norm = self.norm(); + if norm == BatchScalarF64::::zeros() { + return *self; + } + let factor = BatchScalarF64::::ones() / norm; + self * factor + } + + fn real_vector(&self) -> &BatchVecF64 { + self + } + + fn scaled(&self, v: BatchScalarF64) -> Self { + self * v + } + + fn set_elem(&mut self, idx: usize, v: BatchScalarF64) { + self[idx] = v; + } + + fn set_real_elem(&mut self, idx: usize, v: BatchScalarF64) { + self[idx] = v; + } + + fn squared_norm(&self) -> BatchScalarF64 { + let mut squared_norm = BatchScalarF64::::zeros(); + for i in 0..ROWS { + let val = self.get_elem(i); + squared_norm += val * val; + } + squared_norm + } + + fn to_mat(self) -> BatchMatF64 { + self + } + + fn from_f64(val: f64) -> Self { + Self::from_element(BatchScalarF64::::from_f64(val)) + } + + fn to_dual(self) -> as IsScalar>::DualVector { + DualBatchVector::from_real_vector(self) + } + + fn outer(self, rhs: BatchVecF64) -> BatchMatF64 { + self * rhs.transpose() + } + + fn select(self, mask: &Mask, other: Self) -> Self { + self.zip_map(&other, |a, b| a.select(mask, b)) + } + + fn get_fixed_subvec(&self, start_r: usize) -> BatchVecF64 { + self.fixed_rows::(start_r).into() + } +} + +impl IsRealVector, ROWS, BATCH> + for BatchVecF64 +where + LaneCount: SupportedLaneCount, +{ +} diff --git a/crates/sophus_core/src/params.rs b/crates/sophus_core/src/params.rs new file mode 100644 index 0000000..70c3718 --- /dev/null +++ b/crates/sophus_core/src/params.rs @@ -0,0 +1,53 @@ +use crate::linalg::scalar::IsScalar; +use 
crate::linalg::VecF64;
+use crate::points::example_points;
+
+/// Parameter implementation.
+pub trait ParamsImpl<S: IsScalar<BATCH_SIZE>, const PARAMS: usize, const BATCH_SIZE: usize> {
+    /// Is the parameter vector valid?
+    fn are_params_valid(params: &S::Vector<PARAMS>) -> S::Mask;
+    /// Examples of valid parameter vectors.
+    fn params_examples() -> Vec<S::Vector<PARAMS>>;
+    /// Examples of invalid parameter vectors.
+    fn invalid_params_examples() -> Vec<S::Vector<PARAMS>>;
+}
+
+/// A trait for types that have parameters.
+pub trait HasParams<S: IsScalar<BATCH_SIZE>, const PARAMS: usize, const BATCH_SIZE: usize>:
+    ParamsImpl<S, PARAMS, BATCH_SIZE>
+{
+    /// Create from parameters.
+    fn from_params(params: &S::Vector<PARAMS>) -> Self;
+    /// Set parameters.
+    fn set_params(&mut self, params: &S::Vector<PARAMS>);
+    /// Get parameters.
+    fn params(&self) -> &S::Vector<PARAMS>;
+}
+
+impl<const PARAMS: usize> ParamsImpl<f64, PARAMS, 1> for VecF64<PARAMS> {
+    fn are_params_valid(_params: &VecF64<PARAMS>) -> bool {
+        true
+    }
+
+    fn params_examples() -> Vec<VecF64<PARAMS>> {
+        example_points::<f64, PARAMS, 1>()
+    }
+
+    fn invalid_params_examples() -> Vec<VecF64<PARAMS>> {
+        vec![]
+    }
+}
+
+impl<const PARAMS: usize> HasParams<f64, PARAMS, 1> for VecF64<PARAMS> {
+    fn from_params(params: &VecF64<PARAMS>) -> Self {
+        *params
+    }
+
+    fn set_params(&mut self, params: &VecF64<PARAMS>) {
+        *self = *params
+    }
+
+    fn params(&self) -> &VecF64<PARAMS> {
+        self
+    }
+}
diff --git a/crates/sophus_core/src/points.rs b/crates/sophus_core/src/points.rs
new file mode 100644
index 0000000..457a067
--- /dev/null
+++ b/crates/sophus_core/src/points.rs
@@ -0,0 +1,28 @@
+use crate::linalg::scalar::IsScalar;
+use crate::linalg::vector::IsVector;
+
+/// Example points
+pub fn example_points<S: IsScalar<BATCH>, const POINT: usize, const BATCH: usize>(
+) -> Vec<S::Vector<POINT>> {
+    let points4 = vec![
+        S::Vector::<4>::from_f64_array([0.1, 0.0, 0.0, 0.0]),
+        S::Vector::<4>::from_f64_array([1.0, 4.0, 1.0, 0.5]),
+        S::Vector::<4>::from_f64_array([0.7, 5.0, 1.1, (-5.0)]),
+        S::Vector::<4>::from_f64_array([1.0, 3.0, 1.0, 0.5]),
+        S::Vector::<4>::from_f64_array([0.7, 5.0, 0.8, (-5.0)]),
+        S::Vector::<4>::from_f64_array([1.0, 3.0, 1.0, 0.5]),
+        S::Vector::<4>::from_f64_array([-0.7, 5.0, 0.1, (-5.0)]),
+        S::Vector::<4>::from_f64_array([2.0, (-3.0), 1.0, 0.5]),
+    ];
+
+    let mut out: Vec<S::Vector<POINT>> = vec![];
+    for p4 in points4 {
+        let mut v = S::Vector::<POINT>::zeros();
+        for i in 0..POINT.min(4) {
+            let val = p4.get_elem(i);
+            v.set_elem(i, val);
+        }
+        out.push(v)
+    }
+    out
+}
diff --git a/crates/sophus_tensor/src/lib.rs b/crates/sophus_core/src/tensor.rs
similarity index 80%
rename from crates/sophus_tensor/src/lib.rs
rename to crates/sophus_core/src/tensor.rs
index cb272b6..1f100ee 100644
--- a/crates/sophus_tensor/src/lib.rs
+++ b/crates/sophus_core/src/tensor.rs
@@ -8,6 +8,6 @@ pub mod element;
 /// Mutable tensor
 pub mod mut_tensor;
 /// Mutable tensor view
-pub mod mut_view;
+pub mod mut_tensor_view;
 /// Tensor view
-pub mod view;
+pub mod tensor_view;
diff --git a/crates/sophus_core/src/tensor/arc_tensor.rs b/crates/sophus_core/src/tensor/arc_tensor.rs
new file mode 100644
index 0000000..9e98b00
--- /dev/null
+++ b/crates/sophus_core/src/tensor/arc_tensor.rs
@@ -0,0 +1,435 @@
+use ndarray::Dimension;
+
+use crate::linalg::scalar::IsCoreScalar;
+use crate::linalg::SMat;
+use crate::linalg::SVec;
+use crate::tensor::element::IsStaticTensor;
+use crate::tensor::mut_tensor::InnerScalarToVec;
+use crate::tensor::mut_tensor::InnerVecToMat;
+use crate::tensor::mut_tensor::MutTensor;
+use crate::tensor::tensor_view::IsTensorLike;
+use crate::tensor::tensor_view::IsTensorView;
+use crate::tensor::tensor_view::TensorView;
+
+use std::marker::PhantomData;
+
+/// Arc tensor - a tensor with shared ownership
+///
+/// See TensorView for more details of the tensor structure
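// ---------------------------------------------------------------------------
// Illustrative sketch (annotation, not part of the patch): the shared
// ownership described above means cloning an `ArcTensor` is cheap and
// aliases the same buffer, while `to_mut_tensor` makes a deep copy. A
// minimal test in the style of `arc_tensor_tests` below, assuming the
// `ArcTensorDR` / `MutTensorDR` aliases and `IsTensorLike` from this file:
#[test]
fn arc_tensor_sharing_demo() {
    use crate::linalg::SVec;
    use crate::tensor::mut_tensor::MutTensorDR;
    use crate::tensor::tensor_view::IsTensorLike;

    // Build a tensor mutably, then freeze it into shared ownership.
    let mut_tensor = MutTensorDR::from_shape_and_val([4], SVec::<f32, 1>::new(0.5));
    let shared = ArcTensorDR::from_mut_tensor(mut_tensor);

    // Clones alias the same underlying allocation.
    let alias = shared.clone();
    assert_eq!(
        shared.view().elem_view().as_slice().unwrap().as_ptr(),
        alias.view().elem_view().as_slice().unwrap().as_ptr()
    );

    // Converting back to a mutable tensor copies the data out.
    let copy = alias.to_mut_tensor();
    assert_ne!(
        copy.view().elem_view().as_slice().unwrap().as_ptr(),
        alias.view().elem_view().as_slice().unwrap().as_ptr()
    );
}
// ---------------------------------------------------------------------------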
+#[derive(Debug, Clone)] +pub struct ArcTensor< + const TOTAL_RANK: usize, + const DRANK: usize, + const SRANK: usize, + Scalar: IsCoreScalar + 'static, + STensor: IsStaticTensor + 'static, + const ROWS: usize, + const COLS: usize, +> where + ndarray::Dim<[ndarray::Ix; DRANK]>: Dimension, +{ + /// ndarray of tensors with shape [D1, D2, ...] + pub array: ndarray::ArcArray>, + phantom: PhantomData<(Scalar, STensor)>, +} + +/// rank-1 tensor of scalars +pub type ArcTensorX = ArcTensor; + +/// rank-2 tensor of vectors with shape R +pub type ArcTensorXR< + const TOTAL_RANK: usize, + const DRANK: usize, + const SRANK: usize, + Scalar, + const R: usize, +> = ArcTensor, R, 1>; + +/// rank-2 tensor of matrices with shape [R x C] +pub type ArcTensorXRC< + const TOTAL_RANK: usize, + const DRANK: usize, + const SRANK: usize, + Scalar, + const R: usize, + const C: usize, +> = ArcTensor, R, C>; + +/// rank-1 tensor of scalars with shape D0 +pub type ArcTensorD = ArcTensorX; + +/// rank-2 tensor of scalars with shape [D0 x D1] +pub type ArcTensorDD = ArcTensorX<2, Scalar>; + +/// rank-2 tensor of vectors with shape [D0 x R] +pub type ArcTensorDR = ArcTensorXR<2, 1, 1, Scalar, R>; + +/// rank-3 tensor of scalars with shape [D0 x D1 x D2] +pub type ArcTensorRRR = ArcTensorX<3, Scalar>; + +/// rank-3 tensor of vectors with shape [D0 x D1 x R] +pub type ArcTensorDDR = ArcTensorXR<3, 2, 1, Scalar, R>; + +/// rank-3 tensor of matrices with shape [D0 x R x C] +pub type ArcTensorDRC = ArcTensorXRC<3, 1, 2, Scalar, R, C>; + +/// rank-4 tensor of scalars with shape [D0 x D1 x D2 x D3] +pub type ArcTensorDDDD = ArcTensorX<4, Scalar>; + +/// rank-4 tensor of vectors with shape [D0 x D1 x D2 x R] +pub type ArcTensorDDDR = ArcTensorXR<4, 3, 1, Scalar, R>; + +/// rank-4 tensor of matrices with shape [D0 x R x C x B] +pub type ArcTensorDDRC = + ArcTensorXRC<4, 2, 2, Scalar, R, C>; + +macro_rules! 
arc_tensor_is_tensor_view { + ($scalar_rank:literal, $srank:literal,$drank:literal) => { + + + impl< + 'a, + Scalar: IsCoreScalar + 'static, + STensor: IsStaticTensor + 'static, + const ROWS: usize, + const COLS: usize, + > IsTensorLike< + 'a, $scalar_rank, $drank, $srank, Scalar, STensor, ROWS, COLS + > for ArcTensor<$scalar_rank, $drank, $srank, Scalar, STensor, ROWS, COLS> + { + fn elem_view<'b:'a>( + &'b self, + ) -> ndarray::ArrayView<'a, STensor, ndarray::Dim<[ndarray::Ix; $drank]>> { + self.view().elem_view + } + + fn get(& self, idx: [usize; $drank]) -> STensor { + self.view().get(idx) + } + + fn dims(&self) -> [usize; $drank] { + self.view().dims() + } + + fn scalar_view<'b:'a>( + &'b self, + ) -> ndarray::ArrayView<'a, Scalar, ndarray::Dim<[ndarray::Ix; $scalar_rank]>> { + self.view().scalar_view + } + + fn scalar_get(&'a self, idx: [usize; $scalar_rank]) -> Scalar { + self.view().scalar_get(idx) + } + + fn scalar_dims(&self) -> [usize; $scalar_rank] { + self.view().scalar_dims() + } + + fn to_mut_tensor( + &self, + ) -> MutTensor<$scalar_rank, $drank, $srank, Scalar, STensor, ROWS, COLS> { + MutTensor { + mut_array: self.elem_view().to_owned(), + phantom: PhantomData::default(), + } + } + } + + impl< + 'a, + Scalar: IsCoreScalar+ 'static, + STensor: IsStaticTensor + 'static, + const ROWS: usize, + const COLS: usize, + > + ArcTensor<$scalar_rank, $drank, $srank, Scalar, STensor, ROWS, COLS> + { + /// create a new tensor from a shape - all elements are zero + pub fn from_shape(size: [usize; $drank]) -> Self { + Self::from_mut_tensor( + MutTensor::< + $scalar_rank, $drank, $srank, Scalar, STensor, ROWS, COLS, + >::from_shape(size)) + } + + /// create a new tensor from a binary operation applied to two tensor views + pub fn from_map2< + 'b, + const OTHER_HRANK: usize, const OTHER_SRANK: usize, + OtherScalar: IsCoreScalar + 'static, + OtherSTensor: IsStaticTensor< + OtherScalar, OTHER_SRANK, OTHER_ROWS, OTHER_COLS + > + 'static, + const OTHER_ROWS: usize, const OTHER_COLS: usize, + V : IsTensorView::< + 'b, + OTHER_HRANK, $drank, OTHER_SRANK, + OtherScalar, OtherSTensor, + OTHER_ROWS, OTHER_COLS, + >, + const OTHER_HRANK2: usize, const OTHER_SRANK2: usize, + OtherScalar2: IsCoreScalar + 'static, + OtherSTensor2: IsStaticTensor< + OtherScalar2, OTHER_SRANK2, OTHER_ROWS2, OTHER_COLS2, + > + 'static, + const OTHER_ROWS2: usize, const OTHER_COLS2: usize, + V2 : IsTensorView::<'b, + OTHER_HRANK2, $drank, OTHER_SRANK2, + OtherScalar2, OtherSTensor2, + OTHER_ROWS2, OTHER_COLS2, + >, + F: FnMut(&OtherSTensor, &OtherSTensor2) -> STensor + > ( + view: &'b V, + view2: &'b V2, + op: F, + ) + -> Self where + ndarray::Dim<[ndarray::Ix; OTHER_HRANK]>: ndarray::Dimension, + ndarray::Dim<[ndarray::Ix; OTHER_HRANK2]>: ndarray::Dimension + { + Self::from_mut_tensor( + MutTensor::< + $scalar_rank, $drank, $srank, Scalar, STensor, ROWS, COLS, + >::from_map2(view,view2, op), + ) + } + } + + impl< + 'a, + Scalar: IsCoreScalar+ 'static, + STensor: IsStaticTensor + 'static, + const ROWS: usize, + const COLS: usize, + > + ArcTensor<$scalar_rank, $drank, $srank, Scalar, STensor, ROWS, COLS> + { + + /// create a new tensor from a tensor view + pub fn make_copy_from( + v: &TensorView<$scalar_rank, $drank, $srank, Scalar, STensor, ROWS, COLS> + ) -> Self + { + Self::from_mut_tensor(IsTensorLike::to_mut_tensor(v)) + } + + /// create a new tensor from a mutable tensor + pub fn from_mut_tensor( + tensor: + MutTensor<$scalar_rank, $drank, $srank, Scalar, STensor, ROWS, COLS>, + ) -> Self { + Self { + array: 
tensor.mut_array.into(), + phantom: PhantomData {}, + } + } + + /// create a new tensor from a shape and a value + pub fn from_shape_and_val( + shape: [usize; $drank], + val:STensor, + ) -> Self { + Self::from_mut_tensor( + MutTensor::< + $scalar_rank, $drank, $srank, Scalar, STensor, ROWS, COLS + >::from_shape_and_val(shape, val), + ) + } + + /// return a tensor view + pub fn view<'b: 'a>(&'b self) + -> TensorView< + 'a, $scalar_rank, $drank, $srank, Scalar, STensor, ROWS, COLS> + { + TensorView::< + 'a, $scalar_rank, $drank, $srank, Scalar, STensor, ROWS, COLS + >::new( + self.array.view() + ) + } + + /// create a new tensor from a unary operation applied to a tensor view + pub fn from_map< + 'b, + const OTHER_HRANK: usize, const OTHER_SRANK: usize, + OtherScalar: IsCoreScalar+ 'static, + OtherSTensor: IsStaticTensor< + OtherScalar, OTHER_SRANK, OTHER_ROWS, OTHER_COLS + > + 'static, + const OTHER_ROWS: usize, const OTHER_COLS: usize, + V : IsTensorView::< + 'b, + OTHER_HRANK, $drank, OTHER_SRANK, + OtherScalar, OtherSTensor, + OTHER_ROWS, OTHER_COLS, + >, + F: FnMut(&OtherSTensor)-> STensor + >( + view: &'b V, + op: F, + ) + -> Self + where + ndarray::Dim<[ndarray::Ix; OTHER_HRANK]>: ndarray::Dimension, + ndarray::Dim<[ndarray::Ix; $drank]>: ndarray::Dimension, + { + Self::from_mut_tensor( + MutTensor::< + $scalar_rank, $drank, $srank, Scalar, STensor, ROWS, COLS + >::from_map(view, op), + ) + } + } + }; +} + +impl InnerVecToMat<3, 1, 2, 4, 2, Scalar, ROWS> + for ArcTensorXR<3, 2, 1, Scalar, ROWS> +{ + fn inner_vec_to_mat(self) -> ArcTensorXRC<4, 2, 2, Scalar, ROWS, 1> { + ArcTensorXRC::<4, 2, 2, Scalar, ROWS, 1> { + array: self.array, + phantom: PhantomData, + } + } + + type Output = ArcTensorXRC<4, 2, 2, Scalar, ROWS, 1>; +} + +impl InnerScalarToVec<2, 0, 2, 3, 1, Scalar> + for ArcTensorX<2, Scalar> +{ + fn inner_scalar_to_vec(self) -> ArcTensorXR<3, 2, 1, Scalar, 1> { + ArcTensorXR::<3, 2, 1, Scalar, 1> { + array: self + .array + .map(|x| SVec::::new(x.clone())) + .to_shared(), + phantom: PhantomData, + } + } + + type Output = ArcTensorXR<3, 2, 1, Scalar, 1>; +} + +arc_tensor_is_tensor_view!(1, 0, 1); +arc_tensor_is_tensor_view!(2, 0, 2); +arc_tensor_is_tensor_view!(2, 1, 1); +arc_tensor_is_tensor_view!(3, 0, 3); +arc_tensor_is_tensor_view!(3, 1, 2); +arc_tensor_is_tensor_view!(3, 2, 1); +arc_tensor_is_tensor_view!(4, 0, 4); +arc_tensor_is_tensor_view!(4, 1, 3); +arc_tensor_is_tensor_view!(4, 2, 2); +arc_tensor_is_tensor_view!(5, 0, 5); +arc_tensor_is_tensor_view!(5, 1, 4); +arc_tensor_is_tensor_view!(5, 2, 3); + +#[test] +fn arc_tensor_tests() { + //from_mut_tensor + + use crate::tensor::mut_tensor::MutTensorDDDR; + use crate::tensor::mut_tensor::MutTensorDDR; + use crate::tensor::mut_tensor::MutTensorDR; + + { + let shape = [4]; + let mut_img = MutTensorDR::from_shape_and_val(shape, SVec::::new(0.5f32)); + let copy = MutTensorDR::make_copy_from(&mut_img.view()); + assert_eq!(copy.view().dims(), shape); + let img = ArcTensorDR::from_mut_tensor(copy); + assert_eq!(img.view().dims(), shape); + let mut_img2 = ArcTensorDR::from_mut_tensor(mut_img.clone()); + assert_eq!( + mut_img2.view().elem_view().as_slice().unwrap(), + mut_img.view().elem_view().as_slice().unwrap() + ); + } + { + let shape = [4, 2]; + let mut_img = MutTensorDDR::from_shape_and_val(shape, SVec::::new(0.5f32)); + let copy = MutTensorDDR::make_copy_from(&mut_img.view()); + assert_eq!(copy.dims(), shape); + let img = ArcTensorDDR::from_mut_tensor(copy); + assert_eq!(img.dims(), shape); + assert_eq!( + 
img.view().elem_view().as_slice().unwrap(), + mut_img.view().elem_view().as_slice().unwrap() + ); + } + { + let shape = [3, 2, 7]; + let mut_img = MutTensorDDDR::from_shape_and_val(shape, SVec::::new(0.5f32)); + let copy = MutTensorDDDR::make_copy_from(&mut_img.view()); + assert_eq!(copy.dims(), shape); + let img = ArcTensorDDDR::from_mut_tensor(copy); + assert_eq!(img.dims(), shape); + assert_eq!( + img.view().elem_view().as_slice().unwrap(), + mut_img.view().elem_view().as_slice().unwrap() + ); + } + + // shared_ownership + { + let shape = [4]; + let mut_img = MutTensorDR::from_shape_and_val(shape, SVec::::new(0.5f32)); + let img = ArcTensorDR::from_mut_tensor(mut_img); + + let img2 = img.clone(); + assert_eq!( + img.view().elem_view().as_slice().unwrap(), + img2.view().elem_view().as_slice().unwrap() + ); + + let mut_img2 = img2.to_mut_tensor(); + assert_ne!( + mut_img2.view().elem_view().as_slice().unwrap().as_ptr(), + img2.view().elem_view().as_slice().unwrap().as_ptr() + ); + } + { + let shape = [4, 6]; + let mut_img = MutTensorDDR::from_shape_and_val(shape, SVec::::new(0.5f32)); + let img = ArcTensorDDR::from_mut_tensor(mut_img); + + let img2 = img.clone(); + let mut_img2 = img2.to_mut_tensor(); + assert_ne!( + mut_img2.view().elem_view().as_slice().unwrap().as_ptr(), + img2.view().elem_view().as_slice().unwrap().as_ptr() + ); + } + { + let shape = [4, 6, 7]; + let mut_img = MutTensorDDDR::from_shape_and_val(shape, SVec::::new(0.5f32)); + let img = ArcTensorDDDR::from_mut_tensor(mut_img); + + let img2 = img.clone(); + let mut_img2 = img2.to_mut_tensor(); + assert_ne!( + mut_img2.view().elem_view().as_slice().unwrap().as_ptr(), + img2.view().elem_view().as_slice().unwrap().as_ptr() + ); + } + + // multi_threading + use crate::tensor::arc_tensor::ArcTensorDDRC; + use crate::tensor::mut_tensor::MutTensorDDRC; + use std::thread; + + let shape = [4, 6]; + let mut_img = MutTensorDDRC::from_shape_and_val(shape, SVec::::new(10, 20, 300)); + let img = ArcTensorDDRC::from_mut_tensor(mut_img); + + thread::scope(|s| { + s.spawn(|| { + println!("{:?}", img); + }); + s.spawn(|| { + println!("{:?}", img); + }); + }); +} diff --git a/crates/sophus_core/src/tensor/element.rs b/crates/sophus_core/src/tensor/element.rs new file mode 100644 index 0000000..fae726b --- /dev/null +++ b/crates/sophus_core/src/tensor/element.rs @@ -0,0 +1,193 @@ +use crate::linalg::scalar::IsCoreScalar; +use crate::linalg::scalar::NumberCategory; +use crate::linalg::SMat; +use crate::linalg::SVec; + +use std::fmt::Debug; +pub use typenum::generic_const_mappings::Const; + +/// Trait for static tensors +pub trait IsStaticTensor< + Scalar: IsCoreScalar + 'static, + const SRANK: usize, + const ROWS: usize, + const COLS: usize, +>: Clone + Debug + num_traits::Zero +{ + /// Set zeros + fn zeros() -> Self { + Self::from_slice(&vec![Scalar::zero(); Self::num_scalars()]) + } + + /// Returns ith scalar element + fn scalar(&self, idx: [usize; SRANK]) -> &Scalar; + + /// Get the rank + fn rank(&self) -> usize { + SRANK + } + + /// Get the number of rows + fn num_rows(&self) -> usize { + ROWS + } + + /// Get the number of columns + fn num_cols(&self) -> usize { + COLS + } + + /// Get the compile time shape as an array + fn sdims() -> [usize; SRANK]; + + /// Number of scalar elements + fn num_scalars() -> usize { + ROWS * COLS + } + + /// Get the stride as an array + fn strides() -> [usize; SRANK]; + + /// Create a tensor from a slice + fn from_slice(slice: &[Scalar]) -> Self; +} + +// Rank 0 tensors +// +// a scalar +impl IsStaticTensor 
for Scalar { + fn scalar(&self, _idx: [usize; 0]) -> &Scalar { + self + } + + fn sdims() -> [usize; 0] { + [] + } + + fn strides() -> [usize; 0] { + [] + } + + fn from_slice(slice: &[Scalar]) -> Self { + slice[0].clone() + } +} + +// A vector +impl IsStaticTensor + for SVec +{ + fn scalar(&self, idx: [usize; 1]) -> &Scalar { + &self[idx[0]] + } + + fn sdims() -> [usize; 1] { + [ROWS] + } + + fn strides() -> [usize; 1] { + [1] + } + + fn from_slice(slice: &[Scalar]) -> Self { + SVec::from_iterator(slice.iter().cloned()) + } +} + +// a matrix +impl + IsStaticTensor for SMat +{ + fn scalar(&self, idx: [usize; 2]) -> &Scalar { + &self[(idx[0], idx[1])] + } + + fn sdims() -> [usize; 2] { + [ROWS, COLS] + } + + fn strides() -> [usize; 2] { + [1, ROWS] + } + + fn from_slice(slice: &[Scalar]) -> Self { + SMat::from_iterator(slice.iter().cloned()) + } +} + +/// Format of a static tensor +#[derive(Debug, Copy, Clone, PartialEq, Eq)] +pub struct STensorFormat { + /// Number category + pub number_category: NumberCategory, + /// Number of bytes per scalar + pub num_bytes_per_scalar: usize, + /// batch size + pub batch_size: usize, + /// number of rows + pub num_rows: usize, + /// number of columns + pub num_cols: usize, +} + +impl STensorFormat { + /// Create a new tensor format struct + pub fn new< + Scalar: IsCoreScalar + 'static, + const ROWS: usize, + const COLS: usize, + const BATCH_SIZE: usize, + >() -> Self { + STensorFormat { + number_category: Scalar::number_category(), + num_rows: ROWS, + num_cols: COLS, + batch_size: BATCH_SIZE, + num_bytes_per_scalar: std::mem::size_of::(), + } + } + + /// Number of bytes + pub fn num_bytes(&self) -> usize { + self.num_rows * self.num_cols * self.num_bytes_per_scalar + } +} + +#[test] +fn test_elements() { + use crate::linalg::scalar::IsScalar; + use crate::linalg::scalar::NumberCategory; + use crate::linalg::BatchScalar; + use crate::linalg::BatchScalarF64; + use crate::linalg::BatchVecF64; + + use crate::linalg::VecF32; + use approx::assert_abs_diff_eq; + assert_eq!(f32::number_category(), NumberCategory::Real); + assert_eq!(u32::number_category(), NumberCategory::Unsigned); + assert_eq!(i32::number_category(), NumberCategory::Signed); + assert_eq!( + BatchScalar::::number_category(), + NumberCategory::Real + ); + + let zeros_vec: VecF32<4> = IsStaticTensor::::from_slice(&[0.0f32, 0.0, 0.0, 0.0]); + for elem in zeros_vec.iter() { + assert_eq!(*elem, 0.0); + } + + let vec = SVec::::new(1.0, 2.0, 3.0); + assert_abs_diff_eq!(vec, SVec::::new(1.0, 2.0, 3.0)); + + let mat = SMat::::new(1.0, 2.0, 3.0, 4.0); + assert_eq!(mat.scalar([0, 0]), &1.0); + assert_eq!(mat.scalar([0, 1]), &2.0); + assert_eq!(mat.scalar([1, 0]), &3.0); + assert_eq!(mat.scalar([1, 1]), &4.0); + assert_abs_diff_eq!(mat, SMat::::new(1.0, 2.0, 3.0, 4.0)); + + let batch_vec: BatchVecF64<2, 2> = + BatchVecF64::from_element(BatchScalarF64::from_real_array([1.0, 2.0])); + assert_eq!(batch_vec.scalar([0]).extract_single(0), 1.0); + assert_eq!(batch_vec.scalar([1]).extract_single(1), 2.0); +} diff --git a/crates/sophus_tensor/src/layout.rs b/crates/sophus_core/src/tensor/layout.rs similarity index 98% rename from crates/sophus_tensor/src/layout.rs rename to crates/sophus_core/src/tensor/layout.rs index d62b916..5723340 100644 --- a/crates/sophus_tensor/src/layout.rs +++ b/crates/sophus_core/src/tensor/layout.rs @@ -1,8 +1,9 @@ use std::ops::Range; -use crate::sophus_calculus::types::M; +use crate::sophus_calculus::linalg::M; -use crate::element::{IsScalar, IsStaticTensor}; +use 
crate::tensor::element::IsScalar; +use crate::tensor::element::IsStaticTensor; pub trait HasShape { fn dims(&self) -> TensorShape; diff --git a/crates/sophus_tensor/src/mut_tensor.rs b/crates/sophus_core/src/tensor/mut_tensor.rs similarity index 70% rename from crates/sophus_tensor/src/mut_tensor.rs rename to crates/sophus_core/src/tensor/mut_tensor.rs index 42a56b3..4a969f6 100644 --- a/crates/sophus_tensor/src/mut_tensor.rs +++ b/crates/sophus_core/src/tensor/mut_tensor.rs @@ -1,17 +1,13 @@ -use crate::arc_tensor::ArcTensor; -use crate::element::BatchMat; -use crate::element::BatchScalar; -use crate::element::BatchVec; -use crate::element::IsStaticTensor; -use crate::element::IsTensorScalar; -use crate::element::SMat; -use crate::element::SVec; -use crate::mut_view::IsMutTensorLike; -use crate::mut_view::MutTensorView; -use crate::view::IsTensorLike; -use crate::view::IsTensorView; -use crate::view::TensorView; - +use crate::linalg::scalar::IsCoreScalar; +use crate::linalg::SMat; +use crate::linalg::SVec; +use crate::tensor::arc_tensor::ArcTensor; +use crate::tensor::element::IsStaticTensor; +use crate::tensor::mut_tensor_view::IsMutTensorLike; +use crate::tensor::mut_tensor_view::MutTensorView; +use crate::tensor::tensor_view::IsTensorLike; +use crate::tensor::tensor_view::IsTensorView; +use crate::tensor::tensor_view::TensorView; use ndarray::Dim; use ndarray::Ix; use std::fmt::Debug; @@ -25,11 +21,10 @@ pub struct MutTensor< const TOTAL_RANK: usize, const DRANK: usize, const SRANK: usize, - Scalar: IsTensorScalar + 'static, - STensor: IsStaticTensor + 'static, + Scalar: IsCoreScalar + 'static, + STensor: IsStaticTensor + 'static, const ROWS: usize, const COLS: usize, - const BATCH_SIZE: usize, > where ndarray::Dim<[ndarray::Ix; DRANK]>: ndarray::Dimension, { @@ -46,10 +41,10 @@ pub trait InnerVecToMat< const SRANK: usize, const HYBER_RANK_PLUS1: usize, const SRANK_PLUS1: usize, - Scalar: IsTensorScalar + 'static, + Scalar: IsCoreScalar + 'static, const ROWS: usize, > where - SVec: IsStaticTensor, + SVec: IsStaticTensor, ndarray::Dim<[ndarray::Ix; DRANK]>: ndarray::Dimension, { /// The output tensor @@ -66,9 +61,9 @@ pub trait InnerScalarToVec< const SRANK: usize, const HYBER_RANK_PLUS1: usize, const SRANK_PLUS1: usize, - Scalar: IsTensorScalar + 'static, + Scalar: IsCoreScalar + 'static, > where - SVec: IsStaticTensor, + SVec: IsStaticTensor, ndarray::Dim<[ndarray::Ix; DRANK]>: ndarray::Dimension, { /// The output tensor @@ -78,7 +73,7 @@ pub trait InnerScalarToVec< fn inner_scalar_to_vec(self) -> Self::Output; } -impl InnerVecToMat<3, 1, 2, 4, 2, Scalar, ROWS> +impl InnerVecToMat<3, 1, 2, 4, 2, Scalar, ROWS> for MutTensorXR<3, 2, 1, Scalar, ROWS> { type Output = MutTensorXRC<4, 2, 2, Scalar, ROWS, 1>; @@ -91,31 +86,21 @@ impl InnerVecToMat<3, 1, 2, } } -impl InnerScalarToVec<2, 0, 2, 3, 1, Scalar> +impl InnerScalarToVec<2, 0, 2, 3, 1, Scalar> for MutTensorX<2, Scalar> { type Output = MutTensorXR<3, 2, 1, Scalar, 1>; fn inner_scalar_to_vec(self) -> MutTensorXR<3, 2, 1, Scalar, 1> { MutTensorXR::<3, 2, 1, Scalar, 1> { - mut_array: self.mut_array.map(|x| SVec::::new(*x)), + mut_array: self.mut_array.map(|x| SVec::::new(x.clone())), phantom: PhantomData, } } } /// Mutable tensor of scalars -pub type MutTensorX = - MutTensor; - -/// Mutable tensor of batched scalars -pub type MutTensorXB< - const TOTAL_RANK: usize, - const DRANK: usize, - const SRANK: usize, - Scalar, - const B: usize, -> = MutTensor, 1, 1, B>; +pub type MutTensorX = MutTensor; /// Mutable tensor of vectors with shape R 
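// ---------------------------------------------------------------------------
// Illustrative sketch (annotation, not part of the patch): in these aliases,
// `D` marks a runtime dimension and `R`/`C` the compile-time rows/columns of
// the static inner tensor, so `MutTensorDDR<f32, 3>` is a [D0 x D1] array of
// 3-vectors. A small test of the unary `from_map` constructor defined further
// down in this file, assuming the aliases from this module:
#[test]
fn from_map_demo() {
    use crate::linalg::SVec;
    use crate::tensor::tensor_view::IsTensorLike;

    // A scalar [2 x 3] tensor, every element 1.0.
    let scalars = MutTensorDD::from_shape_and_val([2, 3], 1.0f32);

    // Lift each scalar into a 3-vector.
    let vectors = MutTensorDDR::<f32, 3>::from_map(&scalars.view(), |s| {
        SVec::<f32, 3>::new(*s, 2.0 * *s, 3.0 * *s)
    });
    assert_eq!(vectors.dims(), [2, 3]);
    assert_eq!(vectors.get([0, 0]), SVec::<f32, 3>::new(1.0, 2.0, 3.0));
}
// ---------------------------------------------------------------------------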
pub type MutTensorXR< @@ -124,17 +109,7 @@ pub type MutTensorXR< const SRANK: usize, Scalar, const R: usize, -> = MutTensor, R, 1, 1>; - -/// Mutable tensor of batched vectors with shape [R x B] -pub type MutTensorXRB< - const TOTAL_RANK: usize, - const DRANK: usize, - const SRANK: usize, - Scalar, - const R: usize, - const B: usize, -> = MutTensor, R, 1, B>; +> = MutTensor, R, 1>; /// Mutable tensor of matrices with shape [R x C] pub type MutTensorXRC< @@ -144,18 +119,7 @@ pub type MutTensorXRC< Scalar, const R: usize, const C: usize, -> = MutTensor, R, C, 1>; - -/// Mutable tensor of batched matrices with shape [R x C x B] -pub type MutTensorXRCB< - const TOTAL_RANK: usize, - const DRANK: usize, - const SRANK: usize, - Scalar, - const R: usize, - const C: usize, - const B: usize, -> = MutTensor, R, C, B>; +> = MutTensor, R, C>; /// rank-1 mutable tensor of scalars with shape D0 pub type MutTensorD = MutTensorX<1, Scalar>; @@ -163,47 +127,37 @@ pub type MutTensorD = MutTensorX<1, Scalar>; /// rank-2 mutable tensor of scalars with shape [D0 x D1] pub type MutTensorDD = MutTensorX<2, Scalar>; -/// rank-2 mutable tensor of batched scalars with shape [D0 x B] -pub type MutTensorDB = MutTensorXB<2, 1, 1, Scalar, B>; - /// rank-2 mutable tensor of vectors with shape [D0 x R] pub type MutTensorDR = MutTensorXR<2, 1, 1, Scalar, R>; /// rank-3 mutable tensor of scalars with shape [D0 x D1 x D2] pub type MutTensorDDD = MutTensorX<3, Scalar>; -/// rank-3 mutable tensor of batched scalars with shape [D0 x D1 x B] -pub type MutTensorDDB = MutTensorXB<3, 2, 1, Scalar, B>; - /// rank-3 mutable tensor of vectors with shape [D0 x D1 x R] pub type MutTensorDDR = MutTensorXR<3, 2, 1, Scalar, R>; -/// rank-3 mutable tensor of batched vectors with shape [D0 x R x B] -pub type MutTensorDRB = MutTensorXRB<3, 1, 2, Scalar, R, B>; - /// rank-3 mutable tensor of matrices with shape [D0 x R x C] pub type MutTensorDRC = MutTensorXRC<3, 1, 2, Scalar, R, C>; /// rank-4 mutable tensor of scalars with shape [D0 x D1 x D2 x D3] pub type MutTensorDDDD = MutTensorX<4, Scalar>; -/// rank-4 mutable tensor of batched scalars with shape [D0 x D1 x D2 x B] -pub type MutTensorDDDB = MutTensorXB<4, 3, 1, Scalar, B>; - /// rank-4 mutable tensor of vectors with shape [D0 x D1 x D2 x R] pub type MutTensorDDDR = MutTensorXR<4, 3, 1, Scalar, R>; -/// rank-4 mutable tensor of batched vectors with shape [D0 x D1 x R x B] -pub type MutTensorDDRB = - MutTensorXRB<4, 2, 2, Scalar, R, B>; - -/// rank-4 mutable tensor of matrices with shape [D0 x R x C x B] +/// rank-4 mutable tensor of matrices with shape [D0 x D1 x R x C] pub type MutTensorDDRC = MutTensorXRC<4, 2, 2, Scalar, R, C>; -/// rank-4 mutable tensor of batched matrices with shape [D0 x R x C x B] -pub type MutTensorDRCB = - MutTensorXRCB<4, 1, 3, Scalar, R, C, B>; +/// rank-5 mutable tensor of scalars with shape [D0 x D1 x D2 x D3 x D4] +pub type MutTensorDDDDD = MutTensorX<5, Scalar>; + +/// rank-5 mutable tensor of vectors with shape [D0 x D1 x D2 x D3 x R] +pub type MutTensorDDDDR = MutTensorXR<5, 4, 1, Scalar, R>; + +/// rank-5 mutable tensor of matrices with shape [D0 x D1 x D2 x R x C] +pub type MutTensorDDDRC = + MutTensorXRC<5, 3, 2, Scalar, R, C>; macro_rules! mut_tensor_is_view { ($scalar_rank:literal, $srank:literal, $drank:literal) => { @@ -211,13 +165,12 @@ macro_rules! 
mut_tensor_is_view { impl< 'a, - Scalar: IsTensorScalar + 'static, - STensor: IsStaticTensor + 'static, + Scalar: IsCoreScalar + 'static, + STensor: IsStaticTensor + 'static, const ROWS: usize, const COLS: usize, - const BATCH_SIZE: usize, - > IsTensorLike<'a, $scalar_rank, $drank, $srank, Scalar, STensor, ROWS, COLS, BATCH_SIZE> - for MutTensor<$scalar_rank, $drank, $srank, Scalar, STensor, ROWS, COLS, BATCH_SIZE> + > IsTensorLike<'a, $scalar_rank, $drank, $srank, Scalar, STensor, ROWS, COLS> + for MutTensor<$scalar_rank, $drank, $srank, Scalar, STensor, ROWS, COLS> { fn elem_view<'b:'a>( &'b self, @@ -249,7 +202,7 @@ macro_rules! mut_tensor_is_view { fn to_mut_tensor( &self, - ) -> MutTensor<$scalar_rank, $drank, $srank, Scalar, STensor, ROWS, COLS, BATCH_SIZE> { + ) -> MutTensor<$scalar_rank, $drank, $srank, Scalar, STensor, ROWS, COLS> { MutTensor { mut_array: self.elem_view().to_owned(), phantom: PhantomData::default(), @@ -259,18 +212,18 @@ macro_rules! mut_tensor_is_view { impl< 'a, - Scalar: IsTensorScalar + 'static, - STensor: IsStaticTensor + 'static, + Scalar: IsCoreScalar + 'static, + STensor: IsStaticTensor + 'static, const ROWS: usize, const COLS: usize, - const BATCH_SIZE: usize, + > IsMutTensorLike<'a, $scalar_rank, $drank, $srank, Scalar, STensor, - ROWS, COLS, BATCH_SIZE + ROWS, COLS > - for MutTensor<$scalar_rank, $drank, $srank, Scalar, STensor, ROWS, COLS, BATCH_SIZE> + for MutTensor<$scalar_rank, $drank, $srank, Scalar, STensor, ROWS, COLS> { fn elem_view_mut<'b:'a>( &'b mut self, @@ -288,13 +241,93 @@ macro_rules! mut_tensor_is_view { } } - impl<'a, Scalar: IsTensorScalar+ 'static, - STensor: IsStaticTensor + 'static, + impl<'a, Scalar: IsCoreScalar+ 'static, + STensor: IsStaticTensor + 'static, + const ROWS: usize, + const COLS: usize, + + > PartialEq for + MutTensor<$scalar_rank, $drank, $srank, Scalar, STensor, ROWS, COLS> + { + fn eq(&self, other: &Self) -> bool { + self.view().scalar_view == other.view().scalar_view + } + } + + impl<'a, Scalar: IsCoreScalar+ 'static, + STensor: IsStaticTensor + 'static, const ROWS: usize, const COLS: usize, - const BATCH_SIZE: usize, + > - MutTensor<$scalar_rank, $drank, $srank, Scalar, STensor, ROWS, COLS, BATCH_SIZE> + MutTensor<$scalar_rank, $drank, $srank, Scalar, STensor, ROWS, COLS> + { + + /// create a new tensor from a shape - filled with zeros + pub fn from_shape(size: [usize; $drank]) -> Self { + MutTensor::<$scalar_rank, $drank, $srank, Scalar, STensor, + ROWS, COLS>::from_shape_and_val( + size, num_traits::Zero::zero() + ) + } + + /// create a new mutable tensor by applying a binary operator to each element of two + /// other tensors + pub fn from_map2< + 'b, + const OTHER_HRANK: usize, const OTHER_SRANK: usize, + OtherScalar: IsCoreScalar + 'static, + OtherSTensor: IsStaticTensor< + OtherScalar, OTHER_SRANK, OTHER_ROWS, OTHER_COLS + > + 'static, + const OTHER_ROWS: usize, const OTHER_COLS: usize, + V : IsTensorView::<'b, + OTHER_HRANK, $drank, OTHER_SRANK, + OtherScalar, OtherSTensor, + OTHER_ROWS, OTHER_COLS + >, + const OTHER_HRANK2: usize, const OTHER_SRANK2: usize, + OtherScalar2: IsCoreScalar + 'static, + OtherSTensor2: IsStaticTensor< + OtherScalar2, OTHER_SRANK2, OTHER_ROWS2, OTHER_COLS2, + > + 'static, + const OTHER_ROWS2: usize, const OTHER_COLS2: usize, + V2 : IsTensorView::<'b, + OTHER_HRANK2, $drank, OTHER_SRANK2, + OtherScalar2, OtherSTensor2, + OTHER_ROWS2, OTHER_COLS2 + >, + F: FnMut(&OtherSTensor, &OtherSTensor2)->STensor + >( + view: &'b V, + view2: &'b V2, + mut op: F, + ) + -> Self + where + 
ndarray::Dim<[ndarray::Ix; OTHER_HRANK]>: ndarray::Dimension, + ndarray::Dim<[ndarray::Ix; OTHER_HRANK2]>: ndarray::Dimension + + { + let mut out = Self::from_shape(view.dims()); + ndarray::Zip::from(&mut out.elem_view_mut()) + .and(&view.elem_view()) + .and(&view2.elem_view()) + .for_each( + |out, v, v2|{ + *out = op(v,v2); + }); + out + } + } + + impl<'a, Scalar: IsCoreScalar+ 'static, + STensor: IsStaticTensor + 'static, + const ROWS: usize, + const COLS: usize, + + > + MutTensor<$scalar_rank, $drank, $srank, Scalar, STensor, ROWS, COLS> { @@ -304,12 +337,12 @@ macro_rules! mut_tensor_is_view { ) -> MutTensorView<'a, $scalar_rank, $drank, $srank, Scalar, STensor, - ROWS, COLS, BATCH_SIZE> + ROWS, COLS> { MutTensorView::< 'a, $scalar_rank, $drank, $srank, - Scalar, STensor, ROWS, COLS, BATCH_SIZE>::new + Scalar, STensor, ROWS, COLS>::new ( self.mut_array.view_mut() ) @@ -318,19 +351,12 @@ macro_rules! mut_tensor_is_view { /// returns a view of the tensor pub fn view<'b: 'a>(&'b self ) -> TensorView<'a, $scalar_rank, $drank, $srank, Scalar, STensor, - ROWS, COLS, BATCH_SIZE> { + ROWS, COLS> { TensorView::<'a, $scalar_rank, $drank, $srank, Scalar, STensor, - ROWS, COLS, BATCH_SIZE>::new( + ROWS, COLS>::new( self.mut_array.view()) } - /// create a new tensor from a shape - filled with zeros - pub fn from_shape(size: [usize; $drank]) -> Self { - MutTensor::<$scalar_rank, $drank, $srank, Scalar, STensor, - ROWS, COLS, BATCH_SIZE>::from_shape_and_val( - size, STensor::zero() - ) - } /// create a new tensor from a shape and a value pub fn from_shape_and_val @@ -347,7 +373,7 @@ macro_rules! mut_tensor_is_view { /// create a new mutable tensor by copying from another tensor pub fn make_copy_from( - v: &TensorView<$scalar_rank, $drank, $srank, Scalar, STensor, ROWS, COLS, BATCH_SIZE> + v: &TensorView<$scalar_rank, $drank, $srank, Scalar, STensor, ROWS, COLS> ) -> Self { IsTensorLike::to_mut_tensor(v) @@ -355,13 +381,13 @@ macro_rules! mut_tensor_is_view { /// return ArcTensor copy of the mutable tensor pub fn to_shared(self) - -> ArcTensor::<$scalar_rank, $drank, $srank, Scalar, STensor, ROWS, COLS, BATCH_SIZE> + -> ArcTensor::<$scalar_rank, $drank, $srank, Scalar, STensor, ROWS, COLS> { ArcTensor::< $scalar_rank, $drank, $srank, Scalar, STensor, - ROWS, COLS, BATCH_SIZE>::from_mut_tensor(self) + ROWS, COLS>::from_mut_tensor(self) } /// create a new mutable tensor by applying a unary operator to each element of another @@ -369,17 +395,17 @@ macro_rules! mut_tensor_is_view { pub fn from_map< 'b, const OTHER_HRANK: usize, const OTHER_SRANK: usize, - OtherScalar: IsTensorScalar+ 'static, + OtherScalar: IsCoreScalar+ 'static, OtherSTensor: IsStaticTensor< OtherScalar, OTHER_SRANK, - OTHER_ROWS, OTHER_COLS, OTHER_BATCHES + OTHER_ROWS, OTHER_COLS > + 'static, - const OTHER_ROWS: usize, const OTHER_COLS: usize, const OTHER_BATCHES: usize, + const OTHER_ROWS: usize, const OTHER_COLS: usize, V : IsTensorView::< 'b, OTHER_HRANK, $drank, OTHER_SRANK, OtherScalar, OtherSTensor, - OTHER_ROWS, OTHER_COLS, OTHER_BATCHES + OTHER_ROWS, OTHER_COLS >, F: FnMut(&OtherSTensor)-> STensor > ( @@ -396,54 +422,7 @@ macro_rules! 
mut_tensor_is_view { } } - /// create a new mutable tensor by applying a binary operator to each element of two - /// other tensors - pub fn from_map2< - 'b, - const OTHER_HRANK: usize, const OTHER_SRANK: usize, - OtherScalar: IsTensorScalar + 'static, - OtherSTensor: IsStaticTensor< - OtherScalar, OTHER_SRANK, OTHER_ROWS, OTHER_COLS, OTHER_BATCHES - > + 'static, - const OTHER_ROWS: usize, const OTHER_COLS: usize, const OTHER_BATCHES: usize, - V : IsTensorView::<'b, - OTHER_HRANK, $drank, OTHER_SRANK, - OtherScalar, OtherSTensor, - OTHER_ROWS, OTHER_COLS, OTHER_BATCHES - >, - const OTHER_HRANK2: usize, const OTHER_SRANK2: usize, - OtherScalar2: IsTensorScalar + 'static, - OtherSTensor2: IsStaticTensor< - OtherScalar2, OTHER_SRANK2, OTHER_ROWS2, OTHER_COLS2, OTHER_BATCHES2, - > + 'static, - const OTHER_ROWS2: usize, const OTHER_COLS2: usize, const OTHER_BATCHES2: usize, - V2 : IsTensorView::<'b, - OTHER_HRANK2, $drank, OTHER_SRANK2, - OtherScalar2, OtherSTensor2, - OTHER_ROWS2, OTHER_COLS2, OTHER_BATCHES2 - >, - F: FnMut(&OtherSTensor, &OtherSTensor2)->STensor - >( - view: &'b V, - view2: &'b V2, - mut op: F, - ) - -> Self - where - ndarray::Dim<[ndarray::Ix; OTHER_HRANK]>: ndarray::Dimension, - ndarray::Dim<[ndarray::Ix; OTHER_HRANK2]>: ndarray::Dimension - { - let mut out = Self::from_shape(view.dims()); - ndarray::Zip::from(&mut out.elem_view_mut()) - .and(&view.elem_view()) - .and(&view2.elem_view()) - .for_each( - |out, v, v2|{ - *out = op(v,v2); - }); - out - } } }; } @@ -457,44 +436,39 @@ mut_tensor_is_view!(3, 2, 1); mut_tensor_is_view!(4, 0, 4); mut_tensor_is_view!(4, 1, 3); mut_tensor_is_view!(4, 2, 2); -mut_tensor_is_view!(4, 3, 1); - -#[cfg(test)] -mod tests { - use simba::simd::AutoSimd; - - use super::*; - - #[test] - fn empty_image() { - { - let _rank1_tensor = MutTensorD::::default(); - //assert!(rank1_tensor.is_empty()); - let shape = [2]; - let tensor_f32 = MutTensorD::from_shape_and_val(shape, 0.0); - //assert!(!tensor_f32.is_empty()); - assert_eq!(tensor_f32.view().dims(), shape); - } - { - let _rank2_tensor = MutTensorDD::::default(); - //assert!(rank2_tensor.is_empty()); - let shape = [3, 2]; - let tensor_f32 = MutTensorDD::::from_shape(shape); - // assert!(!tensor_f32.is_empty()); - assert_eq!(tensor_f32.view().dims(), shape); - } - { - let _rank3_tensor = MutTensorDDD::::default(); - // assert!(rank3_tensor.is_empty()); - let shape = [3, 2, 4]; - let tensor_f32 = MutTensorDDD::::from_shape(shape); - // assert!(!tensor_f32.is_empty()); - assert_eq!(tensor_f32.view().dims(), shape); - } +mut_tensor_is_view!(5, 0, 5); +mut_tensor_is_view!(5, 1, 4); +mut_tensor_is_view!(5, 2, 3); + +#[test] +fn mut_tensor_tests() { + use crate::linalg::BatchMatF64; + { + let _rank1_tensor = MutTensorD::::default(); + //assert!(rank1_tensor.is_empty()); + let shape = [2]; + let tensor_f32 = MutTensorD::from_shape_and_val(shape, 0.0); + //assert!(!tensor_f32.is_empty()); + assert_eq!(tensor_f32.view().dims(), shape); } - - #[test] - pub fn transform() { + { + let _rank2_tensor = MutTensorDD::::default(); + //assert!(rank2_tensor.is_empty()); + let shape = [3, 2]; + let tensor_f32 = MutTensorDD::::from_shape(shape); + // assert!(!tensor_f32.is_empty()); + assert_eq!(tensor_f32.view().dims(), shape); + } + { + let _rank3_tensor = MutTensorDDD::::default(); + // assert!(rank3_tensor.is_empty()); + let shape = [3, 2, 4]; + let tensor_f32 = MutTensorDDD::::from_shape(shape); + // assert!(!tensor_f32.is_empty()); + assert_eq!(tensor_f32.view().dims(), shape); + } + //transform + { let shape = 
[3]; { let tensor_f32 = MutTensorD::from_shape_and_val(shape, 1.0); @@ -543,22 +517,22 @@ mod tests { } } - #[test] - pub fn types() { + //linalg + { let shape = [3]; let _tensor_u8 = MutTensorD::from_shape_and_val(shape, 0); - let _tensor_f32 = MutTensorDRC::from_shape_and_val(shape, SMat::::zeros()); + let _tensor_f64 = MutTensorDRC::from_shape_and_val(shape, SMat::::zeros()); let _tensor_batched_f32 = - MutTensorDRCB::from_shape_and_val(shape, SMat::, 4, 4>::zeros()); + MutTensorDRC::from_shape_and_val(shape, BatchMatF64::<2, 3, 4>::zeros()); } - #[test] - pub fn from_raw_data() { + //from_raw_data + { let shape = [1]; let data = [1.0, 2.0, 3.0, 4.0, 5.0, 6.0]; let data_mat = SMat::::from_vec(data.to_vec()); - let tensor_f32 = MutTensorDRC::from_shape_and_val(shape, data_mat); // CxWxH + let tensor_f32 = MutTensorDRC::from_shape_and_val(shape, data_mat); assert_eq!(tensor_f32.dims(), shape); assert_eq!(tensor_f32.view().scalar_get([0, 0, 0]), data[0]); assert_eq!(tensor_f32.view().scalar_get([0, 1, 0]), data[1]); diff --git a/crates/sophus_tensor/src/mut_view.rs b/crates/sophus_core/src/tensor/mut_tensor_view.rs similarity index 84% rename from crates/sophus_tensor/src/mut_view.rs rename to crates/sophus_core/src/tensor/mut_tensor_view.rs index 7796fce..05fc4fa 100644 --- a/crates/sophus_tensor/src/mut_view.rs +++ b/crates/sophus_core/src/tensor/mut_tensor_view.rs @@ -1,11 +1,10 @@ -use crate::mut_tensor::MutTensor; -use crate::view::IsTensorLike; -use crate::view::IsTensorView; -use crate::view::TensorView; +use crate::linalg::scalar::IsCoreScalar; +use crate::tensor::element::IsStaticTensor; +use crate::tensor::mut_tensor::MutTensor; +use crate::tensor::tensor_view::IsTensorLike; +use crate::tensor::tensor_view::IsTensorView; +use crate::tensor::tensor_view::TensorView; use concat_arrays::concat_arrays; - -use crate::element::IsStaticTensor; -use crate::element::IsTensorScalar; use std::marker::PhantomData; /// Mutable tensor view @@ -17,11 +16,10 @@ pub struct MutTensorView< const TOTAL_RANK: usize, const DRANK: usize, const SRANK: usize, - Scalar: IsTensorScalar + 'static, - STensor: IsStaticTensor + 'static, + Scalar: IsCoreScalar + 'static, + STensor: IsStaticTensor + 'static, const ROWS: usize, const COLS: usize, - const BATCH_SIZE: usize, > where ndarray::Dim<[ndarray::Ix; DRANK]>: ndarray::Dimension, ndarray::Dim<[ndarray::Ix; TOTAL_RANK]>: ndarray::Dimension, @@ -38,12 +36,11 @@ pub trait IsMutTensorLike< const TOTAL_RANK: usize, const DRANK: usize, const SRANK: usize, - Scalar: IsTensorScalar + 'static, - STensor: IsStaticTensor + 'static, + Scalar: IsCoreScalar + 'static, + STensor: IsStaticTensor + 'static, const ROWS: usize, const COLS: usize, - const BATCH_SIZE: usize, ->: IsTensorLike<'a, TOTAL_RANK, DRANK, SRANK, Scalar, STensor, ROWS, COLS, BATCH_SIZE> where +>: IsTensorLike<'a, TOTAL_RANK, DRANK, SRANK, Scalar, STensor, ROWS, COLS> where ndarray::Dim<[ndarray::Ix; DRANK]>: ndarray::Dimension, ndarray::Dim<[ndarray::Ix; TOTAL_RANK]>: ndarray::Dimension, { @@ -66,18 +63,17 @@ macro_rules! 
mut_view_is_view { impl< 'a, - Scalar: IsTensorScalar + 'static, - STensor: IsStaticTensor + 'static, + Scalar: IsCoreScalar + 'static, + STensor: IsStaticTensor + 'static, const ROWS: usize, const COLS: usize, - const BATCH_SIZE: usize, - > MutTensorView<'a, $scalar_rank, $drank, $srank, Scalar, STensor, ROWS, COLS, BATCH_SIZE> + > MutTensorView<'a, $scalar_rank, $drank, $srank, Scalar, STensor, ROWS, COLS> { /// Returns a tensor view pub fn view( & self, - ) -> TensorView<'_, $scalar_rank, $drank, $srank, Scalar, STensor, ROWS, COLS, BATCH_SIZE> + ) -> TensorView<'_, $scalar_rank, $drank, $srank, Scalar, STensor, ROWS, COLS> { let v = TensorView { elem_view: self.elem_view_mut.view(), @@ -108,7 +104,7 @@ macro_rules! mut_view_is_view { let ptr = elem_view_mut.as_ptr() as *mut Scalar; use ndarray::ShapeBuilder; assert_eq!(std::mem::size_of::(), - std::mem::size_of::() * ROWS * COLS* BATCH_SIZE + std::mem::size_of::() * ROWS * COLS ); let scalar_view_mut = @@ -130,17 +126,15 @@ macro_rules! mut_view_is_view { 'b, const OTHER_HRANK: usize, const OTHER_SRANK: usize, - OtherScalar: IsTensorScalar + 'static, + OtherScalar: IsCoreScalar + 'static, OtherSTensor: IsStaticTensor< OtherScalar, OTHER_SRANK, OTHER_ROWS, OTHER_COLS, - OTHER_BATCHES, > + 'static, const OTHER_ROWS: usize, const OTHER_COLS: usize, - const OTHER_BATCHES: usize, V : IsTensorView::< 'b, OTHER_HRANK, @@ -150,7 +144,6 @@ macro_rules! mut_view_is_view { OtherSTensor, OTHER_ROWS, OTHER_COLS, - OTHER_BATCHES, >, F: FnMut(&mut STensor, &OtherSTensor) >( @@ -169,12 +162,11 @@ macro_rules! mut_view_is_view { impl< 'a, - Scalar: IsTensorScalar + 'static, - STensor: IsStaticTensor + 'static, + Scalar: IsCoreScalar + 'static, + STensor: IsStaticTensor + 'static, const ROWS: usize, const COLS: usize, - const BATCH_SIZE: usize, - > IsTensorLike<'a, $scalar_rank, $drank, $srank, Scalar, STensor, ROWS, COLS, BATCH_SIZE> + > IsTensorLike<'a, $scalar_rank, $drank, $srank, Scalar, STensor, ROWS, COLS> for MutTensorView< 'a, $scalar_rank, @@ -183,9 +175,8 @@ macro_rules! mut_view_is_view { Scalar, STensor, ROWS, - COLS, - BATCH_SIZE> - { + COLS +> { fn elem_view<'b:'a>( &'b self, ) -> ndarray::ArrayView<'a, STensor, ndarray::Dim<[ndarray::Ix; $drank]>> { @@ -216,7 +207,7 @@ macro_rules! mut_view_is_view { fn to_mut_tensor( &self, - ) -> MutTensor<$scalar_rank, $drank, $srank, Scalar, STensor, ROWS, COLS, BATCH_SIZE> { + ) -> MutTensor<$scalar_rank, $drank, $srank, Scalar, STensor, ROWS, COLS> { MutTensor { mut_array: self.view().elem_view.to_owned(), phantom: PhantomData::default(), @@ -226,28 +217,25 @@ macro_rules! 
mut_view_is_view { impl< 'a, - Scalar: IsTensorScalar + 'static, - STensor: IsStaticTensor + 'static, + Scalar: IsCoreScalar + 'static, + STensor: IsStaticTensor + 'static, const ROWS: usize, const COLS: usize, - const BATCH_SIZE: usize, > IsMutTensorLike<'a, $scalar_rank, $drank, $srank, Scalar, STensor, ROWS, - COLS, - BATCH_SIZE - > + COLS > for MutTensorView<'a, $scalar_rank, $drank, $srank, Scalar, STensor, - ROWS, COLS, - BATCH_SIZE + ROWS, + COLS, > { fn elem_view_mut<'b:'a>( &'b mut self, @@ -280,4 +268,6 @@ mut_view_is_view!(3, 2, 1); mut_view_is_view!(4, 0, 4); mut_view_is_view!(4, 1, 3); mut_view_is_view!(4, 2, 2); -mut_view_is_view!(4, 3, 1); +mut_view_is_view!(5, 0, 5); +mut_view_is_view!(5, 1, 4); +mut_view_is_view!(5, 2, 3); diff --git a/crates/sophus_tensor/src/view.rs b/crates/sophus_core/src/tensor/tensor_view.rs similarity index 56% rename from crates/sophus_tensor/src/view.rs rename to crates/sophus_core/src/tensor/tensor_view.rs index f02880c..40cf98e 100644 --- a/crates/sophus_tensor/src/view.rs +++ b/crates/sophus_core/src/tensor/tensor_view.rs @@ -1,11 +1,8 @@ -use crate::element::BatchMat; -use crate::element::BatchScalar; -use crate::element::BatchVec; -use crate::element::IsStaticTensor; -use crate::element::IsTensorScalar; -use crate::element::SMat; -use crate::element::SVec; -use crate::mut_tensor::MutTensor; +use crate::linalg::scalar::IsCoreScalar; +use crate::linalg::SMat; +use crate::linalg::SVec; +use crate::tensor::element::IsStaticTensor; +use crate::tensor::mut_tensor::MutTensor; use concat_arrays::concat_arrays; use std::marker::PhantomData; @@ -23,20 +20,13 @@ use std::marker::PhantomData; /// ``self.get(idx)``, where idx is of type ``[usize; DRANK]``. /// - Each element is of type ``STensor``. /// * Each static tensor is of rank SRANK. In particular, we have: -/// - rank 0: scalars of type ``Scalar`` (such as ``f64`` or ``u8``). +/// - rank 0: scalars of type ``Scalar`` (such as ``u8`` or ``BatchF64<8>``). /// - rank 1: -/// * A batch scalar of type ``BatchScalar`` with static -/// batch size of BATCH_SIZE. /// * A column vector ``SVec`` aka ``nalgebra::SVector`` with /// ROWS rows. /// - rank 2: -/// * A batch vector of type ``BatchVector`` with static -/// shape (ROWS x BATCH_SIZE). /// * A matrix ``SMat`` aka ``nalgebra::SMatrix`` /// with static shape (ROWS x COLS). -/// - rank 3: -/// * A batch matrix of type ``BatchMatrix`` with static -/// shape (BATCH_SIZE x ROWS .x COLS). /// 2. A scalar tensor of TOTAL_RANK = DRANK + SRANK. /// * ``self.scalar_dims()`` is used to access its dimensions of type /// ``[usize; TOTAL_RANK]`` at runtime.
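To make the element-view vs. scalar-view split described above concrete, here is a minimal sketch against the post-refactor API. The module paths and exact alias signatures are assumptions inferred from this patch (the generic argument lists are partially elided in the diff); the calls mirror the patch's own tests:

```rust
use sophus_core::linalg::SMat;
use sophus_core::tensor::mut_tensor::MutTensorDRC;
use sophus_core::tensor::tensor_view::IsTensorLike;

fn main() {
    // Five elements, each a static 2x3 f64 matrix: DRANK = 1, SRANK = 2, TOTAL_RANK = 3.
    let tensor = MutTensorDRC::<f64, 2, 3>::from_shape_and_val([5], SMat::<f64, 2, 3>::zeros());

    // Element view: indexed by DRANK = 1 indices.
    assert_eq!(tensor.view().dims(), [5]);
    // Scalar view: indexed by TOTAL_RANK = DRANK + SRANK = 3 indices.
    assert_eq!(tensor.view().scalar_dims(), [5, 2, 3]);
}
```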
@@ -48,11 +38,10 @@ pub struct TensorView< const TOTAL_RANK: usize, const DRANK: usize, const SRANK: usize, - Scalar: IsTensorScalar + 'static, - STensor: IsStaticTensor + 'static, + Scalar: IsCoreScalar + 'static, + STensor: IsStaticTensor + 'static, const ROWS: usize, const COLS: usize, - const BATCH_SIZE: usize, > where ndarray::Dim<[ndarray::Ix; DRANK]>: ndarray::Dimension, ndarray::Dim<[ndarray::Ix; TOTAL_RANK]>: ndarray::Dimension, @@ -65,17 +54,7 @@ pub struct TensorView< /// Tensor view of scalars pub type TensorViewX<'a, const DRANK: usize, Scalar> = - TensorView<'a, DRANK, DRANK, 0, Scalar, Scalar, 1, 1, 1>; - -/// Tensor view of batched scalars -pub type TensorViewXB< - 'a, - const TOTAL_RANK: usize, - const DRANK: usize, - const SRANK: usize, - Scalar, - const B: usize, -> = TensorView<'a, TOTAL_RANK, DRANK, SRANK, Scalar, BatchScalar, 1, 1, B>; + TensorView<'a, DRANK, DRANK, 0, Scalar, Scalar, 1, 1>; /// Tensor view of vectors with shape R pub type TensorViewXR< 'a, const TOTAL_RANK: usize, const DRANK: usize, const SRANK: usize, Scalar, const R: usize, -> = TensorView<'a, TOTAL_RANK, DRANK, SRANK, Scalar, SVec, R, 1, 1>; - -/// Tensor view of batched vectors with shape [R x B] -pub type TensorViewXRB< - 'a, - const TOTAL_RANK: usize, - const DRANK: usize, - const SRANK: usize, - Scalar, - const R: usize, - const B: usize, -> = TensorView<'a, TOTAL_RANK, DRANK, SRANK, Scalar, BatchVec, R, 1, B>; +> = TensorView<'a, TOTAL_RANK, DRANK, SRANK, Scalar, SVec, R, 1>; /// Tensor view of matrices with shape [R x C] pub type TensorViewXRC< 'a, const TOTAL_RANK: usize, const DRANK: usize, const SRANK: usize, Scalar, const R: usize, const C: usize, -> = TensorView<'a, TOTAL_RANK, DRANK, SRANK, Scalar, SMat, R, C, 1>; - -/// Tensor view of batched matrices with shape [R x C x B] -pub type TensorViewXRCB< - 'a, - const TOTAL_RANK: usize, - const DRANK: usize, - const SRANK: usize, - Scalar, - const R: usize, - const C: usize, - const B: usize, -> = TensorView<'a, TOTAL_RANK, DRANK, SRANK, Scalar, BatchMat, R, C, B>; +> = TensorView<'a, TOTAL_RANK, DRANK, SRANK, Scalar, SMat, R, C>; /// rank-1 tensor view of scalars with shape D0 pub type TensorViewD<'a, Scalar> = TensorViewX<'a, 1, Scalar>; @@ -127,25 +83,15 @@ pub type TensorViewD<'a, Scalar> = TensorViewX<'a, 1, Scalar>; /// rank-2 tensor view of scalars with shape [D0 x D1] pub type TensorViewDD<'a, Scalar> = TensorViewX<'a, 2, Scalar>; -/// rank-2 tensor view of batched scalars with shape [D0 x B] -pub type TensorViewDB<'a, Scalar, const B: usize> = TensorViewXB<'a, 2, 1, 1, Scalar, B>; - /// rank-2 tensor view of vectors with shape [D0 x R] pub type TensorViewDR<'a, Scalar, const R: usize> = TensorViewXR<'a, 2, 1, 1, Scalar, R>; /// rank-3 tensor view of scalars with shape [D0 x D1 x D2] pub type TensorViewDDD<'a, Scalar> = TensorViewX<'a, 3, Scalar>; -/// rank-3 tensor view of batched scalars with shape [D0 x D1 x B] -pub type TensorViewDDB<'a, Scalar, const B: usize> = TensorViewXB<'a, 3, 2, 1, Scalar, B>; - /// rank-3 tensor view of vectors with shape [D0 x D1 x R] pub type TensorViewDDR<'a, Scalar, const R: usize> = TensorViewXR<'a, 3, 2, 1, Scalar, R>; -/// rank-3 tensor view of batched vectors with shape [D0 x R x B] -pub type TensorViewDRB<'a, Scalar, const R: usize, const B: usize> = - TensorViewXRB<'a, 3, 1, 2, Scalar, R, B>; - /// rank-3 tensor view of matrices with shape [D0 x R x C] pub type TensorViewDRC<'a, Scalar, const R: usize, const C: usize> = TensorViewXRC<'a, 3, 1, 2, Scalar, R, C>; @@ -153,35 +99,23 @@ pub type TensorViewDRC<'a, Scalar, const R:
usize, const C: usize> = /// rank-4 tensor view of scalars with shape [D0 x D1 x D2 x D3] pub type TensorViewDDDD<'a, Scalar> = TensorViewX<'a, 4, Scalar>; -/// rank-4 tensor view of batched scalars with shape [D0 x D1 x D2 x B] -pub type TensorViewDDDB<'a, Scalar, const B: usize> = TensorViewXB<'a, 4, 3, 1, Scalar, B>; - /// rank-4 tensor view of vectors with shape [D0 x D1 x D2 x R] pub type TensorViewDDDR<'a, Scalar, const R: usize> = TensorViewXR<'a, 4, 3, 1, Scalar, R>; -/// rank-4 tensor view of batched vectors with shape [D0 x D1 x R x B] -pub type TensorViewDDRB<'a, Scalar, const R: usize, const B: usize> = - TensorViewXRB<'a, 4, 2, 2, Scalar, R, B>; - /// rank-4 tensor view of matrices with shape [D0 x D1 x R x C] pub type TensorViewDDRC<'a, Scalar, const R: usize, const C: usize> = TensorViewXRC<'a, 4, 2, 2, Scalar, R, C>; -/// rank-4 tensor view of batched matrices with shape [D0 x R x C x B] -pub type TensorViewDRCB<'a, Scalar, const R: usize, const C: usize, const B: usize> = - TensorViewXRCB<'a, 4, 1, 3, Scalar, R, C, B>; - /// Is a tensor-like object pub trait IsTensorLike< 'a, const TOTAL_RANK: usize, const DRANK: usize, const SRANK: usize, - Scalar: IsTensorScalar + 'static, - STensor: IsStaticTensor + 'static, + Scalar: IsCoreScalar + 'static, + STensor: IsStaticTensor + 'static, const ROWS: usize, const COLS: usize, - const BATCH_SIZE: usize, > where ndarray::Dim<[ndarray::Ix; DRANK]>: ndarray::Dimension, ndarray::Dim<[ndarray::Ix; TOTAL_RANK]>: ndarray::Dimension, @@ -209,9 +143,7 @@ pub trait IsTensorLike< fn scalar_dims(&self) -> [usize; TOTAL_RANK]; /// Convert to a mutable tensor - this will copy the tensor - fn to_mut_tensor( - &self, - ) -> MutTensor; + fn to_mut_tensor(&self) -> MutTensor; } /// Is a tensor view like object pub trait IsTensorView< 'a, const TOTAL_RANK: usize, const DRANK: usize, const SRANK: usize, - Scalar: IsTensorScalar + 'static, - STensor: IsStaticTensor + 'static, + Scalar: IsCoreScalar + 'static, + STensor: IsStaticTensor + 'static, const ROWS: usize, const COLS: usize, - const BATCH_SIZE: usize, ->: IsTensorLike<'a, TOTAL_RANK, DRANK, SRANK, Scalar, STensor, ROWS, COLS, BATCH_SIZE> where +>: IsTensorLike<'a, TOTAL_RANK, DRANK, SRANK, Scalar, STensor, ROWS, COLS> where ndarray::Dim<[ndarray::Ix; DRANK]>: ndarray::Dimension, ndarray::Dim<[ndarray::Ix; TOTAL_RANK]>: ndarray::Dimension, { @@ -237,12 +168,11 @@ macro_rules! tensor_view_is_view { ($scalar_rank:literal, $srank:literal, $drank:literal) => { impl< 'a, - Scalar: IsTensorScalar + 'static, - STensor: IsStaticTensor, + Scalar: IsCoreScalar + 'static, + STensor: IsStaticTensor, const ROWS: usize, const COLS: usize, - const BATCH_SIZE: usize, - > TensorView<'a, $scalar_rank, $drank, $srank, Scalar, STensor, ROWS, COLS, BATCH_SIZE> + > TensorView<'a, $scalar_rank, $drank, $srank, Scalar, STensor, ROWS, COLS> { /// Create a new tensor view from an ndarray of static tensors pub fn new( @@ -264,7 +194,7 @@ macro_rules! tensor_view_is_view { assert_eq!( std::mem::size_of::(), - std::mem::size_of::() * ROWS * COLS * BATCH_SIZE + std::mem::size_of::() * ROWS * COLS ); let scalar_view = unsafe { ndarray::ArrayView::from_shape_ptr(shape.strides(strides), ptr) }; @@ -284,13 +214,12 @@ macro_rules!
tensor_view_is_view { impl< 'a, - Scalar: IsTensorScalar + 'static, - STensor: IsStaticTensor + 'static, + Scalar: IsCoreScalar + 'static, + STensor: IsStaticTensor + 'static, const ROWS: usize, const COLS: usize, - const BATCH_SIZE: usize, - > IsTensorLike<'a, $scalar_rank, $drank, $srank, Scalar, STensor, ROWS, COLS, BATCH_SIZE> - for TensorView<'a, $scalar_rank, $drank, $srank, Scalar, STensor, ROWS, COLS, BATCH_SIZE> + > IsTensorLike<'a, $scalar_rank, $drank, $srank, Scalar, STensor, ROWS, COLS> + for TensorView<'a, $scalar_rank, $drank, $srank, Scalar, STensor, ROWS, COLS> { fn elem_view<'b: 'a>( &'b self, @@ -299,7 +228,7 @@ macro_rules! tensor_view_is_view { } fn get(&self, idx: [usize; $drank]) -> STensor { - self.elem_view[idx] + self.elem_view[idx].clone() } fn dims(&self) -> [usize; $drank] { @@ -313,7 +242,7 @@ macro_rules! tensor_view_is_view { } fn scalar_get(&'a self, idx: [usize; $scalar_rank]) -> Scalar { - self.scalar_view[idx] + self.scalar_view[idx].clone() } fn scalar_dims(&self) -> [usize; $scalar_rank] { @@ -322,7 +251,7 @@ macro_rules! tensor_view_is_view { fn to_mut_tensor( &self, - ) -> MutTensor<$scalar_rank, $drank, $srank, Scalar, STensor, ROWS, COLS, BATCH_SIZE> { + ) -> MutTensor<$scalar_rank, $drank, $srank, Scalar, STensor, ROWS, COLS> { MutTensor { mut_array: self.elem_view.to_owned(), phantom: PhantomData::default(), @@ -332,30 +261,27 @@ macro_rules! tensor_view_is_view { impl< 'a, - Scalar: IsTensorScalar + 'static, - STensor: IsStaticTensor + 'static, + Scalar: IsCoreScalar + 'static, + STensor: IsStaticTensor + 'static, const ROWS: usize, const COLS: usize, - const BATCH_SIZE: usize, - > IsTensorView<'a, $scalar_rank, $drank, $srank, Scalar, STensor, ROWS, COLS, BATCH_SIZE> - for TensorView<'a, $scalar_rank, $drank, $srank, Scalar, STensor, ROWS, COLS, BATCH_SIZE> + > IsTensorView<'a, $scalar_rank, $drank, $srank, Scalar, STensor, ROWS, COLS> + for TensorView<'a, $scalar_rank, $drank, $srank, Scalar, STensor, ROWS, COLS> { fn view<'b: 'a>( &'b self, - ) -> TensorView<'a, $scalar_rank, $drank, $srank, Scalar, STensor, ROWS, COLS, BATCH_SIZE> - { - *self + ) -> TensorView<'a, $scalar_rank, $drank, $srank, Scalar, STensor, ROWS, COLS> { + self.clone() } } impl< 'a, - Scalar: IsTensorScalar + 'static, - STensor: IsStaticTensor + 'static, - const BATCH_SIZE: usize, + Scalar: IsCoreScalar + 'static, + STensor: IsStaticTensor + 'static, const ROWS: usize, const COLS: usize, - > TensorView<'a, $scalar_rank, $drank, $srank, Scalar, STensor, ROWS, COLS, BATCH_SIZE> + > TensorView<'a, $scalar_rank, $drank, $srank, Scalar, STensor, ROWS, COLS> { } }; @@ -370,108 +296,103 @@ tensor_view_is_view!(3, 2, 1); tensor_view_is_view!(4, 0, 4); tensor_view_is_view!(4, 1, 3); tensor_view_is_view!(4, 2, 2); -tensor_view_is_view!(4, 3, 1); - -#[cfg(test)] -mod tests { - - #[test] - fn view() { - use super::*; - use ndarray::ShapeBuilder; - { - let rank1_shape = [3]; - let arr: [u8; 3] = [5, 6, 7]; - - let ndview = - ndarray::ArrayView::from_shape(rank1_shape.strides([1]), &arr[..]).unwrap(); - assert!(ndview.is_standard_layout()); - let view = TensorViewD::new(ndview); - - for i in 0..view.dims()[0] { - assert_eq!(arr[i], view.get([i])); - } +tensor_view_is_view!(5, 0, 5); +tensor_view_is_view!(5, 1, 4); +tensor_view_is_view!(5, 2, 3); + +#[test] +fn tensor_view_tests() { + use ndarray::ShapeBuilder; + { + let rank1_shape = [3]; + let arr: [u8; 3] = [5, 6, 7]; + + let ndview = ndarray::ArrayView::from_shape(rank1_shape.strides([1]), &arr[..]).unwrap(); + 
assert!(ndview.is_standard_layout()); + let view = TensorViewD::new(ndview); + + for i in 0..view.dims()[0] { + assert_eq!(arr[i], view.get([i])); } - { - const ROWS: usize = 2; - const COLS: usize = 3; + } + { + const ROWS: usize = 2; + const COLS: usize = 3; - type Mat2x3 = SMat; + type Mat2x3 = SMat; - let a = Mat2x3::new(0.1, 0.56, 0.77, 2.0, 5.1, 7.0); - let b = Mat2x3::new(0.6, 0.5, 0.78, 2.0, 5.2, 7.1); - let c = Mat2x3::new(0.9, 0.58, 0.7, 2.0, 5.3, 7.2); - let d = Mat2x3::new(0.9, 0.50, 0.9, 2.0, 5.0, 7.3); + let a = Mat2x3::new(0.1, 0.56, 0.77, 2.0, 5.1, 7.0); + let b = Mat2x3::new(0.6, 0.5, 0.78, 2.0, 5.2, 7.1); + let c = Mat2x3::new(0.9, 0.58, 0.7, 2.0, 5.3, 7.2); + let d = Mat2x3::new(0.9, 0.50, 0.9, 2.0, 5.0, 7.3); - let rank2_shape = [4, 2]; - let arr = [a, a, b, c, d, c, b, b]; + let rank2_shape = [4, 2]; + let arr = [a, a, b, c, d, c, b, b]; - let strides = [2, 1]; - let ndview = - ndarray::ArrayView::from_shape(rank2_shape.strides([2, 1]), &arr[..]).unwrap(); - assert!(ndview.is_standard_layout()); - let view = TensorViewDDRC::new(ndview); + let strides = [2, 1]; + let ndview = ndarray::ArrayView::from_shape(rank2_shape.strides([2, 1]), &arr[..]).unwrap(); + assert!(ndview.is_standard_layout()); + let view = TensorViewDDRC::new(ndview); - println!("{}", view.elem_view); - for d0 in 0..view.dims()[0] { - for d1 in 0..view.dims()[1] { - assert_eq!(view.get([d0, d1]), arr[strides[0] * d0 + strides[1] * d1]); - } + println!("{}", view.elem_view); + for d0 in 0..view.dims()[0] { + for d1 in 0..view.dims()[1] { + assert_eq!(view.get([d0, d1]), arr[strides[0] * d0 + strides[1] * d1]); } + } - println!("{:?}", view.scalar_view); - assert!(!view.scalar_view.is_standard_layout()); - for d0 in 0..view.scalar_dims()[0] { - for d1 in 0..view.scalar_dims()[1] { - for c in 0..COLS { - for r in 0..ROWS { - assert_eq!( - view.scalar_get([d0, d1, r, c]), - arr[strides[0] * d0 + strides[1] * d1][c * ROWS + r] - ); - } + println!("{:?}", view.scalar_view); + assert!(!view.scalar_view.is_standard_layout()); + for d0 in 0..view.scalar_dims()[0] { + for d1 in 0..view.scalar_dims()[1] { + for c in 0..COLS { + for r in 0..ROWS { + assert_eq!( + view.scalar_get([d0, d1, r, c]), + arr[strides[0] * d0 + strides[1] * d1][c * ROWS + r] + ); } } } } + } - { - let rank3_shape = [4, 2, 3]; - let raw_arr = [ - 4, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, - 27, 28, - ]; - - let arr = raw_arr.map(SVec::::new); - - let strides = [6, 3, 1]; - let ndview = - ndarray::ArrayView::from_shape(rank3_shape.strides(strides), &arr[..]).unwrap(); - assert!(ndview.is_standard_layout()); - let view = TensorViewDDDR::new(ndview); - - println!("{}", view.elem_view); - for d0 in 0..view.dims()[0] { - for d1 in 0..view.dims()[1] { - for d2 in 0..view.dims()[2] { - assert_eq!( - view.get([d0, d1, d2]), - arr[strides[0] * d0 + strides[1] * d1 + strides[2] * d2] - ); - } + { + let rank3_shape = [4, 2, 3]; + let raw_arr = [ + 4, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, + 28, + ]; + + let arr = raw_arr.map(SVec::::new); + + let strides = [6, 3, 1]; + let ndview = + ndarray::ArrayView::from_shape(rank3_shape.strides(strides), &arr[..]).unwrap(); + assert!(ndview.is_standard_layout()); + let view = TensorViewDDDR::new(ndview); + + println!("{}", view.elem_view); + for d0 in 0..view.dims()[0] { + for d1 in 0..view.dims()[1] { + for d2 in 0..view.dims()[2] { + assert_eq!( + view.get([d0, d1, d2]), + arr[strides[0] * d0 + strides[1] * d1 + strides[2] * 
d2] + ); } } + } - println!("{:?}", view.scalar_view); - for d0 in 0..view.scalar_dims()[0] { - for d1 in 0..view.scalar_dims()[1] { - for d2 in 0..view.scalar_dims()[2] { - for r in 0..1 { - assert_eq!( - view.scalar_get([d0, d1, d2, r]), - arr[strides[0] * d0 + strides[1] * d1 + strides[2] * d2][r] - ); - } + println!("{:?}", view.scalar_view); + for d0 in 0..view.scalar_dims()[0] { + for d1 in 0..view.scalar_dims()[1] { + for d2 in 0..view.scalar_dims()[2] { + for r in 0..1 { + assert_eq!( + view.scalar_get([d0, d1, d2, r]), + arr[strides[0] * d0 + strides[1] * d1 + strides[2] * d2][r] + ); } } } diff --git a/crates/sophus_image/Cargo.toml b/crates/sophus_image/Cargo.toml index 0e872cd..141cea2 100644 --- a/crates/sophus_image/Cargo.toml +++ b/crates/sophus_image/Cargo.toml @@ -11,11 +11,12 @@ repository.workspace = true version.workspace = true [dependencies] -sophus_tensor.workspace = true +sophus_core.workspace = true approx.workspace = true assertables.workspace = true bytemuck.workspace = true nalgebra.workspace = true ndarray.workspace = true +num-traits.workspace = true png.workspace = true diff --git a/crates/sophus_image/src/arc_image.rs b/crates/sophus_image/src/arc_image.rs index e098da3..468ce8c 100644 --- a/crates/sophus_image/src/arc_image.rs +++ b/crates/sophus_image/src/arc_image.rs @@ -3,36 +3,35 @@ use crate::image_view::ImageSize; use crate::image_view::IsImageView; use crate::mut_image::GenMutImage; -use sophus_tensor::arc_tensor::ArcTensor; -use sophus_tensor::element::IsStaticTensor; -use sophus_tensor::element::IsTensorScalar; -use sophus_tensor::element::SVec; -use sophus_tensor::view::IsTensorLike; -use sophus_tensor::view::IsTensorView; -use sophus_tensor::view::TensorView; +use sophus_core::linalg::scalar::IsCoreScalar; +use sophus_core::linalg::SVec; +use sophus_core::tensor::arc_tensor::ArcTensor; +use sophus_core::tensor::element::IsStaticTensor; +use sophus_core::tensor::tensor_view::IsTensorLike; +use sophus_core::tensor::tensor_view::IsTensorView; +use sophus_core::tensor::tensor_view::TensorView; /// Image of static tensors with shared ownership #[derive(Debug, Clone)] pub struct GenArcImage< const TOTAL_RANK: usize, const SRANK: usize, - Scalar: IsTensorScalar + 'static, - STensor: IsStaticTensor + 'static, + Scalar: IsCoreScalar + 'static, + STensor: IsStaticTensor + 'static, const ROWS: usize, const COLS: usize, - const BATCH_SIZE: usize, > { /// underlying tensor - pub tensor: ArcTensor, + pub tensor: ArcTensor, } /// Image of scalar values -pub type ArcImage = GenArcImage<2, 0, Scalar, Scalar, 1, 1, 1>; +pub type ArcImage = GenArcImage<2, 0, Scalar, Scalar, 1, 1>; /// Image of vector values /// /// Here, R indicates the number of rows in the vector -pub type ArcImageR = GenArcImage<3, 1, Scalar, SVec, R, 1, 1>; +pub type ArcImageR = GenArcImage<3, 1, Scalar, SVec, R, 1>; /// Image of u8 scalars pub type ArcImageU8 = ArcImage; @@ -69,17 +68,16 @@ macro_rules! 
arc_image { /// impl< 'a, - Scalar: IsTensorScalar + 'static, - STensor: IsStaticTensor + 'static, + Scalar: IsCoreScalar + 'static, + STensor: IsStaticTensor + 'static, const ROWS: usize, const COLS: usize, - const BATCH_SIZE: usize, - > From> - for GenArcImage<$scalar_rank, $srank, Scalar, STensor, ROWS, COLS, BATCH_SIZE> + > From> + for GenArcImage<$scalar_rank, $srank, Scalar, STensor, ROWS, COLS> where ndarray::Dim<[ndarray::Ix; $scalar_rank]>: ndarray::Dimension, { - fn from(value: GenMutImage<$scalar_rank, $srank, Scalar, STensor, ROWS, COLS, BATCH_SIZE>) -> Self { + fn from(value: GenMutImage<$scalar_rank, $srank, Scalar, STensor, ROWS, COLS>) -> Self { Self::from_mut_image(value) } } @@ -88,19 +86,18 @@ macro_rules! arc_image { impl< 'a, - Scalar: IsTensorScalar + 'static, - STensor: IsStaticTensor + 'static, + Scalar: IsCoreScalar + 'static, + STensor: IsStaticTensor + 'static, const ROWS: usize, const COLS: usize, - const BATCH_SIZE: usize, - > GenArcImage<$scalar_rank, $srank, Scalar, STensor, ROWS, COLS, BATCH_SIZE> + > GenArcImage<$scalar_rank, $srank, Scalar, STensor, ROWS, COLS> { /// Convert an GenArcImage to a GenMutImage /// /// It is best practice to not call this function directly. Instead, use the ``.into()`` /// method generated by the ``From`` trait. pub fn from_mut_image( - image: GenMutImage<$scalar_rank, $srank, Scalar, STensor, ROWS, COLS, BATCH_SIZE>, + image: GenMutImage<$scalar_rank, $srank, Scalar, STensor, ROWS, COLS>, ) -> Self { Self { tensor: ArcTensor::< @@ -111,7 +108,6 @@ macro_rules! arc_image { STensor, ROWS, COLS, - BATCH_SIZE, >::from_mut_tensor(image.mut_tensor), } } @@ -127,14 +123,13 @@ macro_rules! arc_image { STensor, ROWS, COLS, - BATCH_SIZE, >::from_shape_and_val(size.into(), val), } } /// create a new image from an image view pub fn make_copy_from( - v: &GenImageView<$scalar_rank, $srank, Scalar, STensor, ROWS, COLS, BATCH_SIZE>, + v: &GenImageView<$scalar_rank, $srank, Scalar, STensor, ROWS, COLS>, ) -> Self { Self { tensor: ArcTensor::< @@ -145,14 +140,13 @@ macro_rules! arc_image { STensor, ROWS, COLS, - BATCH_SIZE, >::make_copy_from(&v.tensor_view), } } /// create a new image from image size and a slice pub fn make_copy_from_size_and_slice(image_size: ImageSize, slice: &'a [STensor]) -> Self { - GenMutImage::<$scalar_rank, $srank, Scalar, STensor, ROWS, COLS, BATCH_SIZE> + GenMutImage::<$scalar_rank, $srank, Scalar, STensor, ROWS, COLS> ::make_copy_from_size_and_slice(image_size, slice).into() } @@ -161,17 +155,15 @@ macro_rules! arc_image { 'b, const OTHER_HRANK: usize, const OTHER_SRANK: usize, - OtherScalar: IsTensorScalar + 'static, + OtherScalar: IsCoreScalar + 'static, OtherSTensor: IsStaticTensor< OtherScalar, OTHER_SRANK, OTHER_ROWS, OTHER_COLS, - OTHER_BATCHES, > + 'static, const OTHER_ROWS: usize, const OTHER_COLS: usize, - const OTHER_BATCHES: usize, F: FnMut(&OtherSTensor)-> STensor >( v: &'b GenImageView::< @@ -182,14 +174,13 @@ macro_rules! 
arc_image { OtherSTensor, OTHER_ROWS, OTHER_COLS, - OTHER_BATCHES, >, op: F, ) -> Self where ndarray::Dim<[ndarray::Ix; OTHER_HRANK]>: ndarray::Dimension, - TensorView<'b, OTHER_HRANK, 2, OTHER_SRANK, OtherScalar, OtherSTensor, OTHER_ROWS, OTHER_COLS, OTHER_BATCHES>: - IsTensorView<'b, OTHER_HRANK, 2, OTHER_SRANK, OtherScalar, OtherSTensor, OTHER_ROWS, OTHER_COLS, OTHER_BATCHES>, + TensorView<'b, OTHER_HRANK, 2, OTHER_SRANK, OtherScalar, OtherSTensor, OTHER_ROWS, OTHER_COLS>: + IsTensorView<'b, OTHER_HRANK, 2, OTHER_SRANK, OtherScalar, OtherSTensor, OTHER_ROWS, OTHER_COLS>, { Self { @@ -201,7 +192,6 @@ macro_rules! arc_image { STensor, ROWS, COLS, - BATCH_SIZE, >::from_map(&v.tensor_view, op), } } @@ -211,29 +201,29 @@ macro_rules! arc_image { /// creates an image from a binary operator applied to two image views impl< 'b, - Scalar: IsTensorScalar + 'static, - STensor: IsStaticTensor + 'static, + Scalar: IsCoreScalar + 'static, + STensor: IsStaticTensor + 'static, const ROWS: usize, const COLS: usize, - const BATCH_SIZE: usize, - > Default for GenArcImage<$scalar_rank, $srank, Scalar, STensor, ROWS, COLS, BATCH_SIZE> + + > Default for GenArcImage<$scalar_rank, $srank, Scalar, STensor, ROWS, COLS> where ndarray::Dim<[ndarray::Ix; $scalar_rank]>: ndarray::Dimension, { fn default() -> Self { - Self::from_image_size_and_val(ImageSize::default(), STensor::zero()) + Self::from_image_size_and_val(ImageSize::default(), num_traits::Zero::zero()) } } impl< 'b, - Scalar: IsTensorScalar + 'static, - STensor: IsStaticTensor + 'static, + Scalar: IsCoreScalar + 'static, + STensor: IsStaticTensor + 'static, const ROWS: usize, const COLS: usize, - const BATCH_SIZE: usize, - > IsImageView<'b, $scalar_rank, $srank, Scalar, STensor, ROWS, COLS, BATCH_SIZE> - for GenArcImage<$scalar_rank, $srank, Scalar, STensor, ROWS, COLS, BATCH_SIZE> + + > IsImageView<'b, $scalar_rank, $srank, Scalar, STensor, ROWS, COLS> + for GenArcImage<$scalar_rank, $srank, Scalar, STensor, ROWS, COLS> where ndarray::Dim<[ndarray::Ix; $scalar_rank]>: ndarray::Dimension, { @@ -247,7 +237,6 @@ macro_rules! 
arc_image { STensor, ROWS, COLS, - BATCH_SIZE, > { GenImageView { tensor_view: self.tensor.view(), diff --git a/crates/sophus_image/src/image_view.rs b/crates/sophus_image/src/image_view.rs index 4cca6e1..ece6a0a 100644 --- a/crates/sophus_image/src/image_view.rs +++ b/crates/sophus_image/src/image_view.rs @@ -1,9 +1,9 @@ -use sophus_tensor::element::IsStaticTensor; -use sophus_tensor::element::IsTensorScalar; -use sophus_tensor::element::SVec; -use sophus_tensor::view::IsTensorLike; -use sophus_tensor::view::IsTensorView; -use sophus_tensor::view::TensorView; +use sophus_core::linalg::scalar::IsCoreScalar; +use sophus_core::linalg::SVec; +use sophus_core::tensor::element::IsStaticTensor; +use sophus_core::tensor::tensor_view::IsTensorLike; +use sophus_core::tensor::tensor_view::IsTensorView; +use sophus_core::tensor::tensor_view::TensorView; /// Image size #[derive(Debug, Copy, Clone, Default)] @@ -51,26 +51,25 @@ pub struct GenImageView< 'a, const TOTAL_RANK: usize, const SRANK: usize, - Scalar: IsTensorScalar + 'static, - STensor: IsStaticTensor + 'static, + Scalar: IsCoreScalar + 'static, + STensor: IsStaticTensor + 'static, const ROWS: usize, const COLS: usize, - const BATCH_SIZE: usize, > where ndarray::Dim<[ndarray::Ix; TOTAL_RANK]>: ndarray::Dimension, { /// underlying tensor view - pub tensor_view: TensorView<'a, TOTAL_RANK, 2, SRANK, Scalar, STensor, ROWS, COLS, BATCH_SIZE>, + pub tensor_view: TensorView<'a, TOTAL_RANK, 2, SRANK, Scalar, STensor, ROWS, COLS>, } /// Image view of scalar values -pub type ImageView<'a, Scalar> = GenImageView<'a, 2, 0, Scalar, Scalar, 1, 1, 1>; +pub type ImageView<'a, Scalar> = GenImageView<'a, 2, 0, Scalar, Scalar, 1, 1>; /// Image view of vector values /// /// Here, R indicates the number of rows in the vector pub type ImageViewR<'a, Scalar, const ROWS: usize> = - GenImageView<'a, 3, 1, Scalar, SVec, ROWS, 1, 1>; + GenImageView<'a, 3, 1, Scalar, SVec, ROWS, 1>; /// Image view of u8 values pub type ImageViewU8<'a> = ImageView<'a, u8>; @@ -102,18 +101,15 @@ pub trait IsImageView< 'a, const TOTAL_RANK: usize, const SRANK: usize, - Scalar: IsTensorScalar + 'static, - STensor: IsStaticTensor + 'static, + Scalar: IsCoreScalar + 'static, + STensor: IsStaticTensor + 'static, const ROWS: usize, const COLS: usize, - const BATCH_SIZE: usize, > where ndarray::Dim<[ndarray::Ix; TOTAL_RANK]>: ndarray::Dimension, { /// Get the image view - fn image_view( - &'a self, - ) -> GenImageView<'a, TOTAL_RANK, SRANK, Scalar, STensor, ROWS, COLS, BATCH_SIZE>; + fn image_view(&'a self) -> GenImageView<'a, TOTAL_RANK, SRANK, Scalar, STensor, ROWS, COLS>; /// Get the row stride of the image fn stride(&'a self) -> usize { @@ -132,15 +128,14 @@ macro_rules! 
image_view { ($scalar_rank:literal, $srank:literal) => { impl< 'a, - Scalar: IsTensorScalar + 'static, - STensor: IsStaticTensor + 'static, + Scalar: IsCoreScalar + 'static, + STensor: IsStaticTensor + 'static, const ROWS: usize, const COLS: usize, - const BATCH_SIZE: usize, - > GenImageView<'a, $scalar_rank, $srank, Scalar, STensor, ROWS, COLS, BATCH_SIZE> + > GenImageView<'a, $scalar_rank, $srank, Scalar, STensor, ROWS, COLS> where - TensorView<'a, $scalar_rank, 2, $srank, Scalar, STensor, ROWS, COLS, BATCH_SIZE>: - IsTensorView<'a, $scalar_rank, 2, $srank, Scalar, STensor, ROWS, COLS, BATCH_SIZE>, + TensorView<'a, $scalar_rank, 2, $srank, Scalar, STensor, ROWS, COLS>: + IsTensorView<'a, $scalar_rank, 2, $srank, Scalar, STensor, ROWS, COLS>, ndarray::Dim<[ndarray::Ix; $scalar_rank]>: ndarray::Dimension, { /// Create a new image view from an image size and a slice of data pub fn from_size_and_slice( @@ -155,7 +150,6 @@ macro_rules! image_view { STensor, ROWS, COLS, - BATCH_SIZE, >::from_shape_and_slice( [image_size.height, image_size.width], slice ), @@ -184,7 +178,6 @@ macro_rules! image_view { STensor, ROWS, COLS, - BATCH_SIZE, >::new(self.tensor_view.elem_view.slice(ndarray::s![ start[0]..start[0] + size[0], start[1]..start[1] + size[1] @@ -195,16 +188,15 @@ macro_rules! image_view { impl< 'a, - Scalar: IsTensorScalar + 'static, - STensor: IsStaticTensor + 'static, + Scalar: IsCoreScalar + 'static, + STensor: IsStaticTensor + 'static, const ROWS: usize, const COLS: usize, - const BATCH_SIZE: usize, - > IsImageView<'a, $scalar_rank, $srank, Scalar, STensor, ROWS, COLS, BATCH_SIZE> - for GenImageView<'a, $scalar_rank, $srank, Scalar, STensor, ROWS, COLS, BATCH_SIZE> + > IsImageView<'a, $scalar_rank, $srank, Scalar, STensor, ROWS, COLS> + for GenImageView<'a, $scalar_rank, $srank, Scalar, STensor, ROWS, COLS> where - TensorView<'a, $scalar_rank, 2, $srank, Scalar, STensor, ROWS, COLS, BATCH_SIZE>: - IsTensorView<'a, $scalar_rank, 2, $srank, Scalar, STensor, ROWS, COLS, BATCH_SIZE>, + TensorView<'a, $scalar_rank, 2, $srank, Scalar, STensor, ROWS, COLS>: + IsTensorView<'a, $scalar_rank, 2, $srank, Scalar, STensor, ROWS, COLS>, ndarray::Dim<[ndarray::Ix; $scalar_rank]>: ndarray::Dimension, { fn pixel(&'a self, u: usize, v: usize) -> STensor { @@ -216,10 +208,9 @@ macro_rules! image_view { fn image_view( &'a self, - ) -> GenImageView<'a, $scalar_rank, $srank, Scalar, STensor, ROWS, COLS, BATCH_SIZE> - { + ) -> GenImageView<'a, $scalar_rank, $srank, Scalar, STensor, ROWS, COLS> { Self { - tensor_view: self.tensor_view, + tensor_view: self.tensor_view.clone(), } } diff --git a/crates/sophus_image/src/intensity_image.rs b/crates/sophus_image/src/intensity_image.rs index 4e510b3..fe1c961 100644 --- a/crates/sophus_image/src/intensity_image.rs +++ b/crates/sophus_image/src/intensity_image.rs @@ -40,10 +40,10 @@ use crate::mut_image::MutImageF32; use crate::mut_image::MutImageR; use crate::mut_image::MutImageU16; use crate::mut_image::MutImageU8; +use sophus_core::linalg::scalar::IsCoreScalar; -use sophus_tensor::element::IsStaticTensor; -use sophus_tensor::element::IsTensorScalar; -use sophus_tensor::element::SVec; +use sophus_core::linalg::SVec; +use sophus_core::tensor::element::IsStaticTensor; /// dynamic mutable intensity image of unsigned integer values pub enum DynIntensityMutImageU { @@ -285,8 +285,8 @@ pub enum DynIntensityImageView<'a> { /// Hence it is a trait for grayscale (1-channel), grayscale+alpha (2-channel), RGB (3-channel), and /// RGBA images (4-channel).
/// /// This trait provides methods for converting between different image types. As of now, three /// scalar types are supported: `u8`, `u16`, and `f32`: /// /// - u8 images are in the range [0, 255], i.e. 100% intensity corresponds to 255. /// @@ -295,24 +293,23 @@ pub enum DynIntensityImageView<'a> { /// - f32 images shall be in the range [0.0, 1.0] and 100% intensity corresponds to 1.0. /// If the f32 is outside this range, conversion results may be surprising. /// -/// These are image types which typically used for computer vision and graphics applications. +/// These are image types which are typically used for computer vision and graphics applications. pub trait IntensityMutImage< const TOTAL_RANK: usize, const SRANK: usize, - Scalar: IsTensorScalar + 'static, - STensor: IsStaticTensor + 'static, + Scalar: IsCoreScalar + 'static, + STensor: IsStaticTensor + 'static, const ROWS: usize, const COLS: usize, - const BATCH_SIZE: usize, > { /// Shared tensor type - type GenArcImage; + type GenArcImage; /// Mutable tensor type - type GenMutImage; + type GenMutImage; /// Pixel type - type Pixel; + type Pixel; /// Converts a pixel to a grayscale value. fn pixel_to_grayscale(pixel: &STensor) -> Scalar; @@ -347,8 +346,8 @@ pub trait IntensityMutImage< fn try_into_dyn_image_view_u(img: Self) -> Option; } -impl<'a> IntensityMutImage<2, 0, u8, u8, 1, 1, 1> for MutImageU8 { - type Pixel = S; +impl<'a> IntensityMutImage<2, 0, u8, u8, 1, 1> for MutImageU8 { + type Pixel = S; fn pixel_to_grayscale(pixel: &u8) -> u8 { *pixel @@ -378,9 +377,9 @@ impl<'a> IntensityMutImage<2, 0, u8, u8, 1, 1, 1> for MutImageU8 { *p as f32 / 255.0 } - type GenArcImage = ArcImage; + type GenArcImage = ArcImage; - type GenMutImage = MutImage; + type GenMutImage = MutImage; fn cast_u8(img: Self) -> Self::GenMutImage { img @@ -399,8 +398,8 @@ impl<'a> IntensityMutImage<2, 0, u8, u8, 1, 1, 1> for MutImageU8 { } } -impl IntensityMutImage<2, 0, u16, u16, 1, 1, 1> for MutImageU16 { - type Pixel = S; +impl IntensityMutImage<2, 0, u16, u16, 1, 1> for MutImageU16 { + type Pixel = S; fn pixel_to_grayscale(pixel: &u16) -> u16 { *pixel @@ -430,9 +429,9 @@ impl IntensityMutImage<2, 0, u16, u16, 1, 1, 1> for MutImageU16 { *p as f32 / 65535.0 } - type GenArcImage = ArcImage; + type GenArcImage = ArcImage; - type GenMutImage = MutImage; + type GenMutImage = MutImage; fn cast_u8(img: Self) -> Self::GenMutImage { Self::GenMutImage::::from_map(&img.image_view(), |rgb: &u16| -> u8 { @@ -451,8 +450,8 @@ impl IntensityMutImage<2, 0, u16, u16, 1, 1, 1> for MutImageU16 { } } -impl IntensityMutImage<2, 0, f32, f32, 1, 1, 1> for MutImageF32 { - type Pixel = S; +impl IntensityMutImage<2, 0, f32, f32, 1, 1> for MutImageF32 { + type Pixel = S; fn pixel_to_grayscale(pixel: &f32) -> f32 { *pixel @@ -482,9 +481,9 @@ impl IntensityMutImage<2, 0, f32, f32, 1, 1, 1> for MutImageF32 { *p } - type GenArcImage = ArcImage; + type GenArcImage = ArcImage; - type GenMutImage = MutImage; + type GenMutImage = MutImage; fn cast_u8(img: Self) -> Self::GenMutImage { Self::GenMutImage::::from_map(&img.image_view(), |rgb: &f32| -> u8 { @@ -503,8 +502,8 @@ impl IntensityMutImage<2, 0, f32, f32, 1, 1, 1> for MutImageF32 { } } -impl IntensityMutImage<3, 1, u8, SVec, 4, 1, 1> for MutImage4U8 { - type Pixel = SVec; +impl IntensityMutImage<3, 1, u8, SVec, 4, 1> for MutImage4U8 { + type Pixel = SVec; fn pixel_to_grayscale(pixel:
&SVec) -> u8 { pixel[0] @@ -541,9 +540,9 @@ impl IntensityMutImage<3, 1, u8, SVec, 4, 1, 1> for MutImage4U8 { ) } - type GenArcImage = ArcImageR; + type GenArcImage = ArcImageR; - type GenMutImage = MutImageR; + type GenMutImage = MutImageR; fn cast_u8(img: Self) -> Self::GenMutImage { img @@ -566,19 +565,18 @@ impl IntensityMutImage<3, 1, u8, SVec, 4, 1, 1> for MutImage4U8 { pub trait IntensityArcImage< const TOTAL_RANK: usize, const SRANK: usize, - Scalar: IsTensorScalar + 'static, - STensor: IsStaticTensor + 'static, + Scalar: IsCoreScalar + 'static, + STensor: IsStaticTensor + 'static, const ROWS: usize, const COLS: usize, - const BATCH_SIZE: usize, > { /// Shared tensor type - type GenArcImage; + type GenArcImage; /// Mutable tensor type - type GenMutImage; + type GenMutImage; /// Pixel type - type Pixel; + type Pixel; /// Converts a pixel to a grayscale value. fn pixel_to_grayscale(pixel: &STensor) -> Scalar; @@ -617,8 +615,8 @@ pub trait IntensityArcImage< fn try_into_dyn_image_view_u(img: &Self) -> Option; } -impl IntensityArcImage<2, 0, u8, u8, 1, 1, 1> for ArcImageU8 { - type Pixel = S; +impl IntensityArcImage<2, 0, u8, u8, 1, 1> for ArcImageU8 { + type Pixel = S; fn pixel_to_grayscale(pixel: &u8) -> u8 { *pixel @@ -648,9 +646,9 @@ impl IntensityArcImage<2, 0, u8, u8, 1, 1, 1> for ArcImageU8 { *p as f32 / 255.0 } - type GenArcImage = ArcImage; + type GenArcImage = ArcImage; - type GenMutImage = MutImage; + type GenMutImage = MutImage; fn cast_u8(img: &Self) -> Self::GenArcImage { img.clone() @@ -669,8 +667,8 @@ impl IntensityArcImage<2, 0, u8, u8, 1, 1, 1> for ArcImageU8 { } } -impl IntensityArcImage<2, 0, u16, u16, 1, 1, 1> for ArcImageU16 { - type Pixel = S; +impl IntensityArcImage<2, 0, u16, u16, 1, 1> for ArcImageU16 { + type Pixel = S; fn pixel_to_grayscale(pixel: &u16) -> u16 { *pixel @@ -702,9 +700,9 @@ impl IntensityArcImage<2, 0, u16, u16, 1, 1, 1> for ArcImageU16 { *p as f32 / 65535.0 } - type GenArcImage = ArcImage; + type GenArcImage = ArcImage; - type GenMutImage = MutImage; + type GenMutImage = MutImage; fn cast_u8(img: &Self) -> Self::GenArcImage { Self::GenArcImage::::from_map(&img.image_view(), |rgb: &u16| -> u8 { @@ -723,8 +721,8 @@ impl IntensityArcImage<2, 0, u16, u16, 1, 1, 1> for ArcImageU16 { } } -impl IntensityArcImage<2, 0, f32, f32, 1, 1, 1> for ArcImageF32 { - type Pixel = S; +impl IntensityArcImage<2, 0, f32, f32, 1, 1> for ArcImageF32 { + type Pixel = S; fn pixel_to_grayscale(pixel: &f32) -> f32 { *pixel @@ -754,9 +752,9 @@ impl IntensityArcImage<2, 0, f32, f32, 1, 1, 1> for ArcImageF32 { *p } - type GenArcImage = ArcImage; + type GenArcImage = ArcImage; - type GenMutImage = MutImage; + type GenMutImage = MutImage; fn cast_u8(img: &Self) -> Self::GenArcImage { Self::GenArcImage::::from_map(&img.image_view(), |rgb: &f32| -> u8 { @@ -775,8 +773,8 @@ impl IntensityArcImage<2, 0, f32, f32, 1, 1, 1> for ArcImageF32 { } } -impl IntensityArcImage<3, 1, u8, SVec, 2, 1, 1> for ArcImage2U8 { - type Pixel = SVec; +impl IntensityArcImage<3, 1, u8, SVec, 2, 1> for ArcImage2U8 { + type Pixel = SVec; fn pixel_to_grayscale(pixel: &SVec) -> u8 { pixel[0] @@ -808,9 +806,9 @@ impl IntensityArcImage<3, 1, u8, SVec, 2, 1, 1> for ArcImage2U8 { SVec::::new(p[0] as f32 / 255.0, p[1] as f32 / 255.0) } - type GenArcImage = ArcImageR; + type GenArcImage = ArcImageR; - type GenMutImage = MutImageR; + type GenMutImage = MutImageR; fn cast_u8(img: &Self) -> Self::GenArcImage { img.clone() @@ -829,8 +827,8 @@ impl IntensityArcImage<3, 1, u8, SVec, 2, 1, 1> for ArcImage2U8 { } } 
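The intensity conventions spelled out above (u8 in [0, 255], u16 in [0, 65535], f32 in [0.0, 1.0]) are easiest to see at a call site. A minimal sketch follows; the `ImageSize` field names and the exact `cast_u8` signature are assumptions, since the generic argument lists are partially elided in this diff:

```rust
use sophus_image::arc_image::{ArcImageF32, ArcImageU8};
use sophus_image::image_view::ImageSize;
use sophus_image::intensity_image::IntensityArcImage;

fn main() {
    // A 4x3 image at 50% intensity under the f32 convention ([0.0, 1.0]).
    let img = ArcImageF32::from_image_size_and_val(ImageSize { width: 4, height: 3 }, 0.5);

    // Scale-aware cast via from_map: each f32 pixel is remapped to the u8
    // convention ([0, 255]), so 0.5 lands near 128.
    let img_u8: ArcImageU8 = ArcImageF32::cast_u8(&img);
}
```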
-impl IntensityArcImage<3, 1, u8, SVec, 3, 1, 1> for ArcImage3U8 { - type Pixel = SVec; +impl IntensityArcImage<3, 1, u8, SVec, 3, 1> for ArcImage3U8 { + type Pixel = SVec; fn pixel_to_grayscale(pixel: &SVec) -> u8 { pixel[0] @@ -866,9 +864,9 @@ impl IntensityArcImage<3, 1, u8, SVec, 3, 1, 1> for ArcImage3U8 { ) } - type GenArcImage = ArcImageR; + type GenArcImage = ArcImageR; - type GenMutImage = MutImageR; + type GenMutImage = MutImageR; fn cast_u8(img: &Self) -> Self::GenArcImage { img.clone() @@ -887,8 +885,8 @@ impl IntensityArcImage<3, 1, u8, SVec, 3, 1, 1> for ArcImage3U8 { } } -impl IntensityArcImage<3, 1, u8, SVec, 4, 1, 1> for ArcImage4U8 { - type Pixel = SVec; +impl IntensityArcImage<3, 1, u8, SVec, 4, 1> for ArcImage4U8 { + type Pixel = SVec; fn pixel_to_grayscale(pixel: &SVec) -> u8 { pixel[0] @@ -925,9 +923,9 @@ impl IntensityArcImage<3, 1, u8, SVec, 4, 1, 1> for ArcImage4U8 { ) } - type GenArcImage = ArcImageR; + type GenArcImage = ArcImageR; - type GenMutImage = MutImageR; + type GenMutImage = MutImageR; fn cast_u8(img: &Self) -> Self::GenArcImage { img.clone() @@ -946,8 +944,8 @@ impl IntensityArcImage<3, 1, u8, SVec, 4, 1, 1> for ArcImage4U8 { } } -impl IntensityArcImage<3, 1, u16, SVec, 2, 1, 1> for ArcImage2U16 { - type Pixel = SVec; +impl IntensityArcImage<3, 1, u16, SVec, 2, 1> for ArcImage2U16 { + type Pixel = SVec; fn pixel_to_grayscale(pixel: &SVec) -> u16 { pixel[0] @@ -982,9 +980,9 @@ impl IntensityArcImage<3, 1, u16, SVec, 2, 1, 1> for ArcImage2U16 { SVec::::new(p[0] as f32 / 65535.0, p[1] as f32 / 65535.0) } - type GenArcImage = ArcImageR; + type GenArcImage = ArcImageR; - type GenMutImage = MutImageR; + type GenMutImage = MutImageR; fn cast_u8(img: &Self) -> Self::GenArcImage { Self::GenArcImage::::from_map(&img.image_view(), |rgb: &SVec| -> SVec { @@ -1004,8 +1002,8 @@ impl IntensityArcImage<3, 1, u16, SVec, 2, 1, 1> for ArcImage2U16 { } } -impl IntensityArcImage<3, 1, u16, SVec, 3, 1, 1> for ArcImage3U16 { - type Pixel = SVec; +impl IntensityArcImage<3, 1, u16, SVec, 3, 1> for ArcImage3U16 { + type Pixel = SVec; fn pixel_to_grayscale(pixel: &SVec) -> u16 { pixel[0] @@ -1045,9 +1043,9 @@ impl IntensityArcImage<3, 1, u16, SVec, 3, 1, 1> for ArcImage3U16 { ) } - type GenArcImage = ArcImageR; + type GenArcImage = ArcImageR; - type GenMutImage = MutImageR; + type GenMutImage = MutImageR; fn cast_u8(img: &Self) -> Self::GenArcImage { Self::GenArcImage::::from_map(&img.image_view(), |rgb: &SVec| -> SVec { @@ -1067,8 +1065,8 @@ impl IntensityArcImage<3, 1, u16, SVec, 3, 1, 1> for ArcImage3U16 { } } -impl IntensityArcImage<3, 1, u16, SVec, 4, 1, 1> for ArcImage4U16 { - type Pixel = SVec; +impl IntensityArcImage<3, 1, u16, SVec, 4, 1> for ArcImage4U16 { + type Pixel = SVec; fn pixel_to_grayscale(pixel: &SVec) -> u16 { pixel[0] @@ -1110,9 +1108,9 @@ impl IntensityArcImage<3, 1, u16, SVec, 4, 1, 1> for ArcImage4U16 { ) } - type GenArcImage = ArcImageR; + type GenArcImage = ArcImageR; - type GenMutImage = MutImageR; + type GenMutImage = MutImageR; fn cast_u8(img: &Self) -> Self::GenArcImage { Self::GenArcImage::::from_map(&img.image_view(), |rgb: &SVec| -> SVec { @@ -1132,8 +1130,8 @@ impl IntensityArcImage<3, 1, u16, SVec, 4, 1, 1> for ArcImage4U16 { } } -impl IntensityArcImage<3, 1, f32, SVec, 2, 1, 1> for ArcImage2F32 { - type Pixel = SVec; +impl IntensityArcImage<3, 1, f32, SVec, 2, 1> for ArcImage2F32 { + type Pixel = SVec; fn pixel_to_grayscale(pixel: &SVec) -> f32 { pixel[0] @@ -1171,9 +1169,9 @@ impl IntensityArcImage<3, 1, f32, SVec, 2, 1, 1> for ArcImage2F32 { 
*p } - type GenArcImage = ArcImageR; + type GenArcImage = ArcImageR; - type GenMutImage = MutImageR; + type GenMutImage = MutImageR; fn cast_u8(img: &Self) -> Self::GenArcImage { Self::GenArcImage::::from_map(&img.image_view(), |rgb: &SVec| -> SVec { @@ -1193,8 +1191,8 @@ impl IntensityArcImage<3, 1, f32, SVec, 2, 1, 1> for ArcImage2F32 { } } -impl IntensityArcImage<3, 1, f32, SVec, 3, 1, 1> for ArcImage3F32 { - type Pixel = SVec; +impl IntensityArcImage<3, 1, f32, SVec, 3, 1> for ArcImage3F32 { + type Pixel = SVec; fn pixel_to_grayscale(pixel: &SVec) -> f32 { pixel[0] @@ -1234,9 +1232,9 @@ impl IntensityArcImage<3, 1, f32, SVec, 3, 1, 1> for ArcImage3F32 { *p } - type GenArcImage = ArcImageR; + type GenArcImage = ArcImageR; - type GenMutImage = MutImageR; + type GenMutImage = MutImageR; fn cast_u8(img: &Self) -> Self::GenArcImage { Self::GenArcImage::::from_map(&img.image_view(), |rgb: &SVec| -> SVec { @@ -1256,8 +1254,8 @@ impl IntensityArcImage<3, 1, f32, SVec, 3, 1, 1> for ArcImage3F32 { } } -impl IntensityArcImage<3, 1, f32, SVec, 4, 1, 1> for ArcImage4F32 { - type Pixel = SVec; +impl IntensityArcImage<3, 1, f32, SVec, 4, 1> for ArcImage4F32 { + type Pixel = SVec; fn pixel_to_grayscale(pixel: &SVec) -> f32 { pixel[0] @@ -1299,9 +1297,9 @@ impl IntensityArcImage<3, 1, f32, SVec, 4, 1, 1> for ArcImage4F32 { *p } - type GenArcImage = ArcImageR; + type GenArcImage = ArcImageR; - type GenMutImage = MutImageR; + type GenMutImage = MutImageR; fn cast_u8(img: &Self) -> Self::GenArcImage { Self::GenArcImage::::from_map(&img.image_view(), |rgb: &SVec| -> SVec { diff --git a/crates/sophus_image/src/interpolation.rs b/crates/sophus_image/src/interpolation.rs index ff711ea..eb9a4d4 100644 --- a/crates/sophus_image/src/interpolation.rs +++ b/crates/sophus_image/src/interpolation.rs @@ -1,12 +1,11 @@ use crate::image_view::IsImageView; - -use sophus_tensor::element::SVec; +use sophus_core::linalg::SVec; /// Bilinear interpolated image lookup pub fn interpolate< 'a, const ROWS: usize, - I: IsImageView<'a, 3, 1, f32, SVec, ROWS, 1, 1>, + I: IsImageView<'a, 3, 1, f32, SVec, ROWS, 1>, >( img: &'a I, uv: nalgebra::Vector2, diff --git a/crates/sophus_image/src/lib.rs b/crates/sophus_image/src/lib.rs index bb6fdbf..61c27c3 100644 --- a/crates/sophus_image/src/lib.rs +++ b/crates/sophus_image/src/lib.rs @@ -1,3 +1,4 @@ +#![feature(portable_simd)] #![deny(missing_docs)] //! 
# image module diff --git a/crates/sophus_image/src/mut_image.rs b/crates/sophus_image/src/mut_image.rs index fc55828..8bd53e6 100644 --- a/crates/sophus_image/src/mut_image.rs +++ b/crates/sophus_image/src/mut_image.rs @@ -4,37 +4,36 @@ use crate::image_view::ImageSize; use crate::image_view::IsImageView; use crate::mut_image_view::IsMutImageView; -use sophus_tensor::element::IsStaticTensor; -use sophus_tensor::element::IsTensorScalar; -use sophus_tensor::element::SVec; -use sophus_tensor::mut_tensor::MutTensor; -use sophus_tensor::mut_view::IsMutTensorLike; -use sophus_tensor::view::IsTensorView; -use sophus_tensor::view::TensorView; +use sophus_core::linalg::scalar::IsCoreScalar; +use sophus_core::linalg::SVec; +use sophus_core::tensor::element::IsStaticTensor; +use sophus_core::tensor::mut_tensor::MutTensor; +use sophus_core::tensor::mut_tensor_view::IsMutTensorLike; +use sophus_core::tensor::tensor_view::IsTensorView; +use sophus_core::tensor::tensor_view::TensorView; /// Mutable image of static tensors #[derive(Debug, Clone, Default)] pub struct GenMutImage< const TOTAL_RANK: usize, const SRANK: usize, - Scalar: IsTensorScalar + 'static, - STensor: IsStaticTensor + 'static, + Scalar: IsCoreScalar + 'static, + STensor: IsStaticTensor + 'static, const ROWS: usize, const COLS: usize, - const BATCH_SIZE: usize, > { /// underlying mutable tensor - pub mut_tensor: MutTensor, + pub mut_tensor: MutTensor, } /// Mutable image of scalar values -pub type MutImage = GenMutImage<2, 0, Scalar, Scalar, 1, 1, 1>; +pub type MutImage = GenMutImage<2, 0, Scalar, Scalar, 1, 1>; /// Mutable image of vector values /// /// Here, R indicates the number of rows in the vector pub type MutImageR = - GenMutImage<3, 1, Scalar, SVec, ROWS, 1, 1>; + GenMutImage<3, 1, Scalar, SVec, ROWS, 1>; /// Mutable image of u8 scalars pub type MutImageU8 = MutImage; @@ -66,11 +65,10 @@ pub trait IsMutImage< 'a, const TOTAL_RANK: usize, const SRANK: usize, - Scalar: IsTensorScalar + 'static, - STensor: IsStaticTensor + 'static, + Scalar: IsCoreScalar + 'static, + STensor: IsStaticTensor + 'static, const ROWS: usize, const COLS: usize, - const BATCH_SIZE: usize, > where ndarray::Dim<[ndarray::Ix; TOTAL_RANK]>: ndarray::Dimension, { @@ -84,13 +82,13 @@ macro_rules! mut_image { ($scalar_rank:literal, $srank:literal) => { impl< 'a, - Scalar: IsTensorScalar + 'static, - STensor: IsStaticTensor + 'static, + Scalar: IsCoreScalar + 'static, + STensor: IsStaticTensor + 'static, const ROWS: usize, const COLS: usize, - const BATCH_SIZE: usize, - > IsImageView<'a, $scalar_rank, $srank, Scalar, STensor, ROWS, COLS, BATCH_SIZE> - for GenMutImage<$scalar_rank, $srank, Scalar, STensor, ROWS, COLS, BATCH_SIZE> + + > IsImageView<'a, $scalar_rank, $srank, Scalar, STensor, ROWS, COLS> + for GenMutImage<$scalar_rank, $srank, Scalar, STensor, ROWS, COLS> where ndarray::Dim<[ndarray::Ix; $scalar_rank]>: ndarray::Dimension, { @@ -103,15 +101,14 @@ macro_rules! mut_image { Scalar, STensor, ROWS, - COLS, - BATCH_SIZE, + COLS > { let v = self.mut_tensor.view(); GenImageView { tensor_view: v } } fn pixel(&'a self, u: usize, v: usize) -> STensor { - self.mut_tensor.mut_array[[v, u]] + self.mut_tensor.mut_array[[v, u]].clone() } fn image_size(&self) -> crate::image_view::ImageSize { @@ -121,12 +118,12 @@ macro_rules! 
mut_image { impl< 'a, - Scalar: IsTensorScalar + 'static, - STensor: IsStaticTensor + 'static, + Scalar: IsCoreScalar + 'static, + STensor: IsStaticTensor + 'static, const ROWS: usize, const COLS: usize, - const BATCH_SIZE: usize, - > GenMutImage<$scalar_rank, $srank, Scalar, STensor, ROWS, COLS, BATCH_SIZE> + + > GenMutImage<$scalar_rank, $srank, Scalar, STensor, ROWS, COLS> { /// creates a mutable image view from image size pub fn from_image_size(size: crate::image_view::ImageSize) -> Self { @@ -138,10 +135,10 @@ macro_rules! mut_image { Scalar, STensor, ROWS, - COLS, - BATCH_SIZE, + COLS >::from_shape(size.into()), } + } /// creates a mutable image from image size and value @@ -154,15 +151,14 @@ macro_rules! mut_image { Scalar, STensor, ROWS, - COLS, - BATCH_SIZE, + COLS >::from_shape_and_val(size.into(), val), } } /// creates a mutable image from image view pub fn make_copy_from( - v: &GenImageView<$scalar_rank, $srank, Scalar, STensor, ROWS, COLS, BATCH_SIZE>, + v: &GenImageView<$scalar_rank, $srank, Scalar, STensor, ROWS, COLS>, ) -> Self { Self { mut_tensor: MutTensor::< @@ -172,8 +168,7 @@ macro_rules! mut_image { Scalar, STensor, ROWS, - COLS, - BATCH_SIZE, + COLS >::make_copy_from(&v.tensor_view), } } @@ -187,7 +182,6 @@ macro_rules! mut_image { STensor, ROWS, COLS, - BATCH_SIZE, >::from_size_and_slice(image_size, slice)) } @@ -235,17 +229,15 @@ macro_rules! mut_image { 'b, const OTHER_HRANK: usize, const OTHER_SRANK: usize, - OtherScalar: IsTensorScalar + 'static, + OtherScalar: IsCoreScalar + 'static, OtherSTensor: IsStaticTensor< OtherScalar, OTHER_SRANK, OTHER_ROWS, OTHER_COLS, - OTHER_BATCHES, > + 'static, const OTHER_ROWS: usize, const OTHER_COLS: usize, - const OTHER_BATCHES: usize, F: FnMut(&OtherSTensor)-> STensor >( v: &'b GenImageView::< @@ -256,16 +248,15 @@ macro_rules! mut_image { OtherSTensor, OTHER_ROWS, OTHER_COLS, - OTHER_BATCHES, >, op: F, ) -> Self where ndarray::Dim<[ndarray::Ix; OTHER_HRANK]>: ndarray::Dimension, TensorView<'b, OTHER_HRANK, 2, OTHER_SRANK, OtherScalar, OtherSTensor, - OTHER_ROWS, OTHER_COLS, OTHER_BATCHES>: + OTHER_ROWS, OTHER_COLS>: IsTensorView<'b, OTHER_HRANK, 2, OTHER_SRANK, OtherScalar, OtherSTensor, - OTHER_ROWS, OTHER_COLS, OTHER_BATCHES>, + OTHER_ROWS, OTHER_COLS>, { Self { @@ -276,8 +267,7 @@ macro_rules! mut_image { Scalar, STensor, ROWS, - COLS, - BATCH_SIZE, + COLS >::from_map(&v.tensor_view, op), } } @@ -291,8 +281,7 @@ macro_rules! mut_image { Scalar, STensor, ROWS, - COLS, - BATCH_SIZE, + COLS > { GenArcImage { tensor: self.mut_tensor.to_shared(), @@ -302,13 +291,13 @@ macro_rules! mut_image { impl< 'a, - Scalar: IsTensorScalar + 'static, - STensor: IsStaticTensor + 'static, + Scalar: IsCoreScalar + 'static, + STensor: IsStaticTensor + 'static, const ROWS: usize, const COLS: usize, - const BATCH_SIZE: usize, - > IsMutImageView<'a, $scalar_rank, $srank, Scalar, STensor, ROWS, COLS, BATCH_SIZE> - for GenMutImage<$scalar_rank, $srank, Scalar, STensor, ROWS, COLS, BATCH_SIZE> + + > IsMutImageView<'a, $scalar_rank, $srank, Scalar, STensor, ROWS, COLS> + for GenMutImage<$scalar_rank, $srank, Scalar, STensor, ROWS, COLS> where ndarray::Dim<[ndarray::Ix; $scalar_rank]>: ndarray::Dimension, { @@ -321,8 +310,7 @@ macro_rules! 
mut_image { Scalar, STensor, ROWS, - COLS, - BATCH_SIZE, + COLS > { crate::mut_image_view::GenMutImageView { mut_tensor_view: self.mut_tensor.mut_view(), diff --git a/crates/sophus_image/src/mut_image_view.rs b/crates/sophus_image/src/mut_image_view.rs index 7f20e8c..550274f 100644 --- a/crates/sophus_image/src/mut_image_view.rs +++ b/crates/sophus_image/src/mut_image_view.rs @@ -2,11 +2,11 @@ use crate::image_view::GenImageView; use crate::image_view::ImageSize; use crate::image_view::IsImageView; -use sophus_tensor::element::IsStaticTensor; -use sophus_tensor::element::IsTensorScalar; -use sophus_tensor::mut_view::IsMutTensorLike; -use sophus_tensor::mut_view::MutTensorView; -use sophus_tensor::view::IsTensorLike; +use sophus_core::linalg::scalar::IsCoreScalar; +use sophus_core::tensor::element::IsStaticTensor; +use sophus_core::tensor::mut_tensor_view::IsMutTensorLike; +use sophus_core::tensor::mut_tensor_view::MutTensorView; +use sophus_core::tensor::tensor_view::IsTensorLike; /// Mutable image view of static tensors #[derive(Debug, PartialEq)] pub struct GenMutImageView< 'a, const TOTAL_RANK: usize, const SRANK: usize, - Scalar: IsTensorScalar + 'static, - STensor: IsStaticTensor + 'static, + Scalar: IsCoreScalar + 'static, + STensor: IsStaticTensor + 'static, const ROWS: usize, const COLS: usize, - const BATCH_SIZE: usize, > where ndarray::Dim<[ndarray::Ix; TOTAL_RANK]>: ndarray::Dimension, { /// underlying mutable tensor view - pub mut_tensor_view: - MutTensorView<'a, TOTAL_RANK, 2, SRANK, Scalar, STensor, ROWS, COLS, BATCH_SIZE>, + pub mut_tensor_view: MutTensorView<'a, TOTAL_RANK, 2, SRANK, Scalar, STensor, ROWS, COLS>, } macro_rules! mut_image_view { ($scalar_rank:literal, $srank:literal) => { impl< 'a, - Scalar: IsTensorScalar + 'static, - STensor: IsStaticTensor + 'static, + Scalar: IsCoreScalar + 'static, + STensor: IsStaticTensor + 'static, const ROWS: usize, const COLS: usize, - const BATCH_SIZE: usize, - > IsImageView<'a, $scalar_rank, $srank, Scalar, STensor, ROWS, COLS, BATCH_SIZE> - for GenMutImageView<'a, $scalar_rank, $srank, Scalar, STensor, ROWS, COLS, BATCH_SIZE> + > IsImageView<'a, $scalar_rank, $srank, Scalar, STensor, ROWS, COLS> + for GenMutImageView<'a, $scalar_rank, $srank, Scalar, STensor, ROWS, COLS> + where ndarray::Dim<[ndarray::Ix; $scalar_rank]>: ndarray::Dimension, { @@ -50,8 +47,7 @@ macro_rules! mut_image_view { fn image_view( &'a self, - ) -> GenImageView<'a, $scalar_rank, $srank, Scalar, STensor, ROWS, COLS, BATCH_SIZE> - { + ) -> GenImageView<'a, $scalar_rank, $srank, Scalar, STensor, ROWS, COLS> { let view = self.mut_tensor_view.view(); GenImageView { tensor_view: view } } @@ -63,32 +59,20 @@ macro_rules!
mut_image_view { impl< 'a, - Scalar: IsTensorScalar + 'static, - STensor: IsStaticTensor + 'static, + Scalar: IsCoreScalar + 'static, + STensor: IsStaticTensor + 'static, const ROWS: usize, const COLS: usize, - const BATCH_SIZE: usize, - > IsMutImageView<'a, $scalar_rank, $srank, Scalar, STensor, ROWS, COLS, BATCH_SIZE> - for GenMutImageView<'a, $scalar_rank, $srank, Scalar, STensor, ROWS, COLS, BATCH_SIZE> + > IsMutImageView<'a, $scalar_rank, $srank, Scalar, STensor, ROWS, COLS> + for GenMutImageView<'a, $scalar_rank, $srank, Scalar, STensor, ROWS, COLS> where - MutTensorView<'a, $scalar_rank, 2, $srank, Scalar, STensor, ROWS, COLS, BATCH_SIZE>: - IsMutTensorLike< - 'a, - $scalar_rank, - 2, - $srank, - Scalar, - STensor, - ROWS, - COLS, - BATCH_SIZE, - >, + MutTensorView<'a, $scalar_rank, 2, $srank, Scalar, STensor, ROWS, COLS>: + IsMutTensorLike<'a, $scalar_rank, 2, $srank, Scalar, STensor, ROWS, COLS>, ndarray::Dim<[ndarray::Ix; $scalar_rank]>: ndarray::Dimension, { fn mut_image_view<'b: 'a>( &'b mut self, - ) -> GenMutImageView<'a, $scalar_rank, $srank, Scalar, STensor, ROWS, COLS, BATCH_SIZE> - { + ) -> GenMutImageView<'a, $scalar_rank, $srank, Scalar, STensor, ROWS, COLS> { GenMutImageView { mut_tensor_view: MutTensorView::< 'a, @@ -99,7 +83,6 @@ macro_rules! mut_image_view { STensor, ROWS, COLS, - BATCH_SIZE, >::new( self.mut_tensor_view.elem_view_mut.view_mut() ), @@ -125,18 +108,17 @@ pub trait IsMutImageView< 'a, const TOTAL_RANK: usize, const SRANK: usize, - Scalar: IsTensorScalar + 'static, - STensor: IsStaticTensor + 'static, + Scalar: IsCoreScalar + 'static, + STensor: IsStaticTensor + 'static, const ROWS: usize, const COLS: usize, - const BATCH_SIZE: usize, > where ndarray::Dim<[ndarray::Ix; TOTAL_RANK]>: ndarray::Dimension, { /// returns mutable image view fn mut_image_view<'b: 'a>( &'b mut self, - ) -> GenMutImageView<'a, TOTAL_RANK, SRANK, Scalar, STensor, ROWS, COLS, BATCH_SIZE>; + ) -> GenMutImageView<'a, TOTAL_RANK, SRANK, Scalar, STensor, ROWS, COLS>; /// returns mutable u,v pixel fn mut_pixel(&'a mut self, u: usize, v: usize) -> &mut STensor; diff --git a/crates/sophus_lie/Cargo.toml b/crates/sophus_lie/Cargo.toml index e9b6797..11bc3c6 100644 --- a/crates/sophus_lie/Cargo.toml +++ b/crates/sophus_lie/Cargo.toml @@ -11,9 +11,9 @@ repository.workspace = true version.workspace = true [dependencies] -sophus_calculus.workspace = true -sophus_tensor.workspace = true +sophus_core.workspace = true approx.workspace = true assertables.workspace = true nalgebra.workspace = true +num-traits.workspace = true diff --git a/crates/sophus_lie/src/factor_lie_group.rs b/crates/sophus_lie/src/factor_lie_group.rs new file mode 100644 index 0000000..55b1fd9 --- /dev/null +++ b/crates/sophus_lie/src/factor_lie_group.rs @@ -0,0 +1,177 @@ +use crate::groups::rotation2::Rotation2; +use crate::groups::rotation3::Rotation3; +use crate::lie_group::LieGroup; +use crate::traits::IsRealLieFactorGroupImpl; +use approx::assert_relative_eq; +use sophus_core::calculus::dual::dual_scalar::DualBatchScalar; +use sophus_core::calculus::dual::dual_scalar::DualScalar; +use sophus_core::calculus::maps::matrix_valued_maps::MatrixValuedMapFromVector; +use sophus_core::linalg::matrix::IsMatrix; +use sophus_core::linalg::scalar::IsRealScalar; +use sophus_core::linalg::BatchScalarF64; + +impl< + S: IsRealScalar, + const DOF: usize, + const PARAMS: usize, + const POINT: usize, + const BATCH_SIZE: usize, + G: IsRealLieFactorGroupImpl, + > LieGroup +{ + /// V matrix - used in the exponential map + pub fn 
mat_v(tangent: &S::Vector) -> S::Matrix { + G::mat_v(tangent) + } + + /// V matrix inverse - used in the logarithmic map + pub fn mat_v_inverse(tangent: &S::Vector) -> S::Matrix { + G::mat_v_inverse(tangent) + } + + /// derivative of V matrix + pub fn dx_mat_v(tangent: &S::Vector) -> [S::Matrix; DOF] { + G::dx_mat_v(tangent) + } + + /// derivative of V matrix inverse + pub fn dx_mat_v_inverse(tangent: &S::Vector) -> [S::Matrix; DOF] { + G::dx_mat_v_inverse(tangent) + } + + /// derivative of V matrix times point + pub fn dparams_matrix_times_point( + params: &S::Vector, + point: &S::Vector, + ) -> S::Matrix { + G::dparams_matrix_times_point(params, point) + } +} + +/// A trait for testing real Lie factor groups. +pub trait RealFactorLieGroupTest { + /// Run all tests. + fn run_real_factor_tests() { + Self::mat_v_test(); + Self::test_mat_v_jacobian(); + } + + /// Test mat_v and mat_v_inverse. + fn mat_v_test(); + + /// Test the Jacobians of mat_v and mat_v_inverse. + fn test_mat_v_jacobian(); +} + +macro_rules! def_real_group_test_template { + ($scalar:ty, $dual_scalar:ty, $group: ty, $dual_group: ty, $batch:literal +) => { + impl RealFactorLieGroupTest for $group { + fn mat_v_test() { + use crate::traits::IsLieGroup; + use sophus_core::calculus::manifold::traits::TangentImpl; + use sophus_core::linalg::scalar::IsScalar; + + const POINT: usize = <$group>::POINT; + + for t in <$group>::tangent_examples() { + let mat_v = Self::mat_v(&t); + let mat_v_inverse = Self::mat_v_inverse(&t); + + assert_relative_eq!( + mat_v.mat_mul(mat_v_inverse), + <$scalar as IsScalar<$batch>>::Matrix::::identity(), + epsilon = 0.0001 + ); + } + } + + fn test_mat_v_jacobian() { + use crate::traits::IsLieGroup; + use sophus_core::calculus::dual::dual_scalar::IsDualScalar; + use sophus_core::calculus::manifold::traits::TangentImpl; + use sophus_core::calculus::maps::vector_valued_maps::VectorValuedMapFromVector; + use sophus_core::linalg::scalar::IsScalar; + use sophus_core::linalg::vector::IsVector; + use sophus_core::params::HasParams; + use sophus_core::points::example_points; + + const DOF: usize = <$group>::DOF; + const POINT: usize = <$group>::POINT; + const PARAMS: usize = <$group>::PARAMS; + use sophus_core::tensor::tensor_view::IsTensorLike; + + for t in <$group>::tangent_examples() { + let mat_v_jacobian = Self::dx_mat_v(&t); + + let mat_v_x = |t: <$scalar as IsScalar<$batch>>::Vector| + -> <$scalar as IsScalar<$batch>>::Matrix + { + Self::mat_v(&t) + }; + + let num_diff = MatrixValuedMapFromVector::<$scalar, $batch>::sym_diff_quotient( + mat_v_x, t, 0.0001, + ); + + for i in 0..DOF { + println!("i: {}", i); + assert_relative_eq!(mat_v_jacobian[i], num_diff.get([i]), epsilon = 0.001); + } + + let mat_v_inv_jacobian = Self::dx_mat_v_inverse(&t); + + let mat_v_x_inv = |t: <$scalar as IsScalar<$batch>>::Vector| + -> <$scalar as IsScalar<$batch>>::Matrix { Self::mat_v_inverse(&t) }; + let num_diff = MatrixValuedMapFromVector::sym_diff_quotient(mat_v_x_inv, t, 0.0001); + + for i in 0..DOF { + println!("i: {}", i); + assert_relative_eq!(mat_v_inv_jacobian[i], num_diff.get([i]), epsilon = 0.001); + } + } + for p in example_points::<$scalar, POINT, $batch>() { + for a in Self::element_examples() { + let dual_params_a = <$dual_scalar>::vector_v(*a.clone().params()); + let _dual_a = <$dual_group>::from_params(&dual_params_a); + let dual_p = + <$dual_scalar as IsScalar<$batch>>::Vector::from_real_vector(p.clone()); + + let dual_fn = |x: <$dual_scalar as IsScalar<$batch>>::Vector| + -> <$dual_scalar as IsScalar<$batch>>::Vector + {
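+                            // Group action on the point as a function of the
+                            // parameter vector, lifted to dual numbers so that
+                            // the forward-mode autodiff below yields
+                            // d/dparams [ G(params) * p ].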
<$dual_group>::from_params(&x).matrix() * dual_p.clone() + }; + + let auto_diff = + VectorValuedMapFromVector::<$dual_scalar, $batch>::static_fw_autodiff + ( + dual_fn, + *a.params(), + ); + let analytic_diff = Self::dparams_matrix_times_point(a.params(), &p); + assert_relative_eq!(analytic_diff, auto_diff, epsilon = 0.001); + } + } + } + } + }; +} + +def_real_group_test_template!(f64, DualScalar, Rotation2, Rotation2, 1); +def_real_group_test_template!( + BatchScalarF64<8>, + DualBatchScalar<8>, + Rotation2, 8>, + Rotation2, 8>, + 8 +); + +def_real_group_test_template!(f64, DualScalar, Rotation3, Rotation3, 1); +def_real_group_test_template!( + BatchScalarF64<8>, + DualBatchScalar<8>, + Rotation3, 8>, + Rotation3, 8>, + 8 +); diff --git a/crates/sophus_lie/src/groups.rs b/crates/sophus_lie/src/groups.rs new file mode 100644 index 0000000..c6de3a3 --- /dev/null +++ b/crates/sophus_lie/src/groups.rs @@ -0,0 +1,10 @@ +/// 2d isometry +pub mod isometry2; +/// 3d isometry +pub mod isometry3; +/// 2d rotation +pub mod rotation2; +/// 3d rotation +pub mod rotation3; +/// semi-direct product +pub mod translation_product_product; diff --git a/crates/sophus_lie/src/groups/isometry2.rs b/crates/sophus_lie/src/groups/isometry2.rs new file mode 100644 index 0000000..decb763 --- /dev/null +++ b/crates/sophus_lie/src/groups/isometry2.rs @@ -0,0 +1,49 @@ +use super::rotation2::Rotation2Impl; +use super::translation_product_product::TranslationProductGroupImpl; +use crate::groups::rotation2::Rotation2; +use crate::lie_group::LieGroup; +use crate::traits::IsTranslationProductGroup; +use sophus_core::linalg::scalar::IsScalar; + +/// 2D isometry group implementation struct - SE(2) +pub type Isometry2Impl = + TranslationProductGroupImpl>; + +/// 2D isometry group - SE(2) +pub type Isometry2 = LieGroup>; + +impl, const BATCH: usize> Isometry2 { + /// create isometry from translation and rotation + pub fn from_translation_and_rotation( + translation: &S::Vector<2>, + rotation: &Rotation2, + ) -> Self { + Self::from_translation_and_factor(translation, rotation) + } + + /// set rotation + pub fn set_rotation(&mut self, rotation: &Rotation2) { + self.set_factor(rotation) + } + + /// get rotation + pub fn rotation(&self) -> Rotation2 { + self.factor() + } +} + +#[test] +fn isometry2_prop_tests() { + use crate::real_lie_group::RealLieGroupTest; + use sophus_core::calculus::dual::dual_scalar::DualBatchScalar; + use sophus_core::calculus::dual::dual_scalar::DualScalar; + use sophus_core::linalg::BatchScalarF64; + + Isometry2::::test_suite(); + Isometry2::, 8>::test_suite(); + Isometry2::::test_suite(); + Isometry2::, 8>::test_suite(); + + Isometry2::::run_real_tests(); + Isometry2::, 8>::run_real_tests(); +} diff --git a/crates/sophus_lie/src/groups/isometry3.rs b/crates/sophus_lie/src/groups/isometry3.rs new file mode 100644 index 0000000..025c876 --- /dev/null +++ b/crates/sophus_lie/src/groups/isometry3.rs @@ -0,0 +1,48 @@ +use super::rotation3::Rotation3; +use super::rotation3::Rotation3Impl; +use super::translation_product_product::TranslationProductGroupImpl; +use crate::lie_group::LieGroup; +use crate::traits::IsTranslationProductGroup; +use sophus_core::linalg::scalar::IsScalar; + +/// 3D isometry group implementation struct - SE(3) +pub type Isometry3Impl = + TranslationProductGroupImpl>; +/// 3d isometry group - SE(3) +pub type Isometry3 = LieGroup>; + +impl, const BATCH: usize> Isometry3 { + /// create isometry from translation and rotation + pub fn from_translation_and_rotation( + translation: 
&S::Vector<3>, + rotation: &Rotation3, + ) -> Self { + Self::from_translation_and_factor(translation, rotation) + } + + /// set rotation + pub fn set_rotation(&mut self, rotation: &Rotation3) { + self.set_factor(rotation) + } + + /// get rotation + pub fn rotation(&self) -> Rotation3 { + self.factor() + } +} + +#[test] +fn isometry3_prop_tests() { + use crate::real_lie_group::RealLieGroupTest; + use sophus_core::calculus::dual::dual_scalar::DualBatchScalar; + use sophus_core::calculus::dual::dual_scalar::DualScalar; + use sophus_core::linalg::BatchScalarF64; + + Isometry3::::test_suite(); + Isometry3::, 8>::test_suite(); + Isometry3::::test_suite(); + Isometry3::, 8>::test_suite(); + + Isometry3::::run_real_tests(); + Isometry3::, 8>::run_real_tests(); +} diff --git a/crates/sophus_lie/src/groups/rotation2.rs b/crates/sophus_lie/src/groups/rotation2.rs new file mode 100644 index 0000000..5e3e995 --- /dev/null +++ b/crates/sophus_lie/src/groups/rotation2.rs @@ -0,0 +1,319 @@ +use crate::lie_group::LieGroup; +use crate::traits::IsLieFactorGroupImpl; +use crate::traits::IsLieGroupImpl; +use crate::traits::IsRealLieFactorGroupImpl; +use crate::traits::IsRealLieGroupImpl; +use sophus_core::calculus::manifold::{self}; +use sophus_core::linalg::matrix::IsMatrix; +use sophus_core::linalg::scalar::IsRealScalar; +use sophus_core::linalg::scalar::IsScalar; +use sophus_core::linalg::vector::IsVector; +use sophus_core::params::HasParams; +use sophus_core::params::ParamsImpl; +use std::marker::PhantomData; + +/// 2D rotation group implementation struct - SO(2) +#[derive(Debug, Copy, Clone, Default)] +pub struct Rotation2Impl, const BATCH_SIZE: usize> { + phantom: PhantomData, +} + +impl, const BATCH_SIZE: usize> Rotation2Impl {} + +impl, const BATCH_SIZE: usize> ParamsImpl + for Rotation2Impl +{ + fn params_examples() -> Vec> { + let mut params = vec![]; + for i in 0..10 { + let angle = S::from_f64(i as f64 * std::f64::consts::PI / 5.0); + params.push( + Rotation2::::exp(&S::Vector::<1>::from_array([angle])) + .params() + .clone(), + ); + } + params + } + + fn invalid_params_examples() -> Vec> { + vec![ + S::Vector::<2>::from_array([S::from_f64(0.0), S::from_f64(0.0)]), + S::Vector::<2>::from_array([S::from_f64(0.5), S::from_f64(0.5)]), + S::Vector::<2>::from_array([S::from_f64(0.5), S::from_f64(-0.5)]), + ] + } + + fn are_params_valid(params: &S::Vector<2>) -> S::Mask { + let norm = params.norm(); + (norm - S::from_f64(1.0)) + .abs() + .less_equal(&S::from_f64(1e-6)) + } +} + +impl, const BATCH_SIZE: usize> + manifold::traits::TangentImpl for Rotation2Impl +{ + fn tangent_examples() -> Vec> { + vec![ + S::Vector::<1>::from_array([S::from_f64(0.0)]), + S::Vector::<1>::from_array([S::from_f64(1.0)]), + S::Vector::<1>::from_array([S::from_f64(-1.0)]), + S::Vector::<1>::from_array([S::from_f64(0.5)]), + S::Vector::<1>::from_array([S::from_f64(-0.4)]), + ] + } +} + +impl, const BATCH_SIZE: usize> + crate::traits::IsLieGroupImpl for Rotation2Impl +{ + type GenG> = Rotation2Impl; + type RealG = Rotation2Impl; + type DualG = Rotation2Impl; + + const IS_ORIGIN_PRESERVING: bool = true; + const IS_AXIS_DIRECTION_PRESERVING: bool = false; + const IS_DIRECTION_VECTOR_PRESERVING: bool = false; + const IS_SHAPE_PRESERVING: bool = true; + const IS_DISTANCE_PRESERVING: bool = true; + const IS_PARALLEL_LINE_PRESERVING: bool = true; + + fn identity_params() -> S::Vector<2> { + S::Vector::<2>::from_array([S::ones(), S::zeros()]) + } + + fn adj(_params: &S::Vector<2>) -> S::Matrix<1, 1> { + S::Matrix::<1, 1>::identity() +
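+        // SO(2) is commutative, so the adjoint representation on its
+        // one-dimensional tangent space is trivially the identity.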
} + + fn exp(omega: &S::Vector<1>) -> S::Vector<2> { + // angle to complex number + let angle = omega.get_elem(0); + let cos = angle.clone().cos(); + let sin = angle.sin(); + S::Vector::<2>::from_array([cos, sin]) + } + + fn log(params: &S::Vector<2>) -> S::Vector<1> { + // complex number to angle + let angle = params.get_elem(1).atan2(params.get_elem(0)); + S::Vector::<1>::from_array([angle]) + } + + fn hat(omega: &S::Vector<1>) -> S::Matrix<2, 2> { + let angle = omega.clone().get_elem(0); + S::Matrix::<2, 2>::from_array2([[S::zeros(), -angle.clone()], [angle, S::zeros()]]) + } + + fn vee(hat: &S::Matrix<2, 2>) -> S::Vector<1> { + let angle = hat.get_elem([1, 0]); + S::Vector::<1>::from_array([angle]) + } + + fn group_mul(params1: &S::Vector<2>, params2: &S::Vector<2>) -> S::Vector<2> { + let a = params1.get_elem(0); + let b = params1.get_elem(1); + let c = params2.get_elem(0); + let d = params2.get_elem(1); + + S::Vector::<2>::from_array([a.clone() * c.clone() - d.clone() * b.clone(), a * d + b * c]) + } + + fn inverse(params: &S::Vector<2>) -> S::Vector<2> { + S::Vector::<2>::from_array([params.get_elem(0), -params.get_elem(1)]) + } + + fn transform(params: &S::Vector<2>, point: &S::Vector<2>) -> S::Vector<2> { + Self::matrix(params) * point.clone() + } + + fn to_ambient(params: &S::Vector<2>) -> S::Vector<2> { + // homogeneous coordinates + params.clone() + } + + fn compact(params: &S::Vector<2>) -> S::Matrix<2, 2> { + Self::matrix(params) + } + + fn matrix(params: &S::Vector<2>) -> S::Matrix<2, 2> { + // rotation matrix + let cos = params.get_elem(0); + let sin = params.get_elem(1); + S::Matrix::<2, 2>::from_array2([[cos.clone(), -sin.clone()], [sin, cos]]) + } + + fn ad(_tangent: &S::Vector<1>) -> S::Matrix<1, 1> { + S::Matrix::zeros() + } +} + +impl, const BATCH_SIZE: usize> + IsRealLieGroupImpl for Rotation2Impl +{ + fn dx_exp_x_at_0() -> S::Matrix<2, 1> { + S::Matrix::from_real_array2([[S::RealScalar::zeros()], [S::RealScalar::ones()]]) + } + + fn dx_exp_x_times_point_at_0(point: S::Vector<2>) -> S::Matrix<2, 1> { + S::Matrix::from_array2([[-point.get_elem(1)], [point.get_elem(0)]]) + } + + fn dx_exp(tangent: &S::Vector<1>) -> S::Matrix<2, 1> { + let theta = tangent.get_elem(0); + S::Matrix::<2, 1>::from_array2([[-theta.sin()], [theta.cos()]]) + } + + fn dx_log_x(params: &S::Vector<2>) -> S::Matrix<1, 2> { + let x_0 = params.get_elem(0); + let x_1 = params.get_elem(1); + let x_sq = x_0 * x_0 + x_1 * x_1; + S::Matrix::from_array2([[-x_1 / x_sq, x_0 / x_sq]]) + } + + fn da_a_mul_b(_a: &S::Vector<2>, b: &S::Vector<2>) -> S::Matrix<2, 2> { + Self::matrix(b) + } + + fn db_a_mul_b(a: &S::Vector<2>, _b: &S::Vector<2>) -> S::Matrix<2, 2> { + Self::matrix(a) + } + + fn has_shortest_path_ambiguity(params: &S::Vector<2>) -> S::Mask { + (Self::log(params).vector().get_elem(0).abs() - S::from_f64(std::f64::consts::PI)) + .abs() + .less_equal(&S::from_f64(1e-5)) + } +} + +/// 2d rotation group - SO(2) +pub type Rotation2 = LieGroup>; + +impl, const BATCH_SIZE: usize> IsLieFactorGroupImpl + for Rotation2Impl +{ + type GenFactorG> = Rotation2Impl; + type RealFactorG = Rotation2Impl; + type DualFactorG = Rotation2Impl; + + fn mat_v(v: &S::Vector<1>) -> S::Matrix<2, 2> { + let one_minus_cos_theta_by_theta: S; + let theta = v.get_elem(0); + let abs_theta = theta.clone().abs(); + + let near_zero = abs_theta.less_equal(&S::from_f64(1e-6)); + + let theta_sq = theta.clone() * theta.clone(); + let sin_theta_by_theta = (S::from_f64(1.0) - S::from_f64(1.0 / 6.0) * theta_sq.clone()) + .select(&near_zero, 
theta.clone().sin() / theta.clone()); + one_minus_cos_theta_by_theta = (S::from_f64(0.5) * theta.clone() + - S::from_f64(1.0 / 24.0) * theta.clone() * theta_sq) + .select(&near_zero, (S::from_f64(1.0) - theta.clone().cos()) / theta); + + S::Matrix::<2, 2>::from_array2([ + [ + sin_theta_by_theta.clone(), + -one_minus_cos_theta_by_theta.clone(), + ], + [one_minus_cos_theta_by_theta, sin_theta_by_theta], + ]) + } + + fn mat_v_inverse(tangent: &S::Vector<1>) -> S::Matrix<2, 2> { + let theta = tangent.get_elem(0); + let halftheta = S::from_f64(0.5) * theta.clone(); + + let real_minus_one = theta.clone().cos() - S::from_f64(1.0); + let abs_real_minus_one = real_minus_one.clone().abs(); + + let near_zero = abs_real_minus_one.less_equal(&S::from_f64(1e-6)); + + let halftheta_by_tan_of_halftheta = (S::from_f64(1.0) + - S::from_f64(1.0 / 12.0) * tangent.get_elem(0) * tangent.get_elem(0)) + .select( + &near_zero, + -(halftheta.clone() * theta.sin()) / real_minus_one, + ); + + S::Matrix::<2, 2>::from_array2([ + [halftheta_by_tan_of_halftheta.clone(), halftheta.clone()], + [-halftheta, halftheta_by_tan_of_halftheta], + ]) + } + + fn adj_of_translation(_params: &S::Vector<2>, point: &S::Vector<2>) -> S::Matrix<2, 1> { + S::Matrix::<2, 1>::from_array2([[point.get_elem(1)], [-point.get_elem(0)]]) + } + + fn ad_of_translation(point: &S::Vector<2>) -> S::Matrix<2, 1> { + S::Matrix::<2, 1>::from_array2([[point.get_elem(1)], [-point.get_elem(0)]]) + } +} + +impl, const BATCH_SIZE: usize> + IsRealLieFactorGroupImpl for Rotation2Impl +{ + fn dx_mat_v(tangent: &S::Vector<1>) -> [S::Matrix<2, 2>; 1] { + let theta = tangent.get_elem(0); + let theta_sq = theta * theta; + let sin_theta = theta.sin(); + let cos_theta = theta.cos(); + + let near_zero = theta_sq.abs().less_equal(&S::from_f64(1e-6)); + + let m00 = (S::from_f64(-1.0 / 3.0) * theta + S::from_f64(1.0 / 30.0) * theta * theta_sq) + .select(&near_zero, (theta * cos_theta - sin_theta) / theta_sq); + let m01 = (-S::from_f64(0.5) + S::from_f64(0.125) * theta_sq).select( + &near_zero, + (-theta * sin_theta - cos_theta + S::from_f64(1.0)) / theta_sq, + ); + + [S::Matrix::<2, 2>::from_array2([[m00, m01], [-m01, m00]])] + } + + fn dparams_matrix_times_point(_params: &S::Vector<2>, point: &S::Vector<2>) -> S::Matrix<2, 2> { + let px = point.get_elem(0); + let py = point.get_elem(1); + S::Matrix::from_array2([[px, -py], [py, px]]) + } + + fn dx_mat_v_inverse(tangent: &S::Vector<1>) -> [S::Matrix<2, 2>; 1] { + let theta = tangent.get_elem(0); + let sin_theta = theta.sin(); + let cos_theta = theta.cos(); + + let near_zero = theta.abs().less_equal(&S::from_f64(1e-6)); + + let c = (S::from_f64(-1.0 / 6.0) * theta).select( + &near_zero, + (theta - sin_theta) / (S::from_f64(2.0) * (cos_theta - S::from_f64(1.0))), + ); + + [S::Matrix::<2, 2>::from_array2([ + [c, S::from_f64(0.5)], + [-S::from_f64(0.5), c], + ])] + } +} + +#[test] +fn rotation2_prop_tests() { + use crate::factor_lie_group::RealFactorLieGroupTest; + use crate::real_lie_group::RealLieGroupTest; + use sophus_core::calculus::dual::dual_scalar::DualBatchScalar; + use sophus_core::calculus::dual::dual_scalar::DualScalar; + use sophus_core::linalg::BatchScalarF64; + + Rotation2::::test_suite(); + Rotation2::, 8>::test_suite(); + Rotation2::::test_suite(); + Rotation2::, 8>::test_suite(); + + Rotation2::::run_real_tests(); + Rotation2::, 8>::run_real_tests(); + + Rotation2::::run_real_factor_tests(); + Rotation2::, 8>::run_real_factor_tests(); +} diff --git a/crates/sophus_lie/src/groups/rotation3.rs 
b/crates/sophus_lie/src/groups/rotation3.rs new file mode 100644 index 0000000..bb19e3f --- /dev/null +++ b/crates/sophus_lie/src/groups/rotation3.rs @@ -0,0 +1,635 @@ +use crate::lie_group::LieGroup; +use crate::traits::IsLieGroupImpl; +use crate::traits::IsRealLieFactorGroupImpl; +use crate::traits::IsRealLieGroupImpl; +use sophus_core::calculus::manifold::{self}; +use sophus_core::linalg::bool_mask::BoolMask; +use sophus_core::linalg::matrix::IsMatrix; +use sophus_core::linalg::scalar::IsRealScalar; +use sophus_core::linalg::scalar::IsScalar; +use sophus_core::linalg::vector::cross; +use sophus_core::linalg::vector::IsVector; +use sophus_core::params::HasParams; +use sophus_core::params::ParamsImpl; +use std::marker::PhantomData; + +/// 3d rotation implementation - SO(3) +#[derive(Debug, Copy, Clone, Default)] +pub struct Rotation3Impl, const BATCH: usize> { + phantom: PhantomData, +} + +impl, const BATCH: usize> ParamsImpl for Rotation3Impl { + fn params_examples() -> Vec> { + let mut params = vec![]; + + params.push( + Rotation3::::exp(&S::Vector::<3>::from_f64_array([0.0, 0.0, 0.0])) + .params() + .clone(), + ); + params.push( + Rotation3::::exp(&S::Vector::<3>::from_f64_array([0.1, 0.5, -0.1])) + .params() + .clone(), + ); + params.push( + Rotation3::::exp(&S::Vector::<3>::from_f64_array([0.0, 0.2, 1.0])) + .params() + .clone(), + ); + params.push( + Rotation3::::exp(&S::Vector::<3>::from_f64_array([-0.2, 0.0, 0.8])) + .params() + .clone(), + ); + params + } + + fn invalid_params_examples() -> Vec> { + vec![ + S::Vector::<4>::from_f64_array([0.0, 0.0, 0.0, 0.0]), + S::Vector::<4>::from_f64_array([0.5, 0.5, 0.5, 0.0]), + S::Vector::<4>::from_f64_array([0.5, (-0.5), 0.5, 1.0]), + ] + } + + fn are_params_valid(params: &S::Vector<4>) -> S::Mask { + let norm = params.norm(); + (norm - S::from_f64(1.0)) + .abs() + .less_equal(&S::from_f64(1e-6)) + } +} + +impl, const BATCH: usize> manifold::traits::TangentImpl + for Rotation3Impl +{ + fn tangent_examples() -> Vec> { + vec![ + S::Vector::<3>::from_f64_array([0.0, 0.0, 0.0]), + S::Vector::<3>::from_f64_array([1.0, 0.0, 0.0]), + S::Vector::<3>::from_f64_array([0.0, 1.0, 0.0]), + S::Vector::<3>::from_f64_array([0.0, 0.0, 1.0]), + S::Vector::<3>::from_f64_array([0.5, 0.5, 0.1]), + S::Vector::<3>::from_f64_array([-0.1, -0.5, -0.5]), + ] + } +} + +impl, const BATCH: usize> IsLieGroupImpl + for Rotation3Impl +{ + const IS_ORIGIN_PRESERVING: bool = true; + const IS_AXIS_DIRECTION_PRESERVING: bool = false; + const IS_DIRECTION_VECTOR_PRESERVING: bool = false; + const IS_SHAPE_PRESERVING: bool = true; + const IS_DISTANCE_PRESERVING: bool = true; + const IS_PARALLEL_LINE_PRESERVING: bool = true; + + fn identity_params() -> S::Vector<4> { + S::Vector::<4>::from_f64_array([1.0, 0.0, 0.0, 0.0]) + } + + fn adj(params: &S::Vector<4>) -> S::Matrix<3, 3> { + Self::matrix(params) + } + + fn exp(omega: &S::Vector<3>) -> S::Vector<4> { + const EPS: f64 = 1e-8; + let theta_sq = omega.squared_norm(); + + let theta_po4 = theta_sq.clone() * theta_sq.clone(); + let theta = theta_sq.clone().sqrt(); + let half_theta: S = S::from_f64(0.5) * theta.clone(); + + let near_zero = theta_sq.less_equal(&S::from_f64(EPS * EPS)); + + let imag_factor = (S::from_f64(0.5) - S::from_f64(1.0 / 48.0) * theta_sq.clone() + + S::from_f64(1.0 / 3840.0) * theta_po4.clone()) + .select(&near_zero, half_theta.clone().sin() / theta); + + let real_factor = (S::from_f64(1.0) - S::from_f64(1.0 / 8.0) * theta_sq + + S::from_f64(1.0 / 384.0) * theta_po4) + .select(&near_zero, half_theta.cos()); + 
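+        // Assemble the unit quaternion q = (cos(θ/2), sin(θ/2)/θ · ω):
+        // `real_factor` is cos(θ/2) and `imag_factor` is sin(θ/2)/θ, with the
+        // Taylor fallbacks above taking over for θ ≈ 0, where the direct
+        // quotient would be 0/0.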
+ S::Vector::<4>::from_array([ + real_factor, + imag_factor.clone() * omega.get_elem(0), + imag_factor.clone() * omega.get_elem(1), + imag_factor * omega.get_elem(2), + ]) + } + + fn log(params: &S::Vector<4>) -> S::Vector<3> { + const EPS: f64 = 1e-8; + let ivec: S::Vector<3> = params.get_fixed_rows::<3>(1); + + let squared_n = ivec.squared_norm(); + let w = params.get_elem(0); + + let near_zero = squared_n.less_equal(&S::from_f64(EPS * EPS)); + + let w_sq = w.clone() * w.clone(); + let t0 = S::from_f64(2.0) / w.clone() + - S::from_f64(2.0 / 3.0) * squared_n.clone() / (w_sq * w.clone()); + + let n = squared_n.sqrt(); + + let sign = S::from_f64(-1.0).select(&w.less_equal(&S::from_f64(0.0)), S::from_f64(1.0)); + let atan_nbyw = sign.clone() * n.clone().atan2(sign * w); + + let t = S::from_f64(2.0) * atan_nbyw / n; + + let two_atan_nbyd_by_n = t0.select(&near_zero, t); + + ivec.scaled(two_atan_nbyd_by_n) + } + + fn hat(omega: &S::Vector<3>) -> S::Matrix<3, 3> { + let o0 = omega.get_elem(0); + let o1 = omega.get_elem(1); + let o2 = omega.get_elem(2); + + S::Matrix::from_array2([ + [S::zero(), -o2.clone(), o1.clone()], + [o2, S::zero(), -o0.clone()], + [-o1, o0, S::zero()], + ]) + } + + fn vee(omega_hat: &S::Matrix<3, 3>) -> S::Vector<3> { + S::Vector::<3>::from_array([ + omega_hat.get_elem([2, 1]), + omega_hat.get_elem([0, 2]), + omega_hat.get_elem([1, 0]), + ]) + } + + fn inverse(params: &S::Vector<4>) -> S::Vector<4> { + S::Vector::from_array([ + params.get_elem(0), + -params.get_elem(1), + -params.get_elem(2), + -params.get_elem(3), + ]) + } + + fn transform(params: &S::Vector<4>, point: &S::Vector<3>) -> S::Vector<3> { + Self::matrix(params) * point.clone() + } + + fn to_ambient(point: &S::Vector<3>) -> S::Vector<3> { + point.clone() + } + + fn compact(params: &S::Vector<4>) -> S::Matrix<3, 3> { + Self::matrix(params) + } + + fn matrix(params: &S::Vector<4>) -> S::Matrix<3, 3> { + let ivec = params.get_fixed_rows::<3>(1); + let re = params.get_elem(0); + + let unit_x = S::Vector::from_f64_array([1.0, 0.0, 0.0]); + let unit_y = S::Vector::from_f64_array([0.0, 1.0, 0.0]); + let unit_z = S::Vector::from_f64_array([0.0, 0.0, 1.0]); + + let two = S::from_f64(2.0); + + let uv_x: S::Vector<3> = + cross::(ivec.clone(), unit_x.clone()).scaled(two.clone()); + let uv_y: S::Vector<3> = + cross::(ivec.clone(), unit_y.clone()).scaled(two.clone()); + let uv_z: S::Vector<3> = cross::(ivec.clone(), unit_z.clone()).scaled(two); + + let col_x = + unit_x + cross::(ivec.clone(), uv_x.clone()) + uv_x.scaled(re.clone()); + let col_y = + unit_y + cross::(ivec.clone(), uv_y.clone()) + uv_y.scaled(re.clone()); + let col_z = + unit_z + cross::(ivec.clone(), uv_z.clone()) + uv_z.scaled(re.clone()); + + S::Matrix::block_mat1x2::<1, 2>( + col_x.to_mat(), + S::Matrix::block_mat1x2(col_y.to_mat(), col_z.to_mat()), + ) + } + + fn ad(omega: &S::Vector<3>) -> S::Matrix<3, 3> { + Self::hat(omega) + } + + type GenG> = Rotation3Impl; + type RealG = Rotation3Impl; + type DualG = Rotation3Impl; + + fn group_mul(lhs_params: &S::Vector<4>, rhs_params: &S::Vector<4>) -> S::Vector<4> { + let lhs_re = lhs_params.get_elem(0); + let rhs_re = rhs_params.get_elem(0); + + let lhs_ivec = lhs_params.get_fixed_rows::<3>(1); + let rhs_ivec = rhs_params.get_fixed_rows::<3>(1); + + let re = lhs_re.clone() * rhs_re.clone() - lhs_ivec.clone().dot(rhs_ivec.clone()); + let ivec = rhs_ivec.scaled(lhs_re) + + lhs_ivec.scaled(rhs_re) + + cross::(lhs_ivec, rhs_ivec); + + let mut params = S::Vector::block_vec2(re.to_vec(), ivec); + + if ((params.norm() 
- S::from_f64(1.0)) + .abs() + .greater_equal(&S::from_f64(1e-7))) + .any() + { + // TODO: use Taylor approximation for norm close to 1 + params = params.normalized(); + } + params + } +} + +impl, const BATCH: usize> IsRealLieGroupImpl + for Rotation3Impl +{ + fn dx_exp_x_at_0() -> S::Matrix<4, 3> { + S::Matrix::from_f64_array2([ + [0.0, 0.0, 0.0], + [0.5, 0.0, 0.0], + [0.0, 0.5, 0.0], + [0.0, 0.0, 0.5], + ]) + } + + fn da_a_mul_b(_a: &S::Vector<4>, b: &S::Vector<4>) -> S::Matrix<4, 4> { + let b_real = b.get_elem(0); + let b_imag0 = b.get_elem(1); + let b_imag1 = b.get_elem(2); + let b_imag2 = b.get_elem(3); + + S::Matrix::<4, 4>::from_array2([ + [b_real, -b_imag0, -b_imag1, -b_imag2], + [b_imag0, b_real, b_imag2, -b_imag1], + [b_imag1, -b_imag2, b_real, b_imag0], + [b_imag2, b_imag1, -b_imag0, b_real], + ]) + } + + fn db_a_mul_b(a: &S::Vector<4>, _b: &S::Vector<4>) -> S::Matrix<4, 4> { + let a_real = a.get_elem(0); + let a_imag0 = a.get_elem(1); + let a_imag1 = a.get_elem(2); + let a_imag2 = a.get_elem(3); + + S::Matrix::<4, 4>::from_array2([ + [a_real, -a_imag0, -a_imag1, -a_imag2], + [a_imag0, a_real, -a_imag2, a_imag1], + [a_imag1, a_imag2, a_real, -a_imag0], + [a_imag2, -a_imag1, a_imag0, a_real], + ]) + } + + fn dx_exp_x_times_point_at_0(point: S::Vector<3>) -> S::Matrix<3, 3> { + Self::hat(&-point) + } + + fn dx_exp(omega: &S::Vector<3>) -> S::Matrix<4, 3> { + let theta_sq = omega.squared_norm(); + + let near_zero = theta_sq.less_equal(&S::from_f64(1e-6)); + + let dx0 = Self::dx_exp_x_at_0(); + + println!("dx0\n{:?}", dx0); + + let omega_0 = omega.get_elem(0); + let omega_1 = omega.get_elem(1); + let omega_2 = omega.get_elem(2); + let theta = theta_sq.sqrt(); + let a = (S::from_f64(0.5) * theta).sin() / theta; + let b = (S::from_f64(0.5) * theta).cos() / (theta_sq) + - S::from_f64(2.0) * (S::from_f64(0.5) * theta).sin() / (theta_sq * theta); + + let dx = S::Matrix::from_array2([ + [-omega_0 * a, -omega_1 * a, -omega_2 * a], + [ + omega_0 * omega_0 * b + S::from_f64(2.0) * a, + omega_0 * omega_1 * b, + omega_0 * omega_2 * b, + ], + [ + omega_0 * omega_1 * b, + omega_1 * omega_1 * b + S::from_f64(2.0) * a, + omega_1 * omega_2 * b, + ], + [ + omega_0 * omega_2 * b, + omega_1 * omega_2 * b, + omega_2 * omega_2 * b + S::from_f64(2.0) * a, + ], + ]) + .scaled(S::from_f64(0.5)); + dx0.select(&near_zero, dx) + } + + fn dx_log_x(params: &S::Vector<4>) -> S::Matrix<3, 4> { + let ivec: S::Vector<3> = params.get_fixed_rows::<3>(1); + let w = params.get_elem(0); + let squared_n = ivec.squared_norm(); + + let near_zero = squared_n.less_equal(&S::from_f64(1e-6)); + + let m0 = S::Matrix::<3, 4>::block_mat1x2( + S::Matrix::<3, 1>::zeros(), + S::Matrix::<3, 3>::identity().scaled(S::from_f64(2.0)), + ); + + let n = squared_n.sqrt(); + let theta = S::from_f64(2.0) * n.atan2(w); + + let dw_ivec_theta: S::Vector<3> = ivec.scaled(S::from_f64(-2.0) / (squared_n + w * w)); + let factor = + S::from_f64(2.0) * w / (squared_n * (squared_n + w * w)) - theta / (squared_n * n); + + let mm = ivec.clone().outer(ivec).scaled(factor); + + m0.select( + &near_zero, + S::Matrix::block_mat1x2( + dw_ivec_theta.to_mat(), + S::Matrix::<3, 3>::identity().scaled(theta / n) + mm, + ), + ) + } + + fn has_shortest_path_ambiguity(params: &S::Vector<4>) -> S::Mask { + let theta = Self::log(params).norm(); + (theta - S::from_f64(std::f64::consts::PI)) + .abs() + .less_equal(&S::from_f64(1e-6)) + } +} + +impl, const BATCH: usize> crate::traits::IsLieFactorGroupImpl + for Rotation3Impl +{ + type GenFactorG> = Rotation3Impl; + type
RealFactorG = Rotation3Impl; + type DualFactorG = Rotation3Impl; + + fn mat_v(omega: &S::Vector<3>) -> S::Matrix<3, 3> { + let theta_sq = omega.squared_norm(); + let mat_omega: S::Matrix<3, 3> = Rotation3Impl::::hat(omega); + let mat_omega_sq = mat_omega.clone().mat_mul(mat_omega.clone()); + + let near_zero = theta_sq.less_equal(&S::from_f64(1e-6)); + + let mat_v0 = S::Matrix::<3, 3>::identity() + mat_omega.scaled(S::from_f64(0.5)); + + let theta = theta_sq.clone().sqrt(); + let mat_v = S::Matrix::<3, 3>::identity() + + mat_omega.scaled((S::from_f64(1.0) - theta.clone().cos()) / theta_sq.clone()) + + mat_omega_sq.scaled((theta.clone() - theta.clone().sin()) / (theta_sq * theta)); + + mat_v0.select(&near_zero, mat_v) + } + + fn mat_v_inverse(omega: &S::Vector<3>) -> S::Matrix<3, 3> { + let theta_sq = omega.clone().dot(omega.clone()); + let mat_omega: S::Matrix<3, 3> = Rotation3Impl::::hat(omega); + let mat_omega_sq = mat_omega.clone().mat_mul(mat_omega.clone()); + + let near_zero = theta_sq.less_equal(&S::from_f64(1e-6)); + + let mat_v_inv0 = S::Matrix::<3, 3>::identity() - mat_omega.scaled(S::from_f64(0.5)) + + mat_omega_sq.scaled(S::from_f64(1. / 12.)); + + let theta = theta_sq.clone().sqrt(); + let half_theta = S::from_f64(0.5) * theta.clone(); + + let mat_v_inv = S::Matrix::<3, 3>::identity() - mat_omega.scaled(S::from_f64(0.5)) + + mat_omega_sq.scaled( + (S::from_f64(1.0) + - (S::from_f64(0.5) * theta.clone() * half_theta.clone().cos()) + / half_theta.sin()) + / (theta.clone() * theta), + ); + + mat_v_inv0.select(&near_zero, mat_v_inv) + } + + fn adj_of_translation(params: &S::Vector<4>, point: &S::Vector<3>) -> S::Matrix<3, 3> { + Rotation3Impl::::hat(point).mat_mul(Rotation3Impl::::matrix(params)) + } + + fn ad_of_translation(point: &S::Vector<3>) -> S::Matrix<3, 3> { + Rotation3Impl::::hat(point) + } +} + +impl, const BATCH: usize> IsRealLieFactorGroupImpl + for Rotation3Impl +{ + fn dx_mat_v(omega: &S::Vector<3>) -> [S::Matrix<3, 3>; 3] { + let theta_sq = omega.squared_norm(); + let theta_p4 = theta_sq * theta_sq; + let dt_mat_omega_pos_idx = [[2, 1], [0, 2], [1, 0]]; + let dt_mat_omega_neg_idx = [[1, 2], [2, 0], [0, 1]]; + + let near_zero = theta_sq.less_equal(&S::from_f64(1e-6)); + + let mat_omega: S::Matrix<3, 3> = Rotation3Impl::::hat(omega); + let mat_omega_sq = mat_omega.clone().mat_mul(mat_omega.clone()); + + let omega_x = omega.get_elem(0); + let omega_y = omega.get_elem(1); + let omega_z = omega.get_elem(2); + + let theta = theta_sq.sqrt(); + let domega_theta = + S::Vector::from_array([omega_x / theta, omega_y / theta, omega_z / theta]); + + let a = (S::ones() - theta.cos()) / theta_sq; + let dt_a = (S::from_f64(-2.0) + S::from_f64(2.0) * theta.cos() + theta * theta.sin()) + / (theta * theta_sq); + + let b = (theta - theta.sin()) / (theta_sq * theta); + let dt_b = -(S::from_f64(2.0) * theta + theta * theta.cos() + - S::from_f64(3.0) * theta.sin()) + / (theta_p4); + + let dt_mat_omega_sq = [ + S::Matrix::from_array2([ + [S::zeros(), omega_y, omega_z], + [omega_y, S::from_f64(-2.0) * omega_x, S::zeros()], + [omega_z, S::zeros(), S::from_f64(-2.0) * omega_x], + ]), + S::Matrix::from_array2([ + [S::from_f64(-2.0) * omega_y, omega_x, S::zeros()], + [omega_x, S::zeros(), omega_z], + [S::zeros(), omega_z, S::from_f64(-2.0) * omega_y], + ]), + S::Matrix::from_array2([ + [S::from_f64(-2.0) * omega_z, S::zeros(), omega_x], + [S::zeros(), S::from_f64(-2.0) * omega_z, omega_y], + [omega_x, omega_y, S::zeros()], + ]), + ]; + + let a = S::from_f64(0.5).select(&near_zero, a); + 
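+        // a = (1 - cos θ)/θ² tends to 1/2 as θ → 0, so the select above
+        // substitutes the constant limit under the near-zero mask.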
println!("a = {:?}", a); + + println!("omega = {:?}", omega); + println!("b = {:?}", b); + println!("dt_b = {:?}", dt_b); + + println!("dt_mat_omega_sq = {:?}", dt_mat_omega_sq); + + let set = |i| { + let tmp0 = mat_omega.clone().scaled(dt_a * domega_theta.get_elem(i)); + let tmp1 = dt_mat_omega_sq[i].scaled(b); + let tmp2 = mat_omega_sq.scaled(dt_b * domega_theta.get_elem(i)); + + println!("tmp2 = {:?}", tmp2); + let mut l_i: S::Matrix<3, 3> = + S::Matrix::zeros().select(&near_zero, tmp0 + tmp1 + tmp2); + let pos_idx = dt_mat_omega_pos_idx[i]; + l_i.set_elem(pos_idx, a + l_i.get_elem(pos_idx)); + + let neg_idx = dt_mat_omega_neg_idx[i]; + l_i.set_elem(neg_idx, -a + l_i.get_elem(neg_idx)); + l_i + }; + + let l: [S::Matrix<3, 3>; 3] = [set(0), set(1), set(2)]; + + l + } + + fn dparams_matrix_times_point(params: &S::Vector<4>, point: &S::Vector<3>) -> S::Matrix<3, 4> { + let r = params.get_elem(0); + let ivec0 = params.get_elem(1); + let ivec1 = params.get_elem(2); + let ivec2 = params.get_elem(3); + + let p0 = point.get_elem(0); + let p1 = point.get_elem(1); + let p2 = point.get_elem(2); + + S::Matrix::from_array2([ + [ + S::from_f64(2.0) * ivec1 * p2 - S::from_f64(2.0) * ivec2 * p1, + S::from_f64(2.0) * ivec1 * p1 + S::from_f64(2.0) * ivec2 * p2, + S::from_f64(2.0) * r * p2 + S::from_f64(2.0) * ivec0 * p1 + - S::from_f64(4.0) * ivec1 * p0, + S::from_f64(-2.0) * r * p1 + S::from_f64(2.0) * ivec0 * p2 + - S::from_f64(4.0) * ivec2 * p0, + ], + [ + S::from_f64(-2.0) * ivec0 * p2 + S::from_f64(2.0) * ivec2 * p0, + S::from_f64(-2.0) * r * p2 - S::from_f64(4.0) * ivec0 * p1 + + S::from_f64(2.0) * ivec1 * p0, + S::from_f64(2.0) * ivec0 * p0 + S::from_f64(2.0) * ivec2 * p2, + S::from_f64(2.0) * r * p0 + S::from_f64(2.0) * ivec1 * p2 + - S::from_f64(4.0) * ivec2 * p1, + ], + [ + S::from_f64(2.0) * ivec0 * p1 - S::from_f64(2.0) * ivec1 * p0, + S::from_f64(2.0) * r * p1 - S::from_f64(4.0) * ivec0 * p2 + + S::from_f64(2.0) * ivec2 * p0, + S::from_f64(-2.0) * r * p0 - S::from_f64(4.0) * ivec1 * p2 + + S::from_f64(2.0) * ivec2 * p1, + S::from_f64(2.0) * ivec0 * p0 + S::from_f64(2.0) * ivec1 * p1, + ], + ]) + } + + fn dx_mat_v_inverse(omega: &S::Vector<3>) -> [S::Matrix<3, 3>; 3] { + let theta_sq = omega.squared_norm(); + let theta = theta_sq.sqrt(); + let half_theta = S::from_f64(0.5) * theta; + let mat_omega: S::Matrix<3, 3> = Rotation3Impl::::hat(omega); + let mat_omega_sq = mat_omega.clone().mat_mul(mat_omega); + + let dt_mat_omega_pos_idx = [[2, 1], [0, 2], [1, 0]]; + let dt_mat_omega_neg_idx = [[1, 2], [2, 0], [0, 1]]; + + let omega_x = omega.get_elem(0); + let omega_y = omega.get_elem(1); + let omega_z = omega.get_elem(2); + + let near_zero = theta_sq.less_equal(&S::from_f64(1e-6)); + + let domega_theta = + S::Vector::from_array([omega_x / theta, omega_y / theta, omega_z / theta]); + + let c = (S::from_f64(1.0) + - (S::from_f64(0.5) * theta * half_theta.cos()) / (half_theta.sin())) + / theta_sq; + + let dt_c = (S::from_f64(-2.0) + + (S::from_f64(0.25) * theta_sq) / (half_theta.sin() * half_theta.sin()) + + (half_theta * half_theta.cos()) / half_theta.sin()) + / (theta * theta_sq); + + let dt_mat_omega_sq: &[S::Matrix<3, 3>; 3] = &[ + S::Matrix::from_array2([ + [S::from_f64(0.0), omega_y, omega_z], + [omega_y, S::from_f64(-2.0) * omega_x, S::from_f64(0.0)], + [omega_z, S::from_f64(0.0), S::from_f64(-2.0) * omega_x], + ]), + S::Matrix::from_array2([ + [S::from_f64(-2.0) * omega_y, omega_x, S::from_f64(0.0)], + [omega_x, S::from_f64(0.0), omega_z], + [S::from_f64(0.0), omega_z, S::from_f64(-2.0) * 
omega_y], + ]), + S::Matrix::from_array2([ + [S::from_f64(-2.0) * omega_z, S::from_f64(0.0), omega_x], + [S::from_f64(0.0), S::from_f64(-2.0) * omega_z, omega_y], + [omega_x, omega_y, S::from_f64(0.0)], + ]), + ]; + + let set = |i| -> S::Matrix<3, 3> { + let t: &S::Matrix<3, 3> = &dt_mat_omega_sq[i]; + let foo: S::Matrix<3, 3> = + t.scaled(c) + mat_omega_sq.scaled(domega_theta.get_elem(i) * dt_c); + let mut l_i: S::Matrix<3, 3> = S::Matrix::zeros().select(&near_zero, foo); + + let pos_idx = dt_mat_omega_pos_idx[i]; + l_i.set_elem(pos_idx, S::from_f64(-0.5) + l_i.get_elem(pos_idx)); + + let neg_idx = dt_mat_omega_neg_idx[i]; + l_i.set_elem(neg_idx, S::from_f64(0.5) + l_i.get_elem(neg_idx)); + l_i + }; + + let l: [S::Matrix<3, 3>; 3] = [set(0), set(1), set(2)]; + + l + } +} + +/// 3d rotation group - SO(3) +pub type Rotation3 = LieGroup>; + +#[test] +fn rotation3_prop_tests() { + use crate::factor_lie_group::RealFactorLieGroupTest; + use crate::real_lie_group::RealLieGroupTest; + use sophus_core::calculus::dual::dual_scalar::DualBatchScalar; + use sophus_core::calculus::dual::dual_scalar::DualScalar; + use sophus_core::linalg::BatchScalarF64; + + Rotation3::::test_suite(); + Rotation3::, 8>::test_suite(); + Rotation3::::test_suite(); + Rotation3::, 8>::test_suite(); + + Rotation3::::run_real_tests(); + Rotation3::, 8>::run_real_tests(); + + Rotation3::::run_real_factor_tests(); + Rotation3::, 8>::run_real_factor_tests(); +} diff --git a/crates/sophus_lie/src/translation_product_product.rs b/crates/sophus_lie/src/groups/translation_product_product.rs similarity index 67% rename from crates/sophus_lie/src/translation_product_product.rs rename to crates/sophus_lie/src/groups/translation_product_product.rs index c90599b..b1f4f35 100644 --- a/crates/sophus_lie/src/translation_product_product.rs +++ b/crates/sophus_lie/src/groups/translation_product_product.rs @@ -1,51 +1,49 @@ use std::vec; -use super::lie_group::LieGroup; -use super::traits::IsF64LieFactorGroupImpl; -use super::traits::IsF64LieGroupImpl; -use super::traits::IsLieFactorGroupImpl; - -use super::traits::IsLieGroupImpl; -use super::traits::IsTranslationProductGroup; -use sophus_calculus::dual::dual_scalar::Dual; -use sophus_calculus::manifold; -use sophus_calculus::points::example_points; -use sophus_calculus::types::matrix::IsMatrix; -use sophus_calculus::types::params::HasParams; -use sophus_calculus::types::params::ParamsImpl; -use sophus_calculus::types::scalar::IsScalar; -use sophus_calculus::types::vector::IsVector; -use sophus_calculus::types::vector::IsVectorLike; -use sophus_calculus::types::MatF64; -use sophus_calculus::types::VecF64; +use crate::lie_group::LieGroup; +use crate::traits::IsLieFactorGroupImpl; +use crate::traits::IsLieGroupImpl; +use crate::traits::IsRealLieFactorGroupImpl; +use crate::traits::IsRealLieGroupImpl; +use crate::traits::IsTranslationProductGroup; +use sophus_core::calculus::manifold; +use sophus_core::linalg::matrix::IsMatrix; +use sophus_core::linalg::scalar::IsRealScalar; +use sophus_core::linalg::scalar::IsScalar; +use sophus_core::linalg::vector::IsVector; +use sophus_core::params::HasParams; +use sophus_core::params::ParamsImpl; +use sophus_core::points::example_points; /// implementation of a translation product group /// /// It is a semi-direct product of the commutative translation group (Euclidean vector space) and a factor group. 
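+///
+/// The factor group acts on the translation component during composition:
+/// `(t_a, F_a) * (t_b, F_b) = (t_a + F_a * t_b, F_a * F_b)`.
+///
+/// A minimal sketch of this composition law for the SE(2) case with plain
+/// nalgebra types (`Se2` here is a hypothetical stand-in for illustration,
+/// not a type from this crate):
+///
+/// ```
+/// use nalgebra::{Matrix2, Vector2};
+///
+/// struct Se2 {
+///     t: Vector2<f64>,
+///     r: Matrix2<f64>,
+/// }
+///
+/// fn compose(a: &Se2, b: &Se2) -> Se2 {
+///     // the rotation (factor) part of `a` acts on the translation of `b`
+///     Se2 { t: a.t + a.r * b.t, r: a.r * b.r }
+/// }
+///
+/// let a = Se2 { t: Vector2::new(1.0, 0.0), r: Matrix2::identity() };
+/// let b = Se2 { t: Vector2::new(0.0, 2.0), r: Matrix2::identity() };
+/// assert_eq!(compose(&a, &b).t, Vector2::new(1.0, 2.0));
+/// ```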
-#[derive(Debug, Copy, Clone)] +#[derive(Debug, Copy, Clone, Default)] pub struct TranslationProductGroupImpl< - S: IsScalar<1>, + S: IsScalar, const DOF: usize, const PARAMS: usize, const POINT: usize, const AMBIENT: usize, const SDOF: usize, const SPARAMS: usize, - F: IsLieFactorGroupImpl, + const BATCH: usize, + F: IsLieFactorGroupImpl, > { phantom: std::marker::PhantomData<(S, F)>, } impl< - S: IsScalar<1>, + S: IsScalar, const DOF: usize, const PARAMS: usize, const POINT: usize, const AMBIENT: usize, const SDOF: usize, const SPARAMS: usize, - F: IsLieFactorGroupImpl, - > TranslationProductGroupImpl + const BATCH: usize, + F: IsLieFactorGroupImpl, + > TranslationProductGroupImpl { /// translation part of the group parameters pub fn translation(params: &S::Vector) -> S::Vector { @@ -84,23 +82,24 @@ impl< } fn translation_examples() -> Vec> { - example_points::() + example_points::() } } impl< - S: IsScalar<1>, + S: IsScalar, const DOF: usize, const PARAMS: usize, const POINT: usize, const AMBIENT: usize, const SDOF: usize, const SPARAMS: usize, - F: IsLieFactorGroupImpl, - > ParamsImpl - for TranslationProductGroupImpl + const BATCH: usize, + F: IsLieFactorGroupImpl, + > ParamsImpl + for TranslationProductGroupImpl { - fn are_params_valid(params: &S::Vector) -> bool { + fn are_params_valid(params: &S::Vector) -> S::Mask { F::are_params_valid(&Self::factor_params(params)) } @@ -116,23 +115,24 @@ impl< fn invalid_params_examples() -> Vec> { vec![Self::params_from( - &S::Vector::zero(), + &S::Vector::zeros(), &F::invalid_params_examples()[0], )] } } impl< - S: IsScalar<1>, + S: IsScalar, const DOF: usize, const PARAMS: usize, const POINT: usize, const AMBIENT: usize, const SDOF: usize, const SPARAMS: usize, - F: IsLieFactorGroupImpl, - > manifold::traits::TangentImpl - for TranslationProductGroupImpl + const BATCH: usize, + F: IsLieFactorGroupImpl, + > manifold::traits::TangentImpl + for TranslationProductGroupImpl { fn tangent_examples() -> Vec> { let mut examples = vec![]; @@ -145,19 +145,18 @@ impl< } } -// TODO : Port to Rust - impl< - S: IsScalar<1>, + S: IsScalar, const DOF: usize, const PARAMS: usize, const POINT: usize, const AMBIENT: usize, const SDOF: usize, const SPARAMS: usize, - Factor: IsLieFactorGroupImpl, - > IsLieGroupImpl - for TranslationProductGroupImpl + const BATCH: usize, + Factor: IsLieFactorGroupImpl, + > IsLieGroupImpl + for TranslationProductGroupImpl { const IS_ORIGIN_PRESERVING: bool = false; const IS_AXIS_DIRECTION_PRESERVING: bool = Factor::IS_AXIS_DIRECTION_PRESERVING; @@ -167,7 +166,7 @@ impl< const IS_PARALLEL_LINE_PRESERVING: bool = true; fn identity_params() -> S::Vector { - Self::params_from(&S::Vector::zero(), &Factor::identity_params()) + Self::params_from(&S::Vector::zeros(), &Factor::identity_params()) } // Manifold / Lie Group concepts @@ -181,7 +180,7 @@ impl< Factor::matrix(&factor_params), Factor::adj_of_translation(&factor_params, &translation), ), - (S::Matrix::zero(), Factor::adj(&factor_params)), + (S::Matrix::zeros(), Factor::adj(&factor_params)), ) } @@ -211,7 +210,7 @@ impl< Factor::hat(&Self::factor_tangent(omega)), Self::translation_tangent(omega).to_mat(), ), - (S::Matrix::zero(), S::Matrix::zero()), + (S::Matrix::zeros(), S::Matrix::zeros()), ) } @@ -249,7 +248,7 @@ impl< } fn to_ambient(params: &S::Vector) -> S::Vector { - S::Vector::block_vec2(params.clone(), S::Vector::<1>::zero()) + S::Vector::block_vec2(params.clone(), S::Vector::<1>::zeros()) } fn compact(params: &S::Vector) -> S::Matrix { @@ -265,12 +264,15 @@ impl< 
Factor::matrix(&Self::factor_params(params)), Self::translation(params).to_mat(), ), - (S::Matrix::<1, POINT>::zero(), S::Matrix::<1, 1>::identity()), + ( + S::Matrix::<1, POINT>::zeros(), + S::Matrix::<1, 1>::identity(), + ), ) } fn ad(tangent: &S::Vector) -> S::Matrix { - let o = S::Matrix::::zero(); + let o = S::Matrix::::zeros(); S::Matrix::block_mat2x2::( ( Factor::hat(&Self::factor_tangent(tangent)), @@ -280,7 +282,7 @@ impl< ) } - type GenG> = TranslationProductGroupImpl< + type GenG> = TranslationProductGroupImpl< S2, DOF, PARAMS, @@ -288,87 +290,89 @@ impl< AMBIENT, SDOF, SPARAMS, + BATCH, Factor::GenFactorG, >; type RealG = TranslationProductGroupImpl< - f64, + S::RealScalar, DOF, PARAMS, POINT, AMBIENT, SDOF, SPARAMS, - Factor::GenFactorG, + BATCH, + Factor::GenFactorG, >; type DualG = TranslationProductGroupImpl< - Dual, + S::DualScalar, DOF, PARAMS, POINT, AMBIENT, SDOF, SPARAMS, - Factor::GenFactorG, + BATCH, + Factor::GenFactorG, >; - - fn has_shortest_path_ambiguity(params: &>::Vector) -> bool { - Factor::has_shortest_path_ambiguity(&Self::factor_params(params)) - } } impl< + S: IsRealScalar, const DOF: usize, const PARAMS: usize, const POINT: usize, const AMBIENT: usize, const SDOF: usize, const SPARAMS: usize, - Factor: IsF64LieFactorGroupImpl, - > IsF64LieGroupImpl - for TranslationProductGroupImpl + const BATCH: usize, + Factor: IsRealLieFactorGroupImpl, + > IsRealLieGroupImpl + for TranslationProductGroupImpl { - fn dx_exp_x_at_0() -> MatF64 { - MatF64::block_mat2x2::( + fn dx_exp_x_at_0() -> S::Matrix { + S::Matrix::block_mat2x2::( + ( + S::Matrix::::identity(), + S::Matrix::::zeros(), + ), ( - MatF64::::identity(), - MatF64::::zero(), + S::Matrix::::zeros(), + Factor::dx_exp_x_at_0(), ), - (MatF64::::zero(), Factor::dx_exp_x_at_0()), ) } - fn dx_exp_x_times_point_at_0(point: &VecF64) -> MatF64 { - MatF64::block_mat1x2( - MatF64::::identity(), + fn dx_exp_x_times_point_at_0(point: S::Vector) -> S::Matrix { + S::Matrix::block_mat1x2( + S::Matrix::::identity(), Factor::dx_exp_x_times_point_at_0(point), ) } - fn dx_exp(tangent: &VecF64) -> MatF64 { + fn dx_exp(tangent: &S::Vector) -> S::Matrix { let factor_tangent = &Self::factor_tangent(tangent); let trans_tangent = &Self::translation_tangent(tangent); let dx_mat_v = Factor::dx_mat_v(factor_tangent); - let mut dx_mat_v_tangent = MatF64::::zero(); + let mut dx_mat_v_tangent = S::Matrix::::zeros(); for i in 0..SDOF { - dx_mat_v_tangent - .fixed_columns_mut::<1>(i) - .copy_from(&(dx_mat_v[i] * trans_tangent)); + dx_mat_v_tangent.set_col_vec(i, dx_mat_v[i].clone() * trans_tangent.clone()); } - MatF64::block_mat2x2::( + S::Matrix::block_mat2x2::( (Factor::mat_v(factor_tangent), dx_mat_v_tangent), ( - MatF64::::zero(), + S::Matrix::::zeros(), Factor::dx_exp(factor_tangent), ), ) } - fn dx_log_x(params: &VecF64) -> MatF64 { + fn dx_log_x(params: &S::Vector) -> S::Matrix { let factor_params = &Self::factor_params(params); let trans = &Self::translation(params); let factor_tangent = Factor::log(factor_params); @@ -376,64 +380,71 @@ impl< let dx_log_x = Factor::dx_log_x(factor_params); let dx_mat_v_inverse = Factor::dx_mat_v_inverse(&factor_tangent); - let mut dx_mat_v_inv_tangent = MatF64::::zero(); + let mut dx_mat_v_inv_tangent = S::Matrix::::zeros(); for i in 0..SDOF { - let v = dx_mat_v_inverse[i] * trans; - let r = dx_log_x.row(i); - dx_mat_v_inv_tangent += v * r; + let v: S::Vector = dx_mat_v_inverse[i].clone() * trans.clone(); + let r: S::Vector = dx_log_x.get_row_vec(i); + + let m = v.outer(r); + dx_mat_v_inv_tangent = 
dx_mat_v_inv_tangent + m; } - MatF64::block_mat2x2::( + S::Matrix::block_mat2x2::( (Factor::mat_v_inverse(&factor_tangent), dx_mat_v_inv_tangent), - (MatF64::::zero(), dx_log_x), + (S::Matrix::::zeros(), dx_log_x), ) } - fn da_a_mul_b(a: &VecF64, b: &VecF64) -> MatF64 { + fn da_a_mul_b(a: &S::Vector, b: &S::Vector) -> S::Matrix { let a_factor_params = &Self::factor_params(a); let b_factor_params = &Self::factor_params(b); let b_trans = &Self::translation(b); - MatF64::block_mat2x2::( + S::Matrix::block_mat2x2::( ( - MatF64::::identity(), + S::Matrix::::identity(), Factor::dparams_matrix_times_point(a_factor_params, b_trans), ), ( - MatF64::::zero(), + S::Matrix::::zeros(), Factor::da_a_mul_b(a_factor_params, b_factor_params), ), ) } - fn db_a_mul_b(a: &VecF64, b: &VecF64) -> MatF64 { + fn db_a_mul_b(a: &S::Vector, b: &S::Vector) -> S::Matrix { let a_factor_params = &Self::factor_params(a); let b_factor_params = &Self::factor_params(b); - MatF64::block_mat2x2::( + S::Matrix::block_mat2x2::( ( Factor::matrix(a_factor_params), - MatF64::::zero(), + S::Matrix::::zeros(), ), ( - MatF64::::zero(), + S::Matrix::::zeros(), Factor::db_a_mul_b(a_factor_params, b_factor_params), ), ) } + + fn has_shortest_path_ambiguity(params: &::Vector) -> ::Mask { + Factor::has_shortest_path_ambiguity(&Self::factor_params(params)) + } } impl< - S: IsScalar<1>, + S: IsScalar, const DOF: usize, const PARAMS: usize, const POINT: usize, const AMBIENT: usize, const SDOF: usize, const SPARAMS: usize, - FactorImpl: crate::traits::IsLieFactorGroupImpl, + const BATCH: usize, + FactorImpl: crate::traits::IsLieFactorGroupImpl, > IsTranslationProductGroup< S, @@ -443,8 +454,8 @@ impl< AMBIENT, SDOF, SPARAMS, - 1, - LieGroup, + BATCH, + LieGroup, > for crate::lie_group::LieGroup< S, @@ -452,38 +463,57 @@ impl< PARAMS, POINT, AMBIENT, - 1, - TranslationProductGroupImpl, + BATCH, + TranslationProductGroupImpl< + S, + DOF, + PARAMS, + POINT, + AMBIENT, + SDOF, + SPARAMS, + BATCH, + FactorImpl, + >, > { - type Impl = - TranslationProductGroupImpl; + type Impl = TranslationProductGroupImpl< + S, + DOF, + PARAMS, + POINT, + AMBIENT, + SDOF, + SPARAMS, + BATCH, + FactorImpl, + >; fn from_translation_and_factor( - translation: &>::Vector, - factor: &LieGroup, + translation: &>::Vector, + factor: &LieGroup, ) -> Self { let params = Self::Impl::params_from(translation, factor.params()); Self::from_params(¶ms) } - fn from_t(translation: &>::Vector) -> Self { + fn from_t(translation: &>::Vector) -> Self { Self::from_translation_and_factor(translation, &LieGroup::identity()) } - fn set_translation(&mut self, translation: &>::Vector) { + fn set_translation(&mut self, translation: &>::Vector) { self.set_params(&Self::G::params_from(translation, self.factor().params())) } - fn translation(&self) -> >::Vector { + fn translation(&self) -> >::Vector { Self::Impl::translation(self.params()) } - fn set_factor(&mut self, factor: &LieGroup) { + fn set_factor(&mut self, factor: &LieGroup) { self.set_params(&Self::G::params_from(&self.translation(), factor.params())) } - fn factor(&self) -> LieGroup { + fn factor(&self) -> LieGroup { LieGroup::from_params(&Self::Impl::factor_params(self.params())) } } diff --git a/crates/sophus_lie/src/isometry2.rs b/crates/sophus_lie/src/isometry2.rs deleted file mode 100644 index 5c62e1f..0000000 --- a/crates/sophus_lie/src/isometry2.rs +++ /dev/null @@ -1,46 +0,0 @@ -use super::{ - lie_group::LieGroup, rotation2::Rotation2Impl, - translation_product_product::TranslationProductGroupImpl, -}; -use 
crate::rotation2::Rotation2;
-use crate::traits::IsTranslationProductGroup;
-use sophus_calculus::types::scalar::IsScalar;
-
-/// 2D isometry group implementation struct - SE(2)
-pub type Isometry2Impl<S> = TranslationProductGroupImpl<S, 3, 4, 2, 3, 1, 2, Rotation2Impl<S>>;
-
-/// 2D isometry group - SE(2)
-pub type Isometry2<S> = LieGroup<S, 3, 4, 2, 3, 1, Isometry2Impl<S>>;
-
-impl<S: IsScalar<1>> Isometry2<S> {
-    /// create isometry from translation and rotation
-    pub fn from_translation_and_rotation(
-        translation: &<S as IsScalar<1>>::Vector<2>,
-        rotation: &Rotation2<S>,
-    ) -> Self {
-        Self::from_translation_and_factor(translation, rotation)
-    }
-
-    /// set rotation
-    pub fn set_rotation(&mut self, rotation: &Rotation2<S>) {
-        self.set_factor(rotation)
-    }
-
-    /// get rotation
-    pub fn rotation(&self) -> Rotation2<S> {
-        self.factor()
-    }
-}
-
-mod tests {
-
-    #[test]
-    fn isometry2_prop_tests() {
-        use super::Isometry2;
-        use sophus_calculus::dual::dual_scalar::Dual;
-
-        Isometry2::<f64>::test_suite();
-        Isometry2::<Dual>::test_suite();
-        Isometry2::<f64>::real_test_suite();
-    }
-}
diff --git a/crates/sophus_lie/src/isometry3.rs b/crates/sophus_lie/src/isometry3.rs
deleted file mode 100644
index b23d0af..0000000
--- a/crates/sophus_lie/src/isometry3.rs
+++ /dev/null
@@ -1,58 +0,0 @@
-use super::{
-    lie_group::LieGroup,
-    rotation3::{Isometry3Impl, Rotation3},
-};
-use crate::traits::IsTranslationProductGroup;
-use sophus_calculus::types::scalar::IsScalar;
-
-/// 3d isometry group - SE(3)
-pub type Isometry3<S> = LieGroup<S, 6, 7, 3, 4, 1, Isometry3Impl<S>>;
-
-impl<S: IsScalar<1>> Isometry3<S> {
-    /// create isometry from translation and rotation
-    pub fn from_translation_and_rotation(
-        translation: &<S as IsScalar<1>>::Vector<3>,
-        rotation: &Rotation3<S>,
-    ) -> Self {
-        Self::from_translation_and_factor(translation, rotation)
-    }
-
-    /// set rotation
-    pub fn set_rotation(&mut self, rotation: &Rotation3<S>) {
-        self.set_factor(rotation)
-    }
-
-    /// get rotation
-    pub fn rotation(&self) -> Rotation3<S> {
-        self.factor()
-    }
-}
-
-impl Default for Isometry3<f64> {
-    fn default() -> Self {
-        Self::identity()
-    }
-}
-
-mod tests {
-
-    #[test]
-    fn isometry3_prop_tests() {
-        use super::Isometry3;
-        use crate::traits::IsTranslationProductGroup;
-        use sophus_calculus::dual::dual_scalar::Dual;
-
-        Isometry3::<f64>::test_suite();
-        Isometry3::<Dual>::test_suite();
-        Isometry3::<f64>::real_test_suite();
-
-        for g in Isometry3::<f64>::element_examples() {
-            let translation = g.translation();
-            let rotation = g.rotation();
-
-            let g2 = Isometry3::from_translation_and_rotation(&translation, &rotation);
-            assert_eq!(g2.translation(), translation);
-            assert_eq!(g2.rotation().matrix(), rotation.matrix());
-        }
-    }
-}
diff --git a/crates/sophus_lie/src/lib.rs b/crates/sophus_lie/src/lib.rs
index ca57431..a8bb3a9 100644
--- a/crates/sophus_lie/src/lib.rs
+++ b/crates/sophus_lie/src/lib.rs
@@ -1,17 +1,21 @@
+#![feature(portable_simd)]
 #![deny(missing_docs)]
 //! 
# Lie groups module -/// 2d isometry -pub mod isometry2; -/// 3d isometry -pub mod isometry3; +/// Lie groups +pub mod groups; + /// Lie groups pub mod lie_group; -/// 2d rotation -pub mod rotation2; -/// 3d rotation -pub mod rotation3; + +/// Lie groups +pub mod factor_lie_group; + +/// Lie group as a manifold +pub mod lie_group_manifold; + /// Lie group traits pub mod traits; -/// semi-direct product -pub mod translation_product_product; + +/// Real lie group +pub mod real_lie_group; diff --git a/crates/sophus_lie/src/lie_group.rs b/crates/sophus_lie/src/lie_group.rs index c60a815..f2a1f7b 100644 --- a/crates/sophus_lie/src/lie_group.rs +++ b/crates/sophus_lie/src/lie_group.rs @@ -1,36 +1,18 @@ -use std::fmt::Debug; -use std::fmt::Display; -use std::fmt::Formatter; - +use super::traits::IsLieGroupImpl; +use crate::traits::IsLieGroup; use approx::assert_relative_eq; use assertables::assert_le_as_result; - -use sophus_calculus::dual::dual_matrix::DualM; -use sophus_calculus::dual::dual_scalar::Dual; -use sophus_calculus::dual::dual_vector::DualV; -use sophus_calculus::manifold::traits::IsManifold; -use sophus_calculus::manifold::traits::TangentImpl; -use sophus_calculus::maps::matrix_valued_maps::MatrixValuedMapFromVector; -use sophus_calculus::maps::vector_valued_maps::VectorValuedMapFromMatrix; -use sophus_calculus::maps::vector_valued_maps::VectorValuedMapFromVector; -use sophus_calculus::points::example_points; -use sophus_calculus::types::matrix::IsMatrix; -use sophus_calculus::types::params::HasParams; -use sophus_calculus::types::params::ParamsImpl; -use sophus_calculus::types::scalar::IsScalar; -use sophus_calculus::types::vector::IsVector; -use sophus_calculus::types::vector::IsVectorLike; -use sophus_calculus::types::MatF64; -use sophus_calculus::types::VecF64; -use sophus_tensor::view::IsTensorLike; - -use super::traits::IsF64LieFactorGroupImpl; -use super::traits::IsF64LieGroupImpl; -use super::traits::IsLieGroup; -use super::traits::IsLieGroupImpl; +use sophus_core::calculus::manifold::traits::TangentImpl; +use sophus_core::linalg::bool_mask::BoolMask; +use sophus_core::linalg::matrix::IsMatrix; +use sophus_core::linalg::scalar::IsScalar; +use sophus_core::linalg::vector::IsVector; +use sophus_core::params::HasParams; +use sophus_core::params::ParamsImpl; +use std::fmt::Debug; /// Lie group -#[derive(Debug, Copy, Clone)] +#[derive(Debug, Copy, Clone, Default)] pub struct LieGroup< S: IsScalar, const DOF: usize, @@ -40,7 +22,7 @@ pub struct LieGroup< const BATCH_SIZE: usize, G: IsLieGroupImpl, > { - params: S::Vector, + pub(crate) params: S::Vector, phantom: std::marker::PhantomData, } @@ -55,7 +37,7 @@ impl< > ParamsImpl for LieGroup { - fn are_params_valid(params: &>::Vector) -> bool { + fn are_params_valid(params: &>::Vector) -> S::Mask { G::are_params_valid(params) } @@ -80,9 +62,9 @@ impl< { fn from_params(params: &S::Vector) -> Self { assert!( - G::are_params_valid(params), - "Invalid parameters for {}", - params.real() + G::are_params_valid(params).all(), + "Invalid parameters for {:?}", + params.real_vector() ); Self { params: params.clone(), @@ -115,24 +97,35 @@ impl< } impl< - S: IsScalar<1>, + S: IsScalar, const DOF: usize, const PARAMS: usize, const POINT: usize, const AMBIENT: usize, - G: IsLieGroupImpl, - > IsLieGroup - for LieGroup + const BATCH_SIZE: usize, + G: IsLieGroupImpl, + > IsLieGroup + for LieGroup { type G = G; - type GenG> = G::GenG; + type GenG> = G::GenG; type RealG = G::RealG; type DualG = G::DualG; - type GenGroup, G2: IsLieGroupImpl> = - 
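// (Pattern note: the impl-level GenG/RealG/DualG aliases and the group-level
//  GenGroup/RealGroup/DualGroup aliases being defined here serve one purpose:
//  the group is generic over its scalar, so the same implementation can be
//  re-instantiated with a real scalar for evaluation, a dual scalar for
//  forward-mode autodiff, or a batch scalar for SIMD lanes.)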
LieGroup; - type RealGroup = Self::GenGroup; - type DualGroup = Self::GenGroup; + type GenGroup< + S2: IsScalar, + G2: IsLieGroupImpl, + > = LieGroup; + type RealGroup = Self::GenGroup; + type DualGroup = Self::GenGroup; + + const DOF: usize = DOF; + + const PARAMS: usize = PARAMS; + + const POINT: usize = POINT; + + const AMBIENT: usize = AMBIENT; } impl< @@ -210,11 +203,6 @@ impl< G::ad(tangent) } - /// are there multiple shortest paths to the identity? - pub fn has_shortest_path_ambiguity(&self) -> bool { - G::has_shortest_path_ambiguity(&self.params) - } - /// group element examples pub fn element_examples() -> Vec { let mut elements = vec![]; @@ -227,19 +215,26 @@ impl< fn presentability_tests() { if G::IS_ORIGIN_PRESERVING { for g in &Self::element_examples() { - let o = S::Vector::::zero(); - assert_relative_eq!(g.transform(&o).real(), o.real()); + let o = S::Vector::::zeros(); + + approx::assert_abs_diff_eq!( + g.transform(&o).real_vector(), + o.real_vector(), + epsilon = 0.0001 + ); } } else { let mut num_preserves = 0; let mut num = 0; for g in &Self::element_examples() { - let o = S::Vector::::zero(); + let o = S::Vector::::zeros(); let o_transformed = g.transform(&o); - if (o_transformed.real()).norm() < 0.0001 { - num_preserves += 1; - } - num += 1; + let mask = (o_transformed.real_vector()) + .norm() + .less_equal(&S::RealScalar::from_f64(0.0001)); + + num_preserves += mask.count(); + num += S::Mask::all_true().count(); } let percentage = num_preserves as f64 / num as f64; assertables::assert_le!(percentage, 0.75); @@ -259,7 +254,11 @@ impl< let inv_mat: S::Matrix = g.inverse().matrix(); let mat_adj_x2 = Self::vee(&mat.mat_mul(Self::hat(x).mat_mul(inv_mat))); - assert_relative_eq!(mat_adj_x.real(), mat_adj_x2.real(), epsilon = 0.0001); + assert_relative_eq!( + mat_adj_x.real_vector(), + mat_adj_x2.real_vector(), + epsilon = 0.0001 + ); } } for a in &tangent_examples { @@ -270,7 +269,11 @@ impl< let hat_ba = Self::hat(b).mat_mul(Self::hat(a)); let lie_bracket_a_b = Self::vee(&(hat_ab - hat_ba)); - assert_relative_eq!(ad_a_b.real(), lie_bracket_a_b.real(), epsilon = 0.0001); + assert_relative_eq!( + ad_a_b.real_vector(), + lie_bracket_a_b.real_vector(), + epsilon = 0.0001 + ); } } } @@ -280,13 +283,13 @@ impl< let tangent_examples: Vec> = G::tangent_examples(); for g in &group_examples { - let matrix_before = *g.compact().real(); - let matrix_after = *Self::exp(&g.log()).compact().real(); + let matrix_before = *g.compact().real_matrix(); + let matrix_after = *Self::exp(&g.log()).compact().real_matrix(); assert_relative_eq!(matrix_before, matrix_after, epsilon = 0.0001); - let t = *g.clone().inverse().log().real(); - let t2 = -g.log().real(); + let t = *g.clone().inverse().log().real_vector(); + let t2 = -(*g.log().real_vector()); assert_relative_eq!(t, t2, epsilon = 0.0001); } for omega in &tangent_examples { @@ -295,8 +298,8 @@ impl< let exp_neg_omega = Self::exp(&neg_omega); assert_relative_eq!( - exp_inverse.compact().real(), - exp_neg_omega.compact().real(), + exp_inverse.compact(), + exp_neg_omega.compact(), epsilon = 0.0001 ); } @@ -307,8 +310,8 @@ impl< for omega in &tangent_examples { assert_relative_eq!( - omega.real(), - Self::vee(&Self::hat(omega)).real(), + omega.real_vector(), + Self::vee(&Self::hat(omega)).real_vector(), epsilon = 0.0001 ); } @@ -323,8 +326,8 @@ impl< let left_hugging = (g1.group_mul(g2)).group_mul(g3); let right_hugging = g1.group_mul(&g2.group_mul(g3)); assert_relative_eq!( - left_hugging.compact().real(), - right_hugging.compact().real(), + 
left_hugging.compact(), + right_hugging.compact(), epsilon = 0.0001 ); } @@ -335,8 +338,8 @@ impl< let daz_from_foo_transform_1 = g2.inverse().group_mul(&g1.inverse()); let daz_from_foo_transform_2 = g1.group_mul(g2).inverse(); assert_relative_eq!( - daz_from_foo_transform_1.compact().real(), - daz_from_foo_transform_2.compact().real(), + daz_from_foo_transform_1.compact(), + daz_from_foo_transform_2.compact(), epsilon = 0.0001 ); } @@ -358,597 +361,3 @@ impl< Self::adjoint_tests(); } } - -impl< - S: IsScalar, - const DOF: usize, - const PARAMS: usize, - const POINT: usize, - const AMBIENT: usize, - const BATCH_SIZE: usize, - G: IsLieGroupImpl, - > std::ops::Mul<&Self> for LieGroup -{ - type Output = Self; - - fn mul(self, rhs: &Self) -> Self { - self.group_mul(rhs) - } -} - -// impl< -// const DOF: usize, -// const PARAMS: usize, -// const POINT: usize, -// const AMBIENT: usize, -// G: IsLieGroupImpl, -// > std::ops::Mul<&VecF64> for LieGroup -// { -// // TODO: More generic implementation for S::Vector - -// type Output = VecF64; - -// fn mul(self, rhs: &VecF64) -> VecF64 { -// G::transform(&self.params, rhs) -// } -// } - -#[derive(Debug, Clone)] -struct LeftGroupManifold< - S: IsScalar, - const DOF: usize, - const PARAMS: usize, - const POINT: usize, - const AMBIENT: usize, - const BATCH_SIZE: usize, - G: IsLieGroupImpl, -> { - group: LieGroup, -} - -impl< - S: IsScalar, - const DOF: usize, - const PARAMS: usize, - const POINT: usize, - const AMBIENT: usize, - const BATCH_SIZE: usize, - G: IsLieGroupImpl + Clone + Debug, - > ParamsImpl - for LeftGroupManifold -{ - fn are_params_valid(params: &>::Vector) -> bool { - G::are_params_valid(params) - } - - fn params_examples() -> Vec<>::Vector> { - G::params_examples() - } - - fn invalid_params_examples() -> Vec<>::Vector> { - G::invalid_params_examples() - } -} - -impl< - S: IsScalar, - const DOF: usize, - const PARAMS: usize, - const POINT: usize, - const AMBIENT: usize, - const BATCH_SIZE: usize, - G: IsLieGroupImpl + Clone + Debug, - > HasParams - for LeftGroupManifold -{ - fn from_params(params: &>::Vector) -> Self { - Self { - group: LieGroup::from_params(params), - } - } - - fn set_params(&mut self, params: &>::Vector) { - self.group.set_params(params) - } - - fn params(&self) -> &>::Vector { - self.group.params() - } -} - -impl< - S: IsScalar, - const DOF: usize, - const PARAMS: usize, - const POINT: usize, - const AMBIENT: usize, - const BATCH_SIZE: usize, - G: IsLieGroupImpl + Clone + Debug, - > IsManifold - for LeftGroupManifold -{ - fn oplus(&self, tangent: &>::Vector) -> Self { - Self { - group: LieGroup::::exp(tangent) - .group_mul(&self.group), - } - } - - fn ominus(&self, rhs: &Self) -> >::Vector { - self.group.inverse().group_mul(&rhs.group).log() - } - - fn params(&self) -> &>::Vector { - self.group.params() - } -} - -impl< - const DOF: usize, - const PARAMS: usize, - const POINT: usize, - const AMBIENT: usize, - G: IsF64LieGroupImpl, - > LieGroup -{ - /// derivative of group multiplication with respect to the first argument - pub fn da_a_mul_b(a: &Self, b: &Self) -> MatF64 { - G::da_a_mul_b(a.params(), b.params()) - } - - /// derivative of group multiplication with respect to the second argument - pub fn db_a_mul_b(a: &Self, b: &Self) -> MatF64 { - G::db_a_mul_b(a.params(), b.params()) - } - - /// derivative of exponential map - pub fn dx_exp(tangent: &VecF64) -> MatF64 { - G::dx_exp(tangent) - } - - /// derivative of exponential map at the identity - pub fn dx_exp_x_at_0() -> MatF64 { - G::dx_exp_x_at_0() - } - - /// 
derivative of exponential map times point at the identity - pub fn dx_exp_x_times_point_at_0(point: &VecF64) -> MatF64 { - G::dx_exp_x_times_point_at_0(point) - } - - /// derivative of log(exp(x)) at the identity - pub fn dx_log_a_exp_x_b_at_0(a: &Self, b: &Self) -> MatF64 { - let ab = a.group_mul(b); - Self::dx_log_x(ab.params()) - * Self::da_a_mul_b(&Self::identity(), &ab) - * Self::dx_exp_x_at_0() - * Self::adj(a) - } - - /// derivative of logarithmic map - pub fn dx_log_x(params: &VecF64) -> MatF64 { - G::dx_log_x(params) - } - - /// dual representation of the group - pub fn to_dual_c(self) -> LieGroup { - let dual_params = DualV::::c(self.params); - LieGroup::from_params(&dual_params) - } - - fn adjoint_jacobian_tests() { - let tangent_examples: Vec> = G::tangent_examples(); - - for a in &tangent_examples { - let ad_a: MatF64 = Self::ad(a); - - for b in &tangent_examples { - if DOF > 0 { - let num_diff_ad_a = VectorValuedMapFromVector::sym_diff_quotient( - |x: VecF64| { - Self::vee( - &(&Self::hat(a) * Self::hat(&x) - Self::hat(&x) * &Self::hat(a)), - ) - }, - *b, - 0.0001, - ); - for i in 0..DOF { - assert_relative_eq!( - ad_a.get_col_vec(i), - num_diff_ad_a.get([i]), - epsilon = 0.001 - ); - } - - let auto_diff_ad_a = VectorValuedMapFromVector::fw_autodiff( - |x: DualV| { - // Self::vee( - // &(&Self::hat(a) * Self::hat(&x) - Self::hat(&x) * &Self::hat(a)), - // ) - let hat_x = - as IsLieGroup< - f64, - DOF, - PARAMS, - POINT, - AMBIENT, - 1, - >>::DualGroup::hat(&x); - let hat_a = - as IsLieGroup< - f64, - DOF, - PARAMS, - POINT, - AMBIENT, - 1, - >>::DualGroup::hat(&DualV::c(*a)); - let mul = hat_a.mat_mul(hat_x.clone()) - hat_x.mat_mul(hat_a); - as IsLieGroup< - f64, - DOF, - PARAMS, - POINT, - AMBIENT, - 1, - >>::DualGroup::vee(&mul) - //hat_x - }, - *b, - ); - - for i in 0..DOF { - assert_relative_eq!( - ad_a.get_col_vec(i), - auto_diff_ad_a.get([i]), - epsilon = 0.001 - ); - } - } - } - } - } - - fn test_hat_jacobians() { - for x in G::tangent_examples() { - // x == vee(hat(x)) - let vee_hat_x: VecF64 = Self::vee(&Self::hat(&x)); - assert_relative_eq!(x, vee_hat_x, epsilon = 0.0001); - - // dx hat(x) - { - let hat_x = |v: VecF64| -> MatF64 { Self::hat(&v) }; - let dual_hat_x = |vv: DualV| -> DualM { G::DualG::hat(&vv) }; - - let num_diff = MatrixValuedMapFromVector::sym_diff_quotient(hat_x, x, 0.0001); - let auto_diff = MatrixValuedMapFromVector::fw_autodiff(dual_hat_x, x); - - for i in 0..DOF { - assert_relative_eq!(auto_diff.get([i]), num_diff.get([i]), epsilon = 0.001); - } - } - - // dx vee(y) - { - let a = Self::hat(&x); - let vee_x = |v: MatF64| -> VecF64 { Self::vee(&v) }; - let dual_vee_x = |vv: DualM| -> DualV { G::DualG::vee(&vv) }; - - let num_diff = VectorValuedMapFromMatrix::sym_diff_quotient(vee_x, a, 0.0001); - let auto_diff = VectorValuedMapFromMatrix::fw_autodiff(dual_vee_x, a); - - for i in 0..AMBIENT { - for j in 0..AMBIENT { - assert_relative_eq!( - auto_diff.get([i, j]), - num_diff.get([i, j]), - epsilon = 0.001 - ); - } - } - } - } - } - - fn test_mul_jacobians() { - for a in Self::element_examples() { - for b in Self::element_examples() { - let a_dual = a.clone().to_dual_c(); - let b_dual = b.clone().to_dual_c(); - - let dual_mul_x = |vv: DualV| -> DualV { - LieGroup::::from_params(&vv) - .group_mul(&b_dual) - .params() - .clone() - }; - - let auto_diff = - VectorValuedMapFromVector::static_fw_autodiff(dual_mul_x, *a.clone().params()); - let analytic_diff = Self::da_a_mul_b(&a, &b); - assert_relative_eq!(analytic_diff, auto_diff, epsilon = 0.001); - - 
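The dual-number machinery these tests drive (`DualV`, `to_dual_c`, `static_fw_autodiff`) is ordinary forward-mode autodiff. A minimal, self-contained sketch of the underlying idea, using a hypothetical one-variable `Dual` type rather than the crate's API:

/// Forward-mode dual number: carries f(x) and f'(x) through arithmetic.
#[derive(Clone, Copy, Debug)]
struct Dual {
    val: f64, // value f(x)
    der: f64, // derivative f'(x) w.r.t. the one input variable
}

impl Dual {
    fn var(x: f64) -> Dual { Dual { val: x, der: 1.0 } } // the variable itself
    fn con(c: f64) -> Dual { Dual { val: c, der: 0.0 } } // a constant (cf. DualV::c)
    fn mul(self, o: Dual) -> Dual {
        // product rule: (f*g)' = f'*g + f*g'
        Dual { val: self.val * o.val, der: self.der * o.val + self.val * o.der }
    }
    fn sin(self) -> Dual {
        // chain rule: sin(f)' = cos(f)*f'
        Dual { val: self.val.sin(), der: self.val.cos() * self.der }
    }
}

fn main() {
    let _ = Dual::con(1.0); // constants carry a zero derivative part
    // d/dx [x*sin(x)] at x = 2 is sin(2) + 2*cos(2), recovered exactly
    let x = Dual::var(2.0);
    let y = x.mul(x.sin());
    assert!((y.der - (2.0_f64.sin() + 2.0 * 2.0_f64.cos())).abs() < 1e-12);
    println!("value = {}, derivative = {}", y.val, y.der);
}

The group-level tests do the same thing vector-valued: lift the parameters into dual vectors, run the group operation, and read the Jacobian out of the derivative parts.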
let dual_mul_x = |vv: DualV| -> DualV { - a_dual - .group_mul( - &LieGroup::::from_params( - &vv, - ), - ) - .params() - .clone() - }; - - let auto_diff = - VectorValuedMapFromVector::static_fw_autodiff(dual_mul_x, *b.clone().params()); - let analytic_diff = Self::db_a_mul_b(&a, &b); - assert_relative_eq!(analytic_diff, auto_diff, epsilon = 0.001); - } - } - } - - fn test_exp_log_jacobians() { - for t in G::tangent_examples() { - // x == log(exp(x)) - - let log_exp_t: VecF64 = Self::log(&Self::exp(&t)); - assert_relative_eq!(t, log_exp_t, epsilon = 0.0001); - - // dx exp(x).matrix - { - let exp_t = |t: VecF64| -> VecF64 { *Self::exp(&t).params() }; - let dual_exp_t = |vv: DualV| -> DualV { - LieGroup::::exp(&vv) - .params() - .clone() - }; - - let num_diff = - VectorValuedMapFromVector::static_sym_diff_quotient(exp_t, t, 0.0001); - let auto_diff = VectorValuedMapFromVector::static_fw_autodiff(dual_exp_t, t); - - assert_relative_eq!(auto_diff, num_diff, epsilon = 0.001); - - let analytic_diff = Self::dx_exp(&t); - assert_relative_eq!(analytic_diff, num_diff, epsilon = 0.001); - } - } - - //dx exp(x) at x=0 - { - let exp_t = |t: VecF64| -> VecF64 { *Self::exp(&t).params() }; - let dual_exp_t = |vv: DualV| -> DualV { - LieGroup::::exp(&vv) - .params() - .clone() - }; - - let analytic_diff = Self::dx_exp_x_at_0(); - let num_diff = - VectorValuedMapFromVector::static_sym_diff_quotient(exp_t, VecF64::zeros(), 0.0001); - let auto_diff = - VectorValuedMapFromVector::static_fw_autodiff(dual_exp_t, VecF64::zeros()); - - assert_relative_eq!(auto_diff, num_diff, epsilon = 0.001); - assert_relative_eq!(analytic_diff, num_diff, epsilon = 0.001); - } - - for point in example_points::() { - let exp_t = |t: VecF64| -> VecF64 { Self::exp(&t).transform(&point) }; - let dual_exp_t = |vv: DualV| -> DualV { - LieGroup::::exp(&vv) - .transform(&DualV::c(point)) - }; - - let analytic_diff = Self::dx_exp_x_times_point_at_0(&point); - let num_diff = - VectorValuedMapFromVector::static_sym_diff_quotient(exp_t, VecF64::zeros(), 0.0001); - let auto_diff = - VectorValuedMapFromVector::static_fw_autodiff(dual_exp_t, VecF64::zeros()); - - assert_relative_eq!(auto_diff, num_diff, epsilon = 0.001); - assert_relative_eq!(analytic_diff, num_diff, epsilon = 0.001); - } - - for g in Self::element_examples() { - // dx log(y) - { - if g.has_shortest_path_ambiguity() { - // jacobian not uniquely defined, let's skip these cases - continue; - } - - let log_x = |t: VecF64| -> VecF64 { Self::exp(&t).group_mul(&g).log() }; - let o = VecF64::zeros(); - - let dual_params = DualV::c(*g.params()); - let dual_g = - LieGroup::::from_params( - &dual_params, - ); - let dual_log_x = |t: DualV| -> DualV { - LieGroup::::exp(&t) - .group_mul(&dual_g) - .log() - }; - - let num_diff = - VectorValuedMapFromVector::static_sym_diff_quotient(log_x, o, 0.0001); - let auto_diff = VectorValuedMapFromVector::static_fw_autodiff(dual_log_x, o); - - assert_relative_eq!(auto_diff, num_diff, epsilon = 0.001); - - let dual_log_x = |g: DualV| -> DualV { - LieGroup::::from_params(&g) - .log() - }; - - let auto_diff = - VectorValuedMapFromVector::static_fw_autodiff(dual_log_x, *g.params()); - - let analytic_diff = Self::dx_log_x(g.params()); - assert_relative_eq!(analytic_diff, auto_diff, epsilon = 0.001); - } - } - - println!("---"); - - for a in Self::element_examples() { - for b in Self::element_examples() { - println!("a: {:?}, b: {:?}", a, b); - let dual_params_a = DualV::c(*a.clone().params()); - let dual_a = - LieGroup::::from_params( - &dual_params_a, - ); 
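The numeric reference that `sym_diff_quotient` provides throughout these checks is a symmetric (central) difference with O(h^2) truncation error. A standalone sketch for a scalar function (a hypothetical helper, not the crate's signature):

/// Central-difference estimate of f'(x): (f(x + h) - f(x - h)) / (2h).
fn sym_diff_quotient(f: impl Fn(f64) -> f64, x: f64, h: f64) -> f64 {
    (f(x + h) - f(x - h)) / (2.0 * h)
}

fn main() {
    // derivative of exp at 1.0 is e; with h = 1e-4 the error is on the order of 1e-9
    let d = sym_diff_quotient(|x| x.exp(), 1.0, 1e-4);
    assert!((d - 1.0_f64.exp()).abs() < 1e-6);
    println!("d = {d}");
}

The vector- and matrix-valued variants used here apply the same quotient once per input coordinate to assemble a full Jacobian.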
- - let dual_params_b = DualV::c(*b.params()); - let dual_b = - LieGroup::::from_params( - &dual_params_b, - ); - let dual_log_x = |t: DualV| -> DualV { - dual_a - .group_mul( - &LieGroup::::exp(&t) - .group_mul(&dual_b), - ) - .log() - }; - - let analytic_diff = Self::dx_log_a_exp_x_b_at_0(&a, &b); - let o = VecF64::zeros(); - let auto_diff = VectorValuedMapFromVector::static_fw_autodiff(dual_log_x, o); - - assert_relative_eq!(auto_diff, analytic_diff, epsilon = 0.001); - } - } - } - - /// run all real tests - pub fn real_test_suite() { - Self::test_mul_jacobians(); - Self::adjoint_jacobian_tests(); - Self::test_hat_jacobians(); - Self::test_exp_log_jacobians(); - } -} - -impl< - S: IsScalar, - const DOF: usize, - const PARAMS: usize, - const POINT: usize, - const AMBIENT: usize, - const BATCH_SIZE: usize, - G: IsLieGroupImpl, - > Display for LieGroup -{ - fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { - write!(f, "{}", self.compact().real()) - } -} - -impl< - const DOF: usize, - const PARAMS: usize, - const POINT: usize, - G: IsF64LieFactorGroupImpl, - > LieGroup -{ - /// V matrix - used in the exponential map - pub fn mat_v(tangent: &VecF64) -> MatF64 { - G::mat_v(tangent) - } - - /// V matrix inverse - used in the logarithmic map - pub fn mat_v_inverse(tangent: &VecF64) -> MatF64 { - G::mat_v_inverse(tangent) - } - - /// derivative of V matrix - pub fn dx_mat_v(tangent: &VecF64) -> [MatF64; DOF] { - G::dx_mat_v(tangent) - } - - /// derivative of V matrix inverse - pub fn dx_mat_v_inverse(tangent: &VecF64) -> [MatF64; DOF] { - G::dx_mat_v_inverse(tangent) - } - - /// derivative of V matrix times point - pub fn dparams_matrix_times_point( - params: &VecF64, - point: &VecF64, - ) -> MatF64 { - G::dparams_matrix_times_point(params, point) - } - - fn test_mat_v() { - for t in G::tangent_examples() { - let mat_v = Self::mat_v(&t); - let mat_v_inverse = Self::mat_v_inverse(&t); - - assert_relative_eq!( - mat_v.mat_mul(mat_v_inverse), - MatF64::::identity(), - epsilon = 0.0001 - ); - } - } - - fn test_mat_v_jacobian() { - for t in G::tangent_examples() { - println!("t: {}", t); - let mat_v_jacobian = Self::dx_mat_v(&t); - - let mat_v_x = |t: VecF64| -> MatF64 { Self::mat_v(&t) }; - let num_diff = MatrixValuedMapFromVector::sym_diff_quotient(mat_v_x, t, 0.0001); - - for i in 0..DOF { - println!("i: {}", i); - assert_relative_eq!(mat_v_jacobian[i], num_diff.get([i]), epsilon = 0.001); - } - - let mat_v_inv_jacobian = Self::dx_mat_v_inverse(&t); - - let mat_v_x_inv = |t: VecF64| -> MatF64 { Self::mat_v_inverse(&t) }; - let num_diff = MatrixValuedMapFromVector::sym_diff_quotient(mat_v_x_inv, t, 0.0001); - - for i in 0..DOF { - println!("i: {}", i); - assert_relative_eq!(mat_v_inv_jacobian[i], num_diff.get([i]), epsilon = 0.001); - } - } - - for p in example_points::() { - for a in Self::element_examples() { - println!("a: {:?}", a); - println!("p: {:?}", p); - let dual_params_a = DualV::c(*a.clone().params()); - let _dual_a = LieGroup::::from_params( - &dual_params_a, - ); - let dual_p = DualV::c(p); - - let dual_fn = |x: DualV| -> DualV { - LieGroup::::from_params(&x) - .matrix() - * dual_p.clone() - }; - - let auto_diff = VectorValuedMapFromVector::static_fw_autodiff(dual_fn, *a.params()); - let analytic_diff = Self::dparams_matrix_times_point(a.params(), &p); - - assert_relative_eq!(analytic_diff, auto_diff, epsilon = 0.001); - } - } - } - - /// run all tests - pub fn real_factor_test_suite() { - Self::test_mat_v(); - Self::test_mat_v_jacobian(); - } -} diff --git 
a/crates/sophus_lie/src/lie_group_manifold.rs b/crates/sophus_lie/src/lie_group_manifold.rs
new file mode 100644
index 0000000..d206dd4
--- /dev/null
+++ b/crates/sophus_lie/src/lie_group_manifold.rs
@@ -0,0 +1,97 @@
+use crate::lie_group::LieGroup;
+use crate::traits::IsLieGroupImpl;
+use sophus_core::calculus::manifold::traits::IsManifold;
+use sophus_core::linalg::scalar::IsScalar;
+use sophus_core::params::HasParams;
+use sophus_core::params::ParamsImpl;
+use std::fmt::Debug;
+
+#[derive(Debug, Clone)]
+struct LeftGroupManifold<
+    S: IsScalar<BATCH_SIZE>,
+    const DOF: usize,
+    const PARAMS: usize,
+    const POINT: usize,
+    const AMBIENT: usize,
+    const BATCH_SIZE: usize,
+    G: IsLieGroupImpl<S, DOF, PARAMS, POINT, AMBIENT, BATCH_SIZE>,
+> {
+    group: LieGroup<S, DOF, PARAMS, POINT, AMBIENT, BATCH_SIZE, G>,
+}
+
+impl<
+        S: IsScalar<BATCH_SIZE>,
+        const DOF: usize,
+        const PARAMS: usize,
+        const POINT: usize,
+        const AMBIENT: usize,
+        const BATCH_SIZE: usize,
+        G: IsLieGroupImpl<S, DOF, PARAMS, POINT, AMBIENT, BATCH_SIZE> + Clone + Debug,
+    > ParamsImpl<S, PARAMS, BATCH_SIZE>
+    for LeftGroupManifold<S, DOF, PARAMS, POINT, AMBIENT, BATCH_SIZE, G>
+{
+    fn are_params_valid(params: &<S as IsScalar<BATCH_SIZE>>::Vector<PARAMS>) -> S::Mask {
+        G::are_params_valid(params)
+    }
+
+    fn params_examples() -> Vec<<S as IsScalar<BATCH_SIZE>>::Vector<PARAMS>> {
+        G::params_examples()
+    }
+
+    fn invalid_params_examples() -> Vec<<S as IsScalar<BATCH_SIZE>>::Vector<PARAMS>> {
+        G::invalid_params_examples()
+    }
+}
+
+impl<
+        S: IsScalar<BATCH_SIZE>,
+        const DOF: usize,
+        const PARAMS: usize,
+        const POINT: usize,
+        const AMBIENT: usize,
+        const BATCH_SIZE: usize,
+        G: IsLieGroupImpl<S, DOF, PARAMS, POINT, AMBIENT, BATCH_SIZE> + Clone + Debug,
+    > HasParams<S, PARAMS, BATCH_SIZE>
+    for LeftGroupManifold<S, DOF, PARAMS, POINT, AMBIENT, BATCH_SIZE, G>
+{
+    fn from_params(params: &<S as IsScalar<BATCH_SIZE>>::Vector<PARAMS>) -> Self {
+        Self {
+            group: LieGroup::from_params(params),
+        }
+    }
+
+    fn set_params(&mut self, params: &<S as IsScalar<BATCH_SIZE>>::Vector<PARAMS>) {
+        self.group.set_params(params)
+    }
+
+    fn params(&self) -> &<S as IsScalar<BATCH_SIZE>>::Vector<PARAMS> {
+        self.group.params()
+    }
+}
+
+impl<
+        S: IsScalar<BATCH_SIZE>,
+        const DOF: usize,
+        const PARAMS: usize,
+        const POINT: usize,
+        const AMBIENT: usize,
+        const BATCH_SIZE: usize,
+        G: IsLieGroupImpl<S, DOF, PARAMS, POINT, AMBIENT, BATCH_SIZE> + Clone + Debug,
+    > IsManifold<S, PARAMS, DOF, BATCH_SIZE>
+    for LeftGroupManifold<S, DOF, PARAMS, POINT, AMBIENT, BATCH_SIZE, G>
+{
+    fn oplus(&self, tangent: &<S as IsScalar<BATCH_SIZE>>::Vector<DOF>) -> Self {
+        Self {
+            group: LieGroup::<S, DOF, PARAMS, POINT, AMBIENT, BATCH_SIZE, G>::exp(tangent)
+                .group_mul(&self.group),
+        }
+    }
+
+    fn ominus(&self, rhs: &Self) -> <S as IsScalar<BATCH_SIZE>>::Vector<DOF> {
+        self.group.inverse().group_mul(&rhs.group).log()
+    }
+
+    fn params(&self) -> &<S as IsScalar<BATCH_SIZE>>::Vector<PARAMS> {
+        self.group.params()
+    }
+}
diff --git a/crates/sophus_lie/src/pyo3.rs b/crates/sophus_lie/src/pyo3.rs
deleted file mode 100644
index c05e00d..0000000
--- a/crates/sophus_lie/src/pyo3.rs
+++ /dev/null
@@ -1,2 +0,0 @@
-/// Lie groups
-pub mod lie_groups;
diff --git a/crates/sophus_lie/src/real_lie_group.rs b/crates/sophus_lie/src/real_lie_group.rs
new file mode 100644
index 0000000..e139bb6
--- /dev/null
+++ b/crates/sophus_lie/src/real_lie_group.rs
@@ -0,0 +1,555 @@
+use super::traits::IsLieGroupImpl;
+use super::traits::IsRealLieGroupImpl;
+use crate::groups::isometry2::Isometry2;
+use crate::groups::isometry3::Isometry3;
+use crate::groups::rotation2::Rotation2;
+use crate::groups::rotation3::Rotation3;
+use crate::lie_group::LieGroup;
+use approx::assert_relative_eq;
+use nalgebra::SVector;
+use sophus_core::calculus::dual::dual_scalar::DualBatchScalar;
+use sophus_core::calculus::dual::dual_scalar::DualScalar;
+use sophus_core::calculus::maps::matrix_valued_maps::MatrixValuedMapFromVector;
+use sophus_core::calculus::maps::vector_valued_maps::VectorValuedMapFromMatrix;
+use sophus_core::calculus::maps::vector_valued_maps::VectorValuedMapFromVector;
+use sophus_core::linalg::matrix::IsMatrix;
+use sophus_core::linalg::scalar::IsRealScalar;
+use sophus_core::linalg::scalar::IsScalar;
+use sophus_core::linalg::vector::IsVector;
+use sophus_core::linalg::BatchScalarF64;
+use 
sophus_core::params::HasParams; +use sophus_core::tensor::tensor_view::IsTensorLike; +use std::fmt::Display; +use std::fmt::Formatter; + +impl< + S: IsRealScalar, + const DOF: usize, + const PARAMS: usize, + const POINT: usize, + const AMBIENT: usize, + const BATCH_SIZE: usize, + G: IsRealLieGroupImpl, + > LieGroup +where + SVector: IsVector, +{ + /// derivative of exponential map at the identity + pub fn dx_exp_x_at_0() -> S::Matrix { + G::dx_exp_x_at_0() + } + + /// derivative of exponential map times point at the identity + pub fn dx_exp_x_times_point_at_0(point: S::Vector) -> S::Matrix { + G::dx_exp_x_times_point_at_0(point) + } + + /// are there multiple shortest paths to the identity? + pub fn has_shortest_path_ambiguity(&self) -> S::Mask { + G::has_shortest_path_ambiguity(&self.params) + } + + /// derivative of exponential map + pub fn dx_exp(tangent: &S::Vector) -> S::Matrix { + G::dx_exp(tangent) + } + + /// derivative of logarithmic map + pub fn dx_log_x(params: &S::Vector) -> S::Matrix { + G::dx_log_x(params) + } + + /// dual representation of the group + pub fn to_dual_c( + self, + ) -> LieGroup { + LieGroup::from_params(&self.params.to_dual()) + } + + /// derivative of log(exp(x)) at the identity + pub fn dx_log_a_exp_x_b_at_0(a: &Self, b: &Self) -> S::Matrix { + let ab = a.group_mul(b); + Self::dx_log_x(ab.params()) + .mat_mul(Self::da_a_mul_b(&Self::identity(), &ab)) + .mat_mul(Self::dx_exp_x_at_0()) + .mat_mul(Self::adj(a)) + } + + /// derivative of group multiplication with respect to the first argument + pub fn da_a_mul_b(a: &Self, b: &Self) -> S::Matrix { + G::da_a_mul_b(a.params(), b.params()) + } + + /// derivative of group multiplication with respect to the second argument + pub fn db_a_mul_b(a: &Self, b: &Self) -> S::Matrix { + G::db_a_mul_b(a.params(), b.params()) + } + + // fn test_exp_log_jacobians() { + + // } +} + +impl< + S: IsScalar, + const DOF: usize, + const PARAMS: usize, + const POINT: usize, + const AMBIENT: usize, + const BATCH_SIZE: usize, + G: IsLieGroupImpl, + > Display for LieGroup +{ + fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { + write!(f, "{:?}", self.compact()) + } +} + +/// A trait for Lie groups. +pub trait RealLieGroupTest { + /// Run all tests. + fn run_real_tests() { + Self::adjoint_jacobian_tests(); + Self::exp_log_jacobians_tests(); + Self::hat_jacobians_tests(); + Self::mul_jacobians_tests(); + } + + /// Test hat and vee operators. + fn hat_jacobians_tests(); + + /// Test group multiplication jacobians. + fn mul_jacobians_tests(); + + /// Test adjoint jacobian. + fn adjoint_jacobian_tests(); + + /// exp_log_jacobians_tests + fn exp_log_jacobians_tests(); +} + +macro_rules! 
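// (The macro defined here stamps out the `RealLieGroupTest` suite once per concrete
//  (scalar, dual scalar, group, dual group, batch width) tuple: f64 paired with
//  DualScalar, and BatchScalarF64<8> paired with DualBatchScalar<8>, rather than
//  once generically over every `IsRealScalar`.)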
def_real_group_test_template { + ($scalar:ty, $dual_scalar:ty, $group: ty, $dual_group: ty, $batch:literal +) => { + impl RealLieGroupTest for $group { + + fn adjoint_jacobian_tests() { + use crate::traits::IsLieGroup; + const DOF: usize = <$group>::DOF; + use sophus_core::calculus::manifold::traits::TangentImpl; + + let tangent_examples: Vec<<$scalar as IsScalar<$batch>>::Vector> + = <$group>::tangent_examples(); + + for a in &tangent_examples { + let ad_a: <$scalar as IsScalar<$batch>>::Matrix = <$group>::ad(a); + + for b in &tangent_examples { + if DOF > 0 { + let lambda = |x: <$scalar as IsScalar<$batch>>::Vector| { + let lhs = <$group>::hat(a).mat_mul(<$group>::hat(&x)); + let rhs = <$group>::hat(&x).mat_mul(<$group>::hat(a)); + <$group>::vee(&(lhs - rhs)) + }; + + let num_diff_ad_a = + VectorValuedMapFromVector::<$scalar, $batch> + ::static_sym_diff_quotient + ( + |x| { + lambda(x) + }, + *b.real_vector(), + 0.0001, + ); + approx::assert_relative_eq!( + ad_a.real_matrix(), + num_diff_ad_a.real_matrix(), + epsilon = 0.0001 + ); + + let dual_a = + <$dual_scalar as IsScalar<$batch>>::Vector::from_real_vector + ( + a.clone() + ); + + let auto_diff_ad_a + = VectorValuedMapFromVector::<$dual_scalar, $batch>::fw_autodiff + ( + |x| { + let hat_x = <$dual_group>::hat(&x); + let hat_a = <$dual_group>::hat(&dual_a); + let mul = hat_a.mat_mul(hat_x.clone()) + - hat_x.mat_mul(hat_a); + <$dual_group>::vee(&mul) + }, + *b, + ); + + for i in 0..DOF { + assert_relative_eq!( + ad_a.get_col_vec(i), + auto_diff_ad_a.get([i]), + epsilon = 0.001 + ); + } + + } + } + } + } + + fn exp_log_jacobians_tests(){ + use crate::traits::IsLieGroup; + const DOF: usize = <$group>::DOF; + const POINT: usize = <$group>::POINT; + const PARAMS: usize = <$group>::PARAMS; + + use sophus_core::calculus::manifold::traits::TangentImpl; + use sophus_core::points::example_points; + + for t in <$group>::tangent_examples() { + // x == log(exp(x)) + + let log_exp_t: <$scalar as IsScalar<$batch>>::Vector = + Self::log(&Self::exp(&t)); + assert_relative_eq!(t, log_exp_t, epsilon = 0.0001); + + // dx exp(x).matrix + { + let exp_t = |t: <$scalar as IsScalar<$batch>>::Vector| + -> <$scalar as IsScalar<$batch>>::Vector + { + *Self::exp(&t).params() + }; + let dual_exp_t = |vv: <$dual_scalar as IsScalar<$batch>>::Vector| + -> <$dual_scalar as IsScalar<$batch>>::Vector + { + <$dual_group>::exp(&vv).params().clone() + }; + + let dx_exp_num_diff = + VectorValuedMapFromVector::static_sym_diff_quotient(exp_t, t, 0.0001); + let dx_exp_auto_diff = + VectorValuedMapFromVector::<$dual_scalar, $batch>::static_fw_autodiff + ( + dual_exp_t, + t + ); + + assert_relative_eq!(dx_exp_auto_diff, dx_exp_num_diff, epsilon = 0.001); + + let dx_exp_analytic_diff = Self::dx_exp(&t); + assert_relative_eq!(dx_exp_analytic_diff, dx_exp_num_diff, epsilon = 0.001); + } + } + + //dx exp(x) at x=0 + { + let exp_t = |t: <$scalar as IsScalar<$batch>>::Vector| + -> <$scalar as IsScalar<$batch>>::Vector + { + *Self::exp(&t).params() + }; + let dual_exp_t = |vv: <$dual_scalar as IsScalar<$batch>>::Vector| + -> <$dual_scalar as IsScalar<$batch>>::Vector + { + <$dual_group>::exp(&vv).params().clone() + }; + + let analytic_diff = Self::dx_exp_x_at_0(); + let num_diff = + VectorValuedMapFromVector::static_sym_diff_quotient + ( + exp_t, + <$scalar as IsScalar<$batch>>::Vector::zeros(), + 0.0001 + ); + let auto_diff = + VectorValuedMapFromVector::<$dual_scalar, $batch>::static_fw_autodiff + ( + dual_exp_t, + <$scalar as IsScalar<$batch>>::Vector::zeros() + ); + + 
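// (A concrete instance of the quantity checked in this block, for Rotation2:
//  exp maps theta to the unit complex number (cos theta, sin theta), so
//  d/dtheta exp(theta) = (-sin theta, cos theta), which at theta = 0 is (0, 1),
//  i.e. the 2x1 matrix [[0.0], [1.0]] that Rotation2's dx_exp_x_at_0 returns.
//  The central-difference and dual-number paths computed above must both
//  reproduce it.)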
assert_relative_eq!(auto_diff, num_diff, epsilon = 0.001); + assert_relative_eq!(analytic_diff, num_diff, epsilon = 0.001); + } + + for point in example_points::<$scalar, POINT, $batch>() { + let exp_t = |t: <$scalar as IsScalar<$batch>>::Vector| + -> <$scalar as IsScalar<$batch>>::Vector + { + Self::exp(&t).transform(&point) + }; + let dual_exp_t = |vv: <$dual_scalar as IsScalar<$batch>>::Vector| + -> <$dual_scalar as IsScalar<$batch>>::Vector + { + <$dual_group>::exp(&vv).transform(&<$dual_scalar as IsScalar<$batch>> + ::Vector::from_real_vector(point)) + }; + + let analytic_diff = Self::dx_exp_x_times_point_at_0(point); + let num_diff = + VectorValuedMapFromVector::static_sym_diff_quotient + ( + exp_t, + <$scalar as IsScalar<$batch>>::Vector::zeros(), + 0.0001 + ); + let auto_diff = + VectorValuedMapFromVector::<$dual_scalar, $batch>::static_fw_autodiff + ( + dual_exp_t, + <$scalar as IsScalar<$batch>>::Vector::zeros() + ); + assert_relative_eq!(auto_diff, num_diff, epsilon = 0.001); + assert_relative_eq!(analytic_diff, num_diff, epsilon = 0.001); + } + + for g in Self::element_examples() { + // dx log(y) + { + use sophus_core::linalg::bool_mask::BoolMask; + if g.has_shortest_path_ambiguity().any() { + // jacobian not uniquely defined, let's skip these cases + continue; + } + + let log_x = |t: <$scalar as IsScalar<$batch>>::Vector| + -> <$scalar as IsScalar<$batch>>::Vector + { + Self::exp(&t).group_mul(&g).log() + }; + let o = <$scalar as IsScalar<$batch>>::Vector::zeros(); + + let dual_params = <$dual_scalar as IsScalar<$batch>>::Vector + ::from_real_vector(*g.params()); + let dual_g = <$dual_group>::from_params + ( + &dual_params, + ); + let dual_log_x = |t: <$dual_scalar as IsScalar<$batch>>::Vector| + -> <$dual_scalar as IsScalar<$batch>>::Vector + { + <$dual_group>::exp(&t).group_mul(&dual_g).log() + }; + + let num_diff = + VectorValuedMapFromVector::static_sym_diff_quotient(log_x, o, 0.0001); + let auto_diff = + VectorValuedMapFromVector::<$dual_scalar, $batch>::static_fw_autodiff + ( + dual_log_x, + o + ); + assert_relative_eq!(auto_diff, num_diff, epsilon = 0.001); + + let dual_log_x = |g: <$dual_scalar as IsScalar<$batch>>::Vector| + -> <$dual_scalar as IsScalar<$batch>>::Vector + { + <$dual_group>::from_params(&g).log() + }; + let auto_diff = + VectorValuedMapFromVector::<$dual_scalar, $batch>::static_fw_autodiff(dual_log_x, *g.params()); + + let analytic_diff = Self::dx_log_x(g.params()); + assert_relative_eq!(analytic_diff, auto_diff, epsilon = 0.001); + } + } + + for a in Self::element_examples() { + for b in Self::element_examples() { + let dual_params_a = + <$dual_scalar as IsScalar<$batch>>::Vector::from_real_vector + ( + *a.clone().params() + ); + let dual_a = <$dual_group>::from_params(&dual_params_a); + let dual_params_b = + <$dual_scalar as IsScalar<$batch>>::Vector::from_real_vector + ( + *b.params() + ); + let dual_b = <$dual_group>::from_params + ( + &dual_params_b, + ); + let dual_log_x = |t: <$dual_scalar as IsScalar<$batch>>::Vector| + -> <$dual_scalar as IsScalar<$batch>>::Vector + { + dual_a.group_mul( + &<$dual_group>::exp(&t) + .group_mul(&dual_b) + ).log() + }; + + let analytic_diff = Self::dx_log_a_exp_x_b_at_0(&a, &b); + let o = <$scalar as IsScalar<$batch>>::Vector::zeros(); + let auto_diff = VectorValuedMapFromVector::<$dual_scalar, $batch> + ::static_fw_autodiff(dual_log_x, o); + assert_relative_eq!(auto_diff, analytic_diff, epsilon = 0.001); + } + } + } + + + fn hat_jacobians_tests() { + use crate::traits::IsLieGroup; + use 
sophus_core::calculus::manifold::traits::TangentImpl; + const DOF: usize = <$group>::DOF; + const AMBIENT: usize = <$group>::AMBIENT; + + + for x in <$group>::tangent_examples() { + // x == vee(hat(x)) + let vee_hat_x: <$scalar as IsScalar<$batch>>::Vector + = <$group>::vee(&<$group>::hat(&x)); + assert_relative_eq!(x, vee_hat_x, epsilon = 0.0001); + + // dx hat(x) + { + let hat_x = |v: <$scalar as IsScalar<$batch>>::Vector| + -> <$scalar as IsScalar<$batch>>::Matrix + { + <$group>::hat(&v) + }; + let dual_hat_x = |vv: <$dual_scalar as IsScalar<$batch>>::Vector| + -> <$dual_scalar as IsScalar<$batch>>::Matrix + { + <$dual_group>::hat(&vv) + }; + + let num_diff = + MatrixValuedMapFromVector::sym_diff_quotient(hat_x, x, 0.0001); + let auto_diff = + MatrixValuedMapFromVector::<$dual_scalar, $batch>::fw_autodiff( + dual_hat_x, x); + + for i in 0..DOF { + assert_relative_eq!( + auto_diff.get([i]), + num_diff.get([i]), + epsilon = 0.001 + ); + } + } + + // dx vee(y) + { + let a = Self::hat(&x); + let vee_x = |v: <$scalar as IsScalar<$batch>>::Matrix| + -> <$scalar as IsScalar<$batch>>::Vector + { + <$group>::vee(&v) + }; + let dual_vee_x = + |vv: <$dual_scalar as IsScalar<$batch>>::Matrix| + -> <$dual_scalar as IsScalar<$batch>>::Vector + { + <$dual_group>::vee(&vv) + }; + + let num_diff = + VectorValuedMapFromMatrix::sym_diff_quotient(vee_x, a, 0.0001); + let auto_diff = + VectorValuedMapFromMatrix::<$dual_scalar, $batch>::fw_autodiff + ( + dual_vee_x, + a + ); + + for i in 0..AMBIENT { + for j in 0..AMBIENT { + assert_relative_eq!( + auto_diff.get([i, j]), + num_diff.get([i, j]), + epsilon = 0.001 + ); + } + } + } + } + } + + fn mul_jacobians_tests() { + use crate::traits::IsLieGroup; + const PARAMS: usize = <$group>::PARAMS; + for a in Self::element_examples() { + for b in Self::element_examples() { + let a_dual = a.clone().to_dual_c(); + let b_dual = b.clone().to_dual_c(); + + let dual_mul_x = |vv: <$dual_scalar as IsScalar<$batch>>::Vector| + -> <$dual_scalar as IsScalar<$batch>>::Vector + { + <$dual_group>::from_params(&vv) + .group_mul(&b_dual).params().clone() + }; + + let auto_diff = + VectorValuedMapFromVector::<$dual_scalar, $batch>::static_fw_autodiff + ( + dual_mul_x, + *a.clone().params() + ); + let analytic_diff = Self::da_a_mul_b(&a, &b); + assert_relative_eq!(analytic_diff, auto_diff, epsilon = 0.001); + + let dual_mul_x = |vv: <$dual_scalar as IsScalar<$batch>>::Vector| + -> <$dual_scalar as IsScalar<$batch>>::Vector + { + a_dual.group_mul(& LieGroup::from_params(&vv)).params().clone() + }; + + let auto_diff = + VectorValuedMapFromVector::<$dual_scalar, $batch>::static_fw_autodiff + ( + dual_mul_x, *b.clone().params() + ); + let analytic_diff = Self::db_a_mul_b(&a, &b); + assert_relative_eq!(analytic_diff, auto_diff, epsilon = 0.001); + } + } + } + + } + }; +} + +def_real_group_test_template!(f64, DualScalar, Rotation2, Rotation2, 1); +def_real_group_test_template!( + BatchScalarF64<8>, + DualBatchScalar<8>, + Rotation2, 8>, + Rotation2, 8>, + 8 +); + +def_real_group_test_template!(f64, DualScalar, Isometry2, Isometry2, 1); +def_real_group_test_template!( + BatchScalarF64<8>, + DualBatchScalar<8>, + Isometry2, 8>, + Isometry2, 8>, + 8 +); + +def_real_group_test_template!(f64, DualScalar, Rotation3, Rotation3, 1); +def_real_group_test_template!( + BatchScalarF64<8>, + DualBatchScalar<8>, + Rotation3, 8>, + Rotation3, 8>, + 8 +); + +def_real_group_test_template!(f64, DualScalar, Isometry3, Isometry3, 1); +def_real_group_test_template!( + BatchScalarF64<8>, + DualBatchScalar<8>, + 
Isometry3, 8>, + Isometry3, 8>, + 8 +); diff --git a/crates/sophus_lie/src/rotation2.rs b/crates/sophus_lie/src/rotation2.rs deleted file mode 100644 index d92ef97..0000000 --- a/crates/sophus_lie/src/rotation2.rs +++ /dev/null @@ -1,292 +0,0 @@ -use crate::traits::IsLieGroupImpl; -use sophus_calculus::dual::dual_scalar::Dual; -use sophus_calculus::manifold::{self}; -use sophus_calculus::types::matrix::IsMatrix; -use sophus_calculus::types::params::HasParams; -use sophus_calculus::types::params::ParamsImpl; -use sophus_calculus::types::scalar::IsScalar; -use sophus_calculus::types::vector::IsVector; -use sophus_calculus::types::vector::IsVectorLike; -use sophus_calculus::types::MatF64; -use sophus_calculus::types::VecF64; -use std::marker::PhantomData; - -/// 2D rotation group implementation struct - SO(2) -#[derive(Debug, Copy, Clone)] -pub struct Rotation2Impl> { - phanton: PhantomData, -} - -impl> Rotation2Impl {} - -impl> ParamsImpl for Rotation2Impl { - fn params_examples() -> Vec> { - let mut params = vec![]; - for i in 0..10 { - let angle = i as f64 * std::f64::consts::PI / 5.0; - params.push( - Rotation2::::exp(&S::Vector::<1>::from_array([angle.into()])) - .params() - .clone(), - ); - } - params - } - - fn invalid_params_examples() -> Vec> { - vec![ - S::Vector::<2>::from_array([S::c(0.0), S::c(0.0)]), - S::Vector::<2>::from_array([S::c(0.5), S::c(0.5)]), - S::Vector::<2>::from_array([S::c(0.5), S::c(-0.5)]), - ] - } - - fn are_params_valid(params: &S::Vector<2>) -> bool { - let norm = params.norm(); - (norm - S::c(1.0)).abs() < S::c(1e-6) - } -} - -impl> manifold::traits::TangentImpl for Rotation2Impl { - fn tangent_examples() -> Vec> { - vec![ - S::Vector::<1>::from_array([0.0.into()]), - S::Vector::<1>::from_array([1.0.into()]), - S::Vector::<1>::from_array([(-1.0).into()]), - S::Vector::<1>::from_array([0.5.into()]), - S::Vector::<1>::from_array([(-0.5).into()]), - ] - } -} - -impl> crate::traits::IsLieGroupImpl for Rotation2Impl { - type GenG> = Rotation2Impl; - type RealG = Rotation2Impl; - type DualG = Rotation2Impl; - - const IS_ORIGIN_PRESERVING: bool = true; - const IS_AXIS_DIRECTION_PRESERVING: bool = false; - const IS_DIRECTION_VECTOR_PRESERVING: bool = false; - const IS_SHAPE_PRESERVING: bool = true; - const IS_DISTANCE_PRESERVING: bool = true; - const IS_PARALLEL_LINE_PRESERVING: bool = true; - - fn identity_params() -> S::Vector<2> { - S::Vector::<2>::from_array([1.0.into(), 0.0.into()]) - } - - fn adj(_params: &S::Vector<2>) -> S::Matrix<1, 1> { - S::Matrix::<1, 1>::identity() - } - - fn exp(omega: &S::Vector<1>) -> S::Vector<2> { - // angle to complex number - let angle = omega.get(0); - let cos = angle.clone().cos(); - let sin = angle.sin(); - S::Vector::<2>::from_array([cos, sin]) - } - - fn log(params: &S::Vector<2>) -> S::Vector<1> { - // complex number to angle - let angle = params.get(1).atan2(params.get(0)); - S::Vector::<1>::from_array([angle]) - } - - fn hat(omega: &S::Vector<1>) -> S::Matrix<2, 2> { - let angle = omega.clone().get(0); - S::Matrix::<2, 2>::from_array2([[0.0.into(), -angle.clone()], [angle, 0.0.into()]]) - } - - fn vee(hat: &S::Matrix<2, 2>) -> S::Vector<1> { - let angle = hat.get((1, 0)); - S::Vector::<1>::from_array([angle]) - } - - fn group_mul(params1: &S::Vector<2>, params2: &S::Vector<2>) -> S::Vector<2> { - let a = params1.get(0); - let b = params1.get(1); - let c = params2.get(0); - let d = params2.get(1); - - S::Vector::<2>::from_array([a.clone() * c.clone() - d.clone() * b.clone(), a * d + b * c]) - } - - fn inverse(params: 
&S::Vector<2>) -> S::Vector<2> { - S::Vector::<2>::from_array([params.get(0), -params.get(1)]) - } - - fn transform(params: &S::Vector<2>, point: &S::Vector<2>) -> S::Vector<2> { - Self::matrix(params) * point.clone() - } - - fn to_ambient(params: &S::Vector<2>) -> S::Vector<2> { - // homogeneous coordinates - params.clone() - } - - fn compact(params: &S::Vector<2>) -> S::Matrix<2, 2> { - Self::matrix(params) - } - - fn matrix(params: &S::Vector<2>) -> S::Matrix<2, 2> { - // rotation matrix - let cos = params.get(0); - let sin = params.get(1); - S::Matrix::<2, 2>::from_array2([[cos.clone(), -sin.clone()], [sin, cos]]) - } - - fn ad(_tangent: &S::Vector<1>) -> S::Matrix<1, 1> { - S::Matrix::<1, 1>::zero() - } - - fn has_shortest_path_ambiguity(params: &>::Vector<2>) -> bool { - (Self::log(params).real()[0].abs() - std::f64::consts::PI).abs() < 1e-5 - } -} - -impl crate::traits::IsF64LieGroupImpl<1, 2, 2, 2> for Rotation2Impl { - fn dx_exp_x_at_0() -> MatF64<2, 1> { - MatF64::from_c_array2([[0.0], [1.0]]) - } - - fn dx_exp_x_times_point_at_0(point: &sophus_calculus::types::VecF64<2>) -> MatF64<2, 1> { - MatF64::from_array2([[-point[1]], [point[0]]]) - } - - fn dx_exp(tangent: &VecF64<1>) -> MatF64<2, 1> { - let theta = tangent[0]; - - MatF64::<2, 1>::from_array2([[-theta.sin()], [theta.cos()]]) - } - - fn dx_log_x(params: &VecF64<2>) -> MatF64<1, 2> { - let x_0 = params[0]; - let x_1 = params[1]; - let x_sq = x_0 * x_0 + x_1 * x_1; - MatF64::from_array2([[-x_1 / x_sq, x_0 / x_sq]]) - } - - fn da_a_mul_b(_a: &VecF64<2>, b: &VecF64<2>) -> MatF64<2, 2> { - Self::matrix(b) - } - - fn db_a_mul_b(a: &VecF64<2>, _b: &VecF64<2>) -> MatF64<2, 2> { - Self::matrix(a) - } -} - -/// 2d rotation group - SO(2) -pub type Rotation2 = crate::lie_group::LieGroup>; - -impl> crate::traits::IsLieFactorGroupImpl for Rotation2Impl { - type GenFactorG> = Rotation2Impl; - type RealFactorG = Rotation2Impl; - type DualFactorG = Rotation2Impl; - - fn mat_v(v: &S::Vector<1>) -> S::Matrix<2, 2> { - let sin_theta_by_theta; - let one_minus_cos_theta_by_theta: S; - let theta = v.get(0); - let abs_theta = theta.clone().abs(); - if abs_theta.real() < 1e-6 { - let theta_sq = theta.clone() * theta.clone(); - sin_theta_by_theta = S::c(1.0) - S::c(1.0 / 6.0) * theta_sq.clone(); - one_minus_cos_theta_by_theta = - S::c(0.5) * theta.clone() - S::c(1.0 / 24.0) * theta * theta_sq; - } else { - sin_theta_by_theta = theta.clone().sin() / theta.clone(); - one_minus_cos_theta_by_theta = (S::c(1.0) - theta.clone().cos()) / theta; - } - S::Matrix::<2, 2>::from_array2([ - [ - sin_theta_by_theta.clone(), - -one_minus_cos_theta_by_theta.clone(), - ], - [one_minus_cos_theta_by_theta, sin_theta_by_theta], - ]) - } - - fn mat_v_inverse(tangent: &S::Vector<1>) -> S::Matrix<2, 2> { - let theta = tangent.get(0); - let halftheta = S::c(0.5) * theta.clone(); - let halftheta_by_tan_of_halftheta: S; - - let real_minus_one = theta.clone().cos() - S::c(1.0); - let abs_real_minus_one = real_minus_one.clone().abs(); - if abs_real_minus_one.real() < 1e-6 { - halftheta_by_tan_of_halftheta = - S::c(1.0) - S::c(1.0 / 12.0) * tangent.get(0) * tangent.get(0); - } else { - halftheta_by_tan_of_halftheta = -(halftheta.clone() * theta.sin()) / real_minus_one; - } - - S::Matrix::<2, 2>::from_array2([ - [halftheta_by_tan_of_halftheta.clone(), halftheta.clone()], - [-halftheta, halftheta_by_tan_of_halftheta], - ]) - } - - fn adj_of_translation(_params: &S::Vector<2>, point: &S::Vector<2>) -> S::Matrix<2, 1> { - S::Matrix::<2, 1>::from_array2([[point.get(1)], 
[-point.get(0)]]) - } - - fn ad_of_translation(point: &S::Vector<2>) -> S::Matrix<2, 1> { - S::Matrix::<2, 1>::from_array2([[point.get(1)], [-point.get(0)]]) - } -} - -impl crate::traits::IsF64LieFactorGroupImpl<1, 2, 2> for Rotation2Impl { - fn dx_mat_v(tangent: &VecF64<1>) -> [MatF64<2, 2>; 1] { - let theta = tangent[0]; - let theta_sq = theta * theta; - let sin_theta = theta.sin(); - let cos_theta = theta.cos(); - - let (m00, m01) = if theta_sq.abs() < 1e-6 { - ( - -theta / 3.0 + (theta * theta_sq) / 30.0, - -0.5 + 0.125 * theta_sq, - ) - } else { - ( - (theta * cos_theta - sin_theta) / theta_sq, - (-theta * sin_theta - cos_theta + 1.0) / theta_sq, - ) - }; - - [MatF64::<2, 2>::from_array2([[m00, m01], [-m01, m00]])] - } - - fn dparams_matrix_times_point(_params: &VecF64<2>, point: &VecF64<2>) -> MatF64<2, 2> { - MatF64::from_array2([[point[0], -point[1]], [point[1], point[0]]]) - } - - fn dx_mat_v_inverse(tangent: &VecF64<1>) -> [MatF64<2, 2>; 1] { - let theta = tangent[0]; - let sin_theta = theta.sin(); - let cos_theta = theta.cos(); - - let c = if theta.abs() < 1e-6 { - -1.0 / 6.0 * theta - } else { - (theta - sin_theta) / (2.0 * (cos_theta - 1.0)) - }; - - [MatF64::<2, 2>::from_array2([[c, 0.5], [-0.5, c]])] - } -} - -mod tests { - - #[test] - fn rotation2_prop_tests() { - use super::Rotation2; - use sophus_calculus::dual::dual_scalar::Dual; - - Rotation2::::test_suite(); - Rotation2::::test_suite(); - Rotation2::::real_test_suite(); - Rotation2::::real_factor_test_suite(); - } -} diff --git a/crates/sophus_lie/src/rotation3.rs b/crates/sophus_lie/src/rotation3.rs deleted file mode 100644 index 2dc917b..0000000 --- a/crates/sophus_lie/src/rotation3.rs +++ /dev/null @@ -1,589 +0,0 @@ -use std::marker::PhantomData; - -use nalgebra::ComplexField; - -use sophus_calculus::dual::dual_scalar::Dual; -use sophus_calculus::manifold::{self}; -use sophus_calculus::types::matrix::IsMatrix; -use sophus_calculus::types::params::HasParams; -use sophus_calculus::types::params::ParamsImpl; -use sophus_calculus::types::scalar::IsScalar; -use sophus_calculus::types::vector::cross; -use sophus_calculus::types::vector::IsVector; -use sophus_calculus::types::MatF64; -use sophus_calculus::types::VecF64; - -use super::lie_group::LieGroup; -use super::traits::IsLieGroupImpl; - -/// 3d rotation implementation - SO(3) -#[derive(Debug, Copy, Clone)] -pub struct Rotation3Impl> { - phantom: PhantomData, -} - -impl> ParamsImpl for Rotation3Impl { - fn params_examples() -> Vec> { - let mut params = vec![]; - - params.push( - Rotation3::::exp(&S::Vector::<3>::from_c_array([0.0, 0.0, 0.0])) - .params() - .clone(), - ); - params.push( - Rotation3::::exp(&S::Vector::<3>::from_c_array([0.1, 0.5, -0.1])) - .params() - .clone(), - ); - params.push( - Rotation3::::exp(&S::Vector::<3>::from_c_array([0.0, 0.2, 1.0])) - .params() - .clone(), - ); - params.push( - Rotation3::::exp(&S::Vector::<3>::from_c_array([-0.2, 0.0, 0.8])) - .params() - .clone(), - ); - params - } - - fn invalid_params_examples() -> Vec> { - vec![ - S::Vector::<4>::from_array([0.0.into(), 0.0.into(), 0.0.into(), 0.0.into()]), - S::Vector::<4>::from_array([0.5.into(), 0.5.into(), 0.5.into(), 0.0.into()]), - S::Vector::<4>::from_array([0.5.into(), (-0.5).into(), 0.5.into(), 1.0.into()]), - ] - } - - fn are_params_valid(params: &S::Vector<4>) -> bool { - let norm = params.norm().real(); - (norm - 1.0).abs() < 1e-6 - } -} - -impl> manifold::traits::TangentImpl for Rotation3Impl { - fn tangent_examples() -> Vec> { - vec![ - S::Vector::<3>::from_c_array([0.0, 
0.0, 0.0]), - S::Vector::<3>::from_c_array([1.0, 0.0, 0.0]), - S::Vector::<3>::from_c_array([0.0, 1.0, 0.0]), - S::Vector::<3>::from_c_array([0.0, 0.0, 1.0]), - S::Vector::<3>::from_c_array([0.5, 0.5, 0.1]), - S::Vector::<3>::from_c_array([-0.1, -0.5, -0.5]), - ] - } -} - -impl> IsLieGroupImpl for Rotation3Impl { - const IS_ORIGIN_PRESERVING: bool = true; - const IS_AXIS_DIRECTION_PRESERVING: bool = false; - const IS_DIRECTION_VECTOR_PRESERVING: bool = false; - const IS_SHAPE_PRESERVING: bool = true; - const IS_DISTANCE_PRESERVING: bool = true; - const IS_PARALLEL_LINE_PRESERVING: bool = true; - - fn identity_params() -> S::Vector<4> { - S::Vector::<4>::from_c_array([1.0, 0.0, 0.0, 0.0]) - } - - fn adj(params: &S::Vector<4>) -> S::Matrix<3, 3> { - Self::matrix(params) - } - - fn exp(omega: &S::Vector<3>) -> S::Vector<4> { - const EPS: f64 = 1e-8; - let theta_sq = omega.squared_norm(); - - let (imag_factor, real_factor) = if theta_sq.real() < EPS * EPS { - let theta_po4 = theta_sq.clone() * theta_sq.clone(); - ( - S::c(0.5) - S::c(1.0 / 48.0) * theta_sq.clone() - + S::c(1.0 / 3840.0) * theta_po4.clone(), - S::c(1.0) - S::c(1.0 / 8.0) * theta_sq + S::c(1.0 / 384.0) * theta_po4, - ) - } else { - let theta = theta_sq.sqrt(); - let half_theta: S = S::c(0.5) * theta.clone(); - (half_theta.clone().sin() / theta, half_theta.cos()) - }; - S::Vector::<4>::from_array([ - real_factor, - imag_factor.clone() * omega.get(0), - imag_factor.clone() * omega.get(1), - imag_factor * omega.get(2), - ]) - } - - fn log(params: &S::Vector<4>) -> S::Vector<3> { - const EPS: f64 = 1e-8; - let ivec: S::Vector<3> = params.get_fixed_rows::<3>(1); - - let squared_n = ivec.squared_norm(); - let w = params.get(0); - let w_real = w.real(); - - let two_atan_nbyd_by_n: S = if squared_n.real() < EPS * EPS { - assert!( - w_real.abs() > EPS, - "|params| should be close to 1. 
(w = {})", - w_real - ); - let w_sq = w.clone() * w.clone(); - S::c(2.0) / w.clone() - S::c(2.0 / 3.0) * squared_n / (w_sq * w) - } else { - let n = squared_n.sqrt(); - let atan_nbyw = if w_real < 0.0 { - -n.clone().atan2(-w) - } else { - n.clone().atan2(w) - }; - S::c(2.0) * atan_nbyw / n - }; - ivec.scaled(two_atan_nbyd_by_n) - } - - fn hat(omega: &S::Vector<3>) -> S::Matrix<3, 3> { - let o0 = omega.get(0); - let o1 = omega.get(1); - let o2 = omega.get(2); - - S::Matrix::from_array2([ - [S::zero(), -o2.clone(), o1.clone()], - [o2, S::zero(), -o0.clone()], - [-o1, o0, S::zero()], - ]) - } - - fn vee(omega_hat: &S::Matrix<3, 3>) -> S::Vector<3> { - S::Vector::<3>::from_array([ - omega_hat.get((2, 1)), - omega_hat.get((0, 2)), - omega_hat.get((1, 0)), - ]) - } - - fn inverse(params: &S::Vector<4>) -> S::Vector<4> { - S::Vector::from_array([ - params.get(0), - -params.get(1), - -params.get(2), - -params.get(3), - ]) - } - - fn transform(params: &S::Vector<4>, point: &S::Vector<3>) -> S::Vector<3> { - Self::matrix(params) * point.clone() - } - - fn to_ambient(point: &S::Vector<3>) -> S::Vector<3> { - point.clone() - } - - fn compact(params: &S::Vector<4>) -> S::Matrix<3, 3> { - Self::matrix(params) - } - - fn matrix(params: &S::Vector<4>) -> S::Matrix<3, 3> { - let ivec = params.get_fixed_rows::<3>(1); - let re = params.get(0); - - let unit_x = S::Vector::from_c_array([1.0, 0.0, 0.0]); - let unit_y = S::Vector::from_c_array([0.0, 1.0, 0.0]); - let unit_z = S::Vector::from_c_array([0.0, 0.0, 1.0]); - - let two = S::c(2.0); - - let uv_x: S::Vector<3> = cross::(ivec.clone(), unit_x.clone()).scaled(two.clone()); - let uv_y: S::Vector<3> = cross::(ivec.clone(), unit_y.clone()).scaled(two.clone()); - let uv_z: S::Vector<3> = cross::(ivec.clone(), unit_z.clone()).scaled(two); - - let col_x = unit_x + cross::(ivec.clone(), uv_x.clone()) + uv_x.scaled(re.clone()); - let col_y = unit_y + cross::(ivec.clone(), uv_y.clone()) + uv_y.scaled(re.clone()); - let col_z = unit_z + cross::(ivec.clone(), uv_z.clone()) + uv_z.scaled(re.clone()); - - S::Matrix::block_mat1x2::<1, 2>( - col_x.to_mat(), - S::Matrix::block_mat1x2(col_y.to_mat(), col_z.to_mat()), - ) - } - - fn ad(omega: &S::Vector<3>) -> S::Matrix<3, 3> { - Self::hat(omega) - } - - type GenG> = Rotation3Impl; - type RealG = Rotation3Impl; - type DualG = Rotation3Impl; - - fn group_mul(lhs_params: &S::Vector<4>, rhs_params: &S::Vector<4>) -> S::Vector<4> { - let lhs_re = lhs_params.get(0); - let rhs_re = rhs_params.get(0); - - let lhs_ivec = lhs_params.get_fixed_rows::<3>(1); - let rhs_ivec = rhs_params.get_fixed_rows::<3>(1); - - let re = lhs_re.clone() * rhs_re.clone() - lhs_ivec.clone().dot(rhs_ivec.clone()); - let ivec = - rhs_ivec.scaled(lhs_re) + lhs_ivec.scaled(rhs_re) + cross::(lhs_ivec, rhs_ivec); - - let mut params = S::Vector::block_vec2(re.to_vec(), ivec); - - if (params.norm().real() - 1.0).abs() > 1e-7 { - // todo: use tailor approximation for norm close to 1 - params = params.normalized(); - } - params - } - - fn has_shortest_path_ambiguity(params: &>::Vector<4>) -> bool { - let theta = Self::log(params).real().norm(); - (theta - std::f64::consts::PI).abs() < 1e-5 - } -} - -impl crate::traits::IsF64LieGroupImpl<3, 4, 3, 3> for Rotation3Impl { - fn dx_exp_x_at_0() -> MatF64<4, 3> { - MatF64::from_c_array2([ - [0.0, 0.0, 0.0], - [0.5, 0.0, 0.0], - [0.0, 0.5, 0.0], - [0.0, 0.0, 0.5], - ]) - } - - fn da_a_mul_b(_a: &VecF64<4>, b: &VecF64<4>) -> MatF64<4, 4> { - let b_real = b[0]; - let b_imag0 = b[1]; - let b_imag1 = b[2]; - let b_imag2 = 
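// (Quaternion multiplication a*b is bilinear in a and b, so both parameter
//  Jacobians are constant 4x4 matrices: d(a*b)/da is the right-multiplication
//  matrix assembled here from b's components, and d(a*b)/db below is the
//  left-multiplication matrix assembled from a's components.)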
b[3]; - - MatF64::<4, 4>::from_array2([ - [b_real, -b_imag0, -b_imag1, -b_imag2], - [b_imag0, b_real, b_imag2, -b_imag1], - [b_imag1, -b_imag2, b_real, b_imag0], - [b_imag2, b_imag1, -b_imag0, b_real], - ]) - } - - fn db_a_mul_b(a: &VecF64<4>, _b: &VecF64<4>) -> MatF64<4, 4> { - let a_real = a[0]; - let a_imag0 = a[1]; - let a_imag1 = a[2]; - let a_imag2 = a[3]; - - MatF64::<4, 4>::from_array2([ - [a_real, -a_imag0, -a_imag1, -a_imag2], - [a_imag0, a_real, -a_imag2, a_imag1], - [a_imag1, a_imag2, a_real, -a_imag0], - [a_imag2, -a_imag1, a_imag0, a_real], - ]) - } - - fn dx_exp_x_times_point_at_0(point: &sophus_calculus::types::VecF64<3>) -> MatF64<3, 3> { - Self::hat(&-point) - } - - fn dx_exp(omega: &VecF64<3>) -> MatF64<4, 3> { - let theta_sq = omega.squared_norm(); - - if theta_sq < 1e-6 { - return Self::dx_exp_x_at_0(); - } - - let omega_0 = omega[0]; - let omega_1 = omega[1]; - let omega_2 = omega[2]; - let theta = theta_sq.sqrt(); - let a = (0.5 * theta).sin() / theta; - let b = (0.5 * theta).cos() / (theta_sq) - 2.0 * (0.5 * theta).sin() / (theta_sq * theta); - - 0.5 * MatF64::from_array2([ - [-omega_0 * a, -omega_1 * a, -omega_2 * a], - [ - omega_0 * omega_0 * b + 2.0 * a, - omega_0 * omega_1 * b, - omega_0 * omega_2 * b, - ], - [ - omega_0 * omega_1 * b, - omega_1 * omega_1 * b + 2.0 * a, - omega_1 * omega_2 * b, - ], - [ - omega_0 * omega_2 * b, - omega_1 * omega_2 * b, - omega_2 * omega_2 * b + 2.0 * a, - ], - ]) - } - - fn dx_log_x(params: &VecF64<4>) -> MatF64<3, 4> { - let ivec: VecF64<3> = params.get_fixed_rows::<3>(1); - let w: f64 = params[0]; - let squared_n: f64 = ivec.squared_norm(); - - if squared_n < 1e-6 { - let mut m = MatF64::<3, 4>::zeros(); - m.fixed_columns_mut::<3>(1) - .copy_from(&(2.0 * MatF64::<3, 3>::identity())); - return m; - } - - let n: f64 = squared_n.sqrt(); - let theta = 2.0 * n.atan2(w); - - let dw_ivec_theta: VecF64<3> = ivec * (-2.0 / (squared_n + w * w)); - let factor = 2.0 * w / (squared_n * (squared_n + w * w)) - theta / (squared_n * n); - - let mut m = MatF64::<3, 4>::zeros(); - - m.set_column(0, &dw_ivec_theta); - m.fixed_columns_mut::<3>(1).copy_from( - &(MatF64::<3, 3>::identity() * theta / n + ivec * ivec.transpose() * factor), - ); - m - } -} - -impl> crate::traits::IsLieFactorGroupImpl for Rotation3Impl { - type GenFactorG> = Rotation3Impl; - type RealFactorG = Rotation3Impl; - type DualFactorG = Rotation3Impl; - - fn mat_v(omega: &S::Vector<3>) -> S::Matrix<3, 3> { - let theta_sq = omega.squared_norm(); - let mat_omega: S::Matrix<3, 3> = Rotation3Impl::::hat(omega); - let mat_omega_sq = mat_omega.clone().mat_mul(mat_omega.clone()); - if theta_sq.real() < 1e-6 { - S::Matrix::<3, 3>::identity() + mat_omega.scaled(S::c(0.5)) - } else { - let theta = theta_sq.clone().sqrt(); - S::Matrix::<3, 3>::identity() - + mat_omega.scaled((S::c(1.0) - theta.clone().cos()) / theta_sq.clone()) - + mat_omega_sq.scaled((theta.clone() - theta.clone().sin()) / (theta_sq * theta)) - } - } - - fn mat_v_inverse(omega: &S::Vector<3>) -> S::Matrix<3, 3> { - let theta_sq = omega.clone().dot(omega.clone()); - let mat_omega: S::Matrix<3, 3> = Rotation3Impl::::hat(omega); - let mat_omega_sq = mat_omega.clone().mat_mul(mat_omega.clone()); - - if theta_sq.real() < 1e-6 { - S::Matrix::<3, 3>::identity() - mat_omega.scaled(S::c(0.5)) - + mat_omega_sq.scaled(S::c(1. 
/ 12.)) - } else { - let theta = theta_sq.clone().sqrt(); - let half_theta = S::c(0.5) * theta.clone(); - - S::Matrix::<3, 3>::identity() - mat_omega.scaled(S::c(0.5)) - + mat_omega_sq.scaled( - (S::c(1.0) - - (S::c(0.5) * theta.clone() * half_theta.clone().cos()) - / half_theta.sin()) - / (theta.clone() * theta), - ) - } - } - - fn adj_of_translation(params: &S::Vector<4>, point: &S::Vector<3>) -> S::Matrix<3, 3> { - Rotation3Impl::::hat(point).mat_mul(Rotation3Impl::::matrix(params)) - } - - fn ad_of_translation(point: &S::Vector<3>) -> S::Matrix<3, 3> { - Rotation3Impl::::hat(point) - } -} - -impl crate::traits::IsF64LieFactorGroupImpl<3, 4, 3> for Rotation3Impl { - fn dx_mat_v(omega: &sophus_calculus::types::VecF64<3>) -> [MatF64<3, 3>; 3] { - let theta_sq = omega.squared_norm(); - let dt_mat_omega_pos_idx = [(2, 1), (0, 2), (1, 0)]; - let dt_mat_omega_neg_idx = [(1, 2), (2, 0), (0, 1)]; - if theta_sq.real() < 1e-6 { - let mut l = [MatF64::<3, 3>::zeros(); 3]; - - for i in 0..3 { - *l[i].get_mut(dt_mat_omega_pos_idx[i]).unwrap() += 0.5; - *l[i].get_mut(dt_mat_omega_neg_idx[i]).unwrap() -= 0.5; - - println!("l[i] = {:?}", l[i]) - } - - println!("l = {:?}", l); - - return l; - } - - let mat_omega: MatF64<3, 3> = Rotation3Impl::::hat(omega); - let mat_omega_sq = mat_omega.clone().mat_mul(mat_omega); - - let theta = theta_sq.sqrt(); - let domega_theta = - VecF64::from_array([omega[0] / theta, omega[1] / theta, omega[2] / theta]); - - let a = (1.0 - theta.cos()) / theta_sq; - let dt_a = (-2.0 + 2.0 * theta.cos() + theta * theta.sin()) / (theta * theta_sq); - - let b = (theta - theta.sin()) / (theta_sq * theta); - let dt_b = -(2.0 * theta + theta * theta.cos() - 3.0 * theta.sin()) / theta.powi(4); - - let dt_mat_omega_sq = [ - MatF64::from_array2([ - [0.0, omega[1], omega[2]], - [omega[1], -2.0 * omega[0], 0.0], - [omega[2], 0.0, -2.0 * omega[0]], - ]), - MatF64::from_array2([ - [-2.0 * omega[1], omega[0], 0.0], - [omega[0], 0.0, omega[2]], - [0.0, omega[2], -2.0 * omega[1]], - ]), - MatF64::from_array2([ - [-2.0 * omega[2], 0.0, omega[0]], - [0.0, -2.0 * omega[2], omega[1]], - [omega[0], omega[1], 0.0], - ]), - ]; - - let mut l = [MatF64::<3, 3>::zeros(); 3]; - - for i in 0..3 { - l[i] = domega_theta[i] * dt_a * mat_omega; - println!("l[i] = {:?}", l[i]); - *l[i].get_mut(dt_mat_omega_pos_idx[i]).unwrap() += a; - *l[i].get_mut(dt_mat_omega_neg_idx[i]).unwrap() -= a; - println!("pl[i] = {:?}", l[i]); - l[i] += b * dt_mat_omega_sq[i] + domega_theta[i] * dt_b * mat_omega_sq; - } - - l - } - - fn dparams_matrix_times_point(params: &VecF64<4>, point: &VecF64<3>) -> MatF64<3, 4> { - let r = params[0]; - let ivec0 = params[1]; - let ivec1 = params[2]; - let ivec2 = params[3]; - - let p0 = point[0]; - let p1 = point[1]; - let p2 = point[2]; - - MatF64::from_array2([ - [ - 2.0 * ivec1 * p2 - 2.0 * ivec2 * p1, - 2.0 * ivec1 * p1 + 2.0 * ivec2 * p2, - 2.0 * r * p2 + 2.0 * ivec0 * p1 - 4.0 * ivec1 * p0, - -2.0 * r * p1 + 2.0 * ivec0 * p2 - 4.0 * ivec2 * p0, - ], - [ - -2.0 * ivec0 * p2 + 2.0 * ivec2 * p0, - -2.0 * r * p2 - 4.0 * ivec0 * p1 + 2.0 * ivec1 * p0, - 2.0 * ivec0 * p0 + 2.0 * ivec2 * p2, - 2.0 * r * p0 + 2.0 * ivec1 * p2 - 4.0 * ivec2 * p1, - ], - [ - 2.0 * ivec0 * p1 - 2.0 * ivec1 * p0, - 2.0 * r * p1 - 4.0 * ivec0 * p2 + 2.0 * ivec2 * p0, - -2.0 * r * p0 - 4.0 * ivec1 * p2 + 2.0 * ivec2 * p1, - 2.0 * ivec0 * p0 + 2.0 * ivec1 * p1, - ], - ]) - } - - fn dx_mat_v_inverse(omega: &sophus_calculus::types::VecF64<3>) -> [MatF64<3, 3>; 3] { - let theta_sq = omega.squared_norm(); - let theta = 
theta_sq.sqrt();
-        let half_theta = 0.5 * theta;
-        let mat_omega: MatF64<3, 3> = Rotation3Impl::<f64>::hat(omega);
-        let mat_omega_sq = mat_omega.clone().mat_mul(mat_omega);
-
-        let dt_mat_omega_pos_idx = [(2, 1), (0, 2), (1, 0)];
-        let dt_mat_omega_neg_idx = [(1, 2), (2, 0), (0, 1)];
-
-        if theta_sq.real() < 1e-6 {
-            let mut l = [MatF64::<3, 3>::zeros(); 3];
-
-            for i in 0..3 {
-                *l[i].get_mut(dt_mat_omega_pos_idx[i]).unwrap() -= 0.5;
-                *l[i].get_mut(dt_mat_omega_neg_idx[i]).unwrap() += 0.5;
-
-                println!("l[i] = {:?}", l[i])
-            }
-
-            println!("l = {:?}", l);
-
-            return l;
-        }
-
-        let domega_theta =
-            VecF64::from_array([omega[0] / theta, omega[1] / theta, omega[2] / theta]);
-
-        let c = (1.0 - (0.5 * theta * half_theta.cos()) / (half_theta.sin())) / theta_sq;
-
-        let dt_c = (-2.0
-            + (0.25 * theta_sq) / (half_theta.sin() * half_theta.sin())
-            + (half_theta * half_theta.cos()) / half_theta.sin())
-            / theta.powi(3);
-
-        let dt_mat_omega_sq = [
-            MatF64::from_array2([
-                [0.0, omega[1], omega[2]],
-                [omega[1], -2.0 * omega[0], 0.0],
-                [omega[2], 0.0, -2.0 * omega[0]],
-            ]),
-            MatF64::from_array2([
-                [-2.0 * omega[1], omega[0], 0.0],
-                [omega[0], 0.0, omega[2]],
-                [0.0, omega[2], -2.0 * omega[1]],
-            ]),
-            MatF64::from_array2([
-                [-2.0 * omega[2], 0.0, omega[0]],
-                [0.0, -2.0 * omega[2], omega[1]],
-                [omega[0], omega[1], 0.0],
-            ]),
-        ];
-
-        let mut l = [MatF64::<3, 3>::zeros(); 3];
-
-        for i in 0..3 {
-            l[i][dt_mat_omega_pos_idx[i]] += -0.5;
-            l[i][dt_mat_omega_neg_idx[i]] -= -0.5;
-            l[i] += dt_mat_omega_sq[i].scaled(c) + domega_theta[i] * mat_omega_sq.scaled(dt_c);
-        }
-
-        l
-    }
-}
-
-/// 3d rotation group - SO(3)
-pub type Rotation3<S> = LieGroup<S, 3, 4, 3, 3, Rotation3Impl<S>>;
-
-/// 3d isometry implementation - SE(3)
-pub type Isometry3Impl<S> = crate::translation_product_product::TranslationProductGroupImpl<
-    S,
-    6,
-    7,
-    3,
-    4,
-    3,
-    4,
-    Rotation3Impl<S>,
->;
-
-mod tests {
-
-    #[test]
-    fn rotation3_prop_tests() {
-        use super::Rotation3;
-        use sophus_calculus::dual::dual_scalar::Dual;
-
-        Rotation3::<f64>::test_suite();
-        Rotation3::<Dual>::test_suite();
-        Rotation3::<f64>::real_test_suite();
-        Rotation3::<f64>::real_factor_test_suite();
-    }
-}
diff --git a/crates/sophus_lie/src/traits.rs b/crates/sophus_lie/src/traits.rs
index b0baf9c..1868816 100644
--- a/crates/sophus_lie/src/traits.rs
+++ b/crates/sophus_lie/src/traits.rs
@@ -1,13 +1,11 @@
 use std::fmt::Debug;
-use sophus_calculus::dual::dual_scalar::Dual;
-use sophus_calculus::manifold::traits::TangentImpl;
-use sophus_calculus::manifold::{self};
-use sophus_calculus::types::params::HasParams;
-use sophus_calculus::types::params::ParamsImpl;
-use sophus_calculus::types::scalar::IsScalar;
-use sophus_calculus::types::MatF64;
-use sophus_calculus::types::VecF64;
+use sophus_core::calculus::manifold::traits::TangentImpl;
+use sophus_core::calculus::manifold::{self};
+use sophus_core::linalg::scalar::IsRealScalar;
+use sophus_core::linalg::scalar::IsScalar;
+use sophus_core::params::HasParams;
+use sophus_core::params::ParamsImpl;

 /// Lie Group implementation trait
 ///
@@ -34,9 +32,9 @@ pub trait IsLieGroupImpl<
     /// Generic scalar, real scalar, and dual scalar
     type GenG>: IsLieGroupImpl;
     /// Real scalar
-    type RealG: IsLieGroupImpl;
-    /// Dual scalar - for automatic differentiation
-    type DualG: IsLieGroupImpl;
+    type RealG: IsLieGroupImpl;
+    /// DualScalar - for automatic differentiation
+    type DualG: IsLieGroupImpl;
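The RealG / DualG pair above exists so that the same group implementation can be instantiated once with a real scalar for plain evaluation and once with a dual scalar for forward-mode automatic differentiation. A minimal self-contained sketch of the dual-number mechanism itself, in plain Rust (ToyDual is an illustrative stand-in, not a sophus_core type):

#[derive(Clone, Copy, Debug, PartialEq)]
struct ToyDual {
    re: f64,  // value
    eps: f64, // derivative carried alongside the value
}

impl std::ops::Mul for ToyDual {
    type Output = ToyDual;
    fn mul(self, rhs: ToyDual) -> ToyDual {
        // product rule: (a + a' eps) * (b + b' eps) = a*b + (a'*b + a*b') eps
        ToyDual {
            re: self.re * rhs.re,
            eps: self.eps * rhs.re + self.re * rhs.eps,
        }
    }
}

fn main() {
    // d/dx (x * x) at x = 3: seed eps = 1 on the input, read the derivative off the output.
    let x = ToyDual { re: 3.0, eps: 1.0 };
    assert_eq!((x * x).eps, 6.0);
}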
 /// is transformation origin preserving?
 const IS_ORIGIN_PRESERVING: bool;
@@ -56,9 +54,6 @@ pub trait IsLieGroupImpl<

     // Manifold / Lie Group concepts

-    /// are there multiple shortest paths to the identity?
-    fn has_shortest_path_ambiguity(params: &S::Vector) -> bool;
-
     /// group adjoint
     fn adj(params: &S::Vector) -> S::Matrix;

@@ -101,30 +96,35 @@ pub trait IsLieGroupImpl<
 }

 /// Lie Group implementation trait for real scalar, f64
-pub trait IsF64LieGroupImpl<
+pub trait IsRealLieGroupImpl<
+    S: IsRealScalar,
     const DOF: usize,
     const PARAMS: usize,
     const POINT: usize,
     const AMBIENT: usize,
->: IsLieGroupImpl
+    const BATCH_SIZE: usize,
+>: IsLieGroupImpl
 {
     /// derivative of group multiplication with respect to the first argument
-    fn da_a_mul_b(a: &VecF64, b: &VecF64) -> MatF64;
+    fn da_a_mul_b(a: &S::Vector, b: &S::Vector) -> S::Matrix;

     /// derivative of group multiplication with respect to the second argument
-    fn db_a_mul_b(a: &VecF64, b: &VecF64) -> MatF64;
+    fn db_a_mul_b(a: &S::Vector, b: &S::Vector) -> S::Matrix;

     /// derivative of exponential map
-    fn dx_exp(tangent: &VecF64) -> MatF64;
+    fn dx_exp(tangent: &S::Vector) -> S::Matrix;

     /// derivative of exponential map at the identity
-    fn dx_exp_x_at_0() -> MatF64;
+    fn dx_exp_x_at_0() -> S::Matrix;

     /// derivative of logarithmic map
-    fn dx_log_x(params: &VecF64) -> MatF64;
+    fn dx_log_x(params: &S::Vector) -> S::Matrix;

     /// derivative of exponential map times a point at the identity
-    fn dx_exp_x_times_point_at_0(point: &VecF64) -> MatF64;
+    fn dx_exp_x_times_point_at_0(point: S::Vector) -> S::Matrix;
+
+    /// are there multiple shortest paths to the identity?
+    fn has_shortest_path_ambiguity(params: &S::Vector) -> S::Mask;
 }

 /// Lie Factor Group
@@ -147,9 +147,9 @@ pub trait IsLieFactorGroupImpl<
         BATCH_SIZE,
     >;
     /// Real scalar
-    type RealFactorG: IsLieFactorGroupImpl;
-    /// Dual scalar - for automatic differentiation
-    type DualFactorG: IsLieFactorGroupImpl;
+    type RealFactorG: IsLieFactorGroupImpl;
+    /// DualScalar - for automatic differentiation
+    type DualFactorG: IsLieFactorGroupImpl;

     /// V matrix - used by semi-direct product exponential
     fn mat_v(tangent: &S::Vector) -> S::Matrix;

@@ -168,22 +168,28 @@ pub trait IsLieFactorGroupImpl<
 }

 /// Lie Factor Group implementation trait for real scalar, f64
-pub trait IsF64LieFactorGroupImpl:
-    IsLieGroupImpl
-    + IsLieFactorGroupImpl
-    + IsF64LieGroupImpl
+pub trait IsRealLieFactorGroupImpl<
+    S: IsRealScalar,
+    const DOF: usize,
+    const PARAMS: usize,
+    const POINT: usize,
+    const BATCH_SIZE: usize,
+>:
+    IsLieGroupImpl
+    + IsLieFactorGroupImpl
+    + IsRealLieGroupImpl
 {
     /// derivative of V matrix
-    fn dx_mat_v(tangent: &VecF64) -> [MatF64; DOF];
+    fn dx_mat_v(tangent: &S::Vector) -> [S::Matrix; DOF];

     /// derivative of V matrix inverse
-    fn dx_mat_v_inverse(tangent: &VecF64) -> [MatF64; DOF];
+    fn dx_mat_v_inverse(tangent: &S::Vector) -> [S::Matrix; DOF];

     /// derivative of group transformation times a point with respect to the group parameters
     fn dparams_matrix_times_point(
-        params: &VecF64,
-        point: &VecF64,
-    ) -> MatF64;
+        params: &S::Vector,
+        point: &S::Vector,
+    ) -> S::Matrix;
 }

 /// Lie Group trait
@@ -201,9 +207,18 @@ pub trait IsLieGroup<
     /// Lie Group implementation with generic scalar, real scalar, and dual scalar
     type GenG>: IsLieGroupImpl;
     /// Lie Group implementation with real scalar
-    type RealG: IsLieGroupImpl;
+    type RealG: IsLieGroupImpl;
     /// Lie Group implementation with dual scalar - for automatic differentiation
-    type DualG: IsLieGroupImpl;
+    type DualG: IsLieGroupImpl;
+
+    /// degree of freedom
+
const DOF: usize; + /// number of parameters + const PARAMS: usize; + /// point dimension + const POINT: usize; + /// ambient dimension + const AMBIENT: usize; /// Get the Lie Group type GenGroup, G2: IsLieGroupImpl>: IsLieGroup< @@ -215,9 +230,9 @@ pub trait IsLieGroup< BATCH_SIZE >; /// Lie Group with real scalar - type RealGroup: IsLieGroup; + type RealGroup: IsLieGroup; /// Lie Group with dual scalar - for automatic differentiation - type DualGroup: IsLieGroup; + type DualGroup: IsLieGroup; } /// Lie Group trait for real scalar, f64 diff --git a/crates/sophus_opt/Cargo.toml b/crates/sophus_opt/Cargo.toml index 6800df5..0db3d4c 100644 --- a/crates/sophus_opt/Cargo.toml +++ b/crates/sophus_opt/Cargo.toml @@ -11,7 +11,7 @@ repository.workspace = true version.workspace = true [dependencies] -sophus_calculus.workspace = true +sophus_core.workspace = true sophus_image.workspace = true sophus_lie.workspace = true sophus_sensor.workspace = true diff --git a/crates/sophus_opt/src/block.rs b/crates/sophus_opt/src/block.rs index b1f86bb..06959bf 100644 --- a/crates/sophus_opt/src/block.rs +++ b/crates/sophus_opt/src/block.rs @@ -1,7 +1,6 @@ use nalgebra::Const; - -use sophus_calculus::types::MatF64; -use sophus_calculus::types::VecF64; +use sophus_core::linalg::MatF64; +use sophus_core::linalg::VecF64; /// Range of a block #[derive(Clone, Debug, Copy)] diff --git a/crates/sophus_opt/src/example_problems/cam_calib.rs b/crates/sophus_opt/src/example_problems/cam_calib.rs index d856828..a5f901f 100644 --- a/crates/sophus_opt/src/example_problems/cam_calib.rs +++ b/crates/sophus_opt/src/example_problems/cam_calib.rs @@ -10,32 +10,32 @@ use crate::robust_kernel::HuberKernel; use crate::variables::VarFamily; use crate::variables::VarKind; use crate::variables::VarPoolBuilder; - -use sophus_calculus::types::MatF64; -use sophus_calculus::types::VecF64; +use sophus_core::linalg::MatF64; +use sophus_core::linalg::VecF64; use sophus_image::image_view::ImageSize; -use sophus_lie::isometry3::Isometry3; -use sophus_lie::rotation3::Rotation3; +use sophus_lie::groups::isometry3::Isometry3; +use sophus_lie::groups::rotation3::Rotation3; +use sophus_sensor::camera_enum::perspective_camera::PinholeCamera; + use sophus_lie::traits::IsTranslationProductGroup; -use sophus_sensor::perspective_camera::PinholeCamera; use std::collections::HashMap; /// Camera calibration problem #[derive(Clone)] pub struct CamCalibProblem { /// intrinsics - pub intrinsics: PinholeCamera, + pub intrinsics: PinholeCamera, /// world from camera isometries - pub world_from_cameras: Vec>, + pub world_from_cameras: Vec>, /// points in world pub points_in_world: Vec>, /// observations pub observations: Vec, /// true intrinsics - pub true_intrinsics: PinholeCamera, + pub true_intrinsics: PinholeCamera, /// true world from camera isometries - pub true_world_from_cameras: Vec>, + pub true_world_from_cameras: Vec>, /// true points in world pub true_points_in_world: Vec>, } @@ -62,7 +62,7 @@ impl CamCalibProblem { height: 480, }; - let true_intrinsics = PinholeCamera::::from_params_and_size( + let true_intrinsics = PinholeCamera::::from_params_and_size( &VecF64::<4>::new(600.0, 600.0, 320.0, 240.0), image_size, ); @@ -85,8 +85,9 @@ impl CamCalibProblem { let true_uv_in_img0_proof = true_intrinsics.cam_proj(&true_point_in_cam0); approx::assert_abs_diff_eq!(true_uv_in_img0, true_uv_in_img0_proof, epsilon = 0.1); - let true_cam1_from_cam0 = - true_world_from_cameras[1].inverse() * &true_world_from_cameras[0]; + let true_cam1_from_cam0 = 
true_world_from_cameras[1]
+            .inverse()
+            .group_mul(&true_world_from_cameras[0]);
         let true_point_in_cam1 = true_cam1_from_cam0.transform(&true_point_in_cam0);
         let true_uv_in_img1 = true_intrinsics.cam_proj(&true_point_in_cam1);
         let img_noise = VecF64::<2>::new(rng.gen::<f64>() - 0.5, rng.gen::<f64>() - 0.5);
@@ -119,7 +120,7 @@ impl CamCalibProblem {
         Self {
             world_from_cameras: vec![
-                Isometry3::::identity(),
+                Isometry3::::identity(),
                 true_world_from_cameras[1],
                 true_world_from_cameras[1],
             ],
@@ -140,14 +141,14 @@ impl CamCalibProblem {
             terms: self.observations.clone(),
         };

-        let cam_family: VarFamily> =
+        let cam_family: VarFamily> =
             VarFamily::new(intrinsics_var_kind, vec![self.intrinsics]);

         let mut id = HashMap::new();
         id.insert(0, ());
         id.insert(1, ());

-        let pose_family: VarFamily> = VarFamily::new_with_const_ids(
+        let pose_family: VarFamily> = VarFamily::new_with_const_ids(
             VarKind::Free,
             self.world_from_cameras.clone(),
             id.clone(),
@@ -178,7 +179,7 @@ impl CamCalibProblem {
             },
         );

-        let refined_world_from_robot = up_var_pool.get_members::>("poses".into());
+        let refined_world_from_robot = up_var_pool.get_members::>("poses".into());

         approx::assert_abs_diff_eq!(
             refined_world_from_robot[2].translation(),
@@ -190,7 +191,7 @@ impl CamCalibProblem {
     /// optimize with priors
     pub fn optimize_with_priors(&self) {
         let priors =
-            CostSignature::<1, (Isometry3, MatF64<6, 6>), Isometry3PriorTermSignature> {
+            CostSignature::<1, (Isometry3, MatF64<6, 6>), Isometry3PriorTermSignature> {
                 family_names: ["poses".into()],
                 terms: vec![
                     Isometry3PriorTermSignature {
@@ -215,10 +216,10 @@ impl CamCalibProblem {
             terms: self.observations.clone(),
         };

-        let cam_family: VarFamily> =
+        let cam_family: VarFamily> =
             VarFamily::new(VarKind::Conditioned, vec![self.intrinsics]);

-        let pose_family: VarFamily> =
+        let pose_family: VarFamily> =
             VarFamily::new(VarKind::Free, self.world_from_cameras.clone());

         let point_family: VarFamily> =
@@ -242,7 +243,7 @@ impl CamCalibProblem {
             },
         );

-        let refined_world_from_robot = up_var_pool.get_members::>("poses".into());
+        let refined_world_from_robot = up_var_pool.get_members::>("poses".into());

         println!(
             "refined_world_from_robot[0].translation(): {:?}",
diff --git a/crates/sophus_opt/src/example_problems/cost_fn/isometry2_prior.rs b/crates/sophus_opt/src/example_problems/cost_fn/isometry2_prior.rs
index dad21a0..a48ce97 100644
--- a/crates/sophus_opt/src/example_problems/cost_fn/isometry2_prior.rs
+++ b/crates/sophus_opt/src/example_problems/cost_fn/isometry2_prior.rs
@@ -4,15 +4,15 @@ use crate::robust_kernel;
 use crate::term::MakeTerm;
 use crate::term::Term;
 use crate::variables::VarKind;
-
-use sophus_calculus::dual::dual_scalar::Dual;
-use sophus_calculus::dual::dual_vector::DualV;
-use sophus_calculus::maps::vector_valued_maps::VectorValuedMapFromVector;
-use sophus_calculus::types::params::HasParams;
-use sophus_calculus::types::scalar::IsScalar;
-use sophus_calculus::types::vector::IsVector;
-use sophus_calculus::types::VecF64;
-use sophus_lie::isometry2::Isometry2;
+use sophus_core::calculus::dual::dual_scalar::DualScalar;
+use sophus_core::calculus::dual::dual_vector::DualVector;
+use sophus_core::calculus::maps::vector_valued_maps::VectorValuedMapFromVector;
+use sophus_core::linalg::scalar::IsScalar;
+use sophus_core::linalg::scalar::IsSingleScalar;
+use sophus_core::linalg::vector::IsVector;
+use sophus_core::linalg::VecF64;
+use sophus_core::params::HasParams;
+use sophus_lie::groups::isometry2::Isometry2;
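The residual implemented below is r = log(isometry * prior^-1), i.e. the tangent-space offset between the estimate and the prior mean. As a toy analogue on the translation group (R, +), where group_mul is addition, inverse is negation, and log is the identity map (illustrative only; res_fn_1d is not part of this crate):

fn res_fn_1d(x_est: f64, x_prior: f64) -> f64 {
    // log(x_est compose inverse(x_prior)) collapses to a plain difference in 1-D
    x_est + (-x_prior)
}

fn main() {
    assert_eq!(res_fn_1d(2.0, 0.5), 1.5);
}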
 /// Cost function for a prior on a 2d isometry
 #[derive(Copy, Clone)]
@@ -22,13 +22,13 @@ pub struct Isometry2PriorCostFn {}
 #[derive(Clone)]
 pub struct Isometry2PriorTermSignature {
     /// prior mean
-    pub isometry_prior_mean: Isometry2,
+    pub isometry_prior_mean: Isometry2,
     /// entity index
     pub entity_indices: [usize; 1],
 }

 impl IsTermSignature<1> for Isometry2PriorTermSignature {
-    type Constants = Isometry2;
+    type Constants = Isometry2;

     fn c_ref(&self) -> &Self::Constants {
         &self.isometry_prior_mean
@@ -41,39 +41,37 @@ impl IsTermSignature<1> for Isometry2PriorTermSignature {
     const DOF_TUPLE: [i64; 1] = [3];
 }

-fn res_fn>(
-    isometry: Isometry2,
-    isometry_prior_mean: Isometry2,
+fn res_fn>(
+    isometry: Isometry2,
+    isometry_prior_mean: Isometry2,
 ) -> Scalar::Vector<3> {
-    (isometry * &isometry_prior_mean.inverse()).log()
+    Isometry2::::group_mul(&isometry, &isometry_prior_mean.inverse()).log()
 }

-impl IsResidualFn<3, 1, Isometry2, Isometry2> for Isometry2PriorCostFn {
+impl IsResidualFn<3, 1, Isometry2, Isometry2> for Isometry2PriorCostFn {
     fn eval(
         &self,
-        args: Isometry2,
+        args: Isometry2,
         var_kinds: [VarKind; 1],
         robust_kernel: Option,
-        isometry_prior_mean: &Isometry2,
+        isometry_prior_mean: &Isometry2,
     ) -> Term<3, 1> {
-        let isometry: Isometry2 = args;
+        let isometry: Isometry2 = args;

         let residual = res_fn(isometry, *isometry_prior_mean);
-        let dx_res_fn = |x: DualV<3>| -> DualV<3> {
-            let pp = Isometry2::::exp(&x).group_mul(&isometry.to_dual_c());
+        let dx_res_fn = |x: DualVector<3>| -> DualVector<3> {
+            let pp = Isometry2::::exp(&x).group_mul(&isometry.to_dual_c());
             res_fn(
                 pp,
-                Isometry2::from_params(&DualV::c(*isometry_prior_mean.params())),
+                Isometry2::from_params(&DualVector::from_real_vector(
+                    *isometry_prior_mean.params(),
+                )),
             )
         };

         let zeros: VecF64<3> = VecF64::<3>::zeros();

-        (|| VectorValuedMapFromVector::static_fw_autodiff(dx_res_fn, zeros),).make_term(
-            var_kinds,
-            residual,
-            robust_kernel,
-            None,
-        )
+        (|| VectorValuedMapFromVector::::static_fw_autodiff(dx_res_fn, zeros),)
+            .make_term(var_kinds, residual, robust_kernel, None)
     }
 }
diff --git a/crates/sophus_opt/src/example_problems/cost_fn/isometry3_prior.rs b/crates/sophus_opt/src/example_problems/cost_fn/isometry3_prior.rs
index bce1538..a5ed7c5 100644
--- a/crates/sophus_opt/src/example_problems/cost_fn/isometry3_prior.rs
+++ b/crates/sophus_opt/src/example_problems/cost_fn/isometry3_prior.rs
@@ -4,16 +4,16 @@ use crate::robust_kernel;
 use crate::term::MakeTerm;
 use crate::term::Term;
 use crate::variables::VarKind;
-
-use sophus_calculus::dual::dual_scalar::Dual;
-use sophus_calculus::dual::dual_vector::DualV;
-use sophus_calculus::maps::vector_valued_maps::VectorValuedMapFromVector;
-use sophus_calculus::types::params::HasParams;
-use sophus_calculus::types::scalar::IsScalar;
-use sophus_calculus::types::vector::IsVector;
-use sophus_calculus::types::MatF64;
-use sophus_calculus::types::VecF64;
-use sophus_lie::isometry3::Isometry3;
+use sophus_core::calculus::dual::dual_scalar::DualScalar;
+use sophus_core::calculus::dual::dual_vector::DualVector;
+use sophus_core::calculus::maps::vector_valued_maps::VectorValuedMapFromVector;
+use sophus_core::linalg::scalar::IsScalar;
+use sophus_core::linalg::scalar::IsSingleScalar;
+use sophus_core::linalg::vector::IsVector;
+use sophus_core::linalg::MatF64;
+use sophus_core::linalg::VecF64;
+use sophus_core::params::HasParams;
+use sophus_lie::groups::isometry3::Isometry3;

 /// Cost function for a prior on a 3d isometry
 #[derive(Copy, Clone)]
@@ -23,13 +23,13 @@ pub struct Isometry3PriorCostFn {}

 #[derive(Clone)]
 pub
struct Isometry3PriorTermSignature { /// prior mean - pub isometry_prior: (Isometry3, MatF64<6, 6>), + pub isometry_prior: (Isometry3, MatF64<6, 6>), /// entity index pub entity_indices: [usize; 1], } impl IsTermSignature<1> for Isometry3PriorTermSignature { - type Constants = (Isometry3, MatF64<6, 6>); + type Constants = (Isometry3, MatF64<6, 6>); fn c_ref(&self) -> &Self::Constants { &self.isometry_prior @@ -42,39 +42,37 @@ impl IsTermSignature<1> for Isometry3PriorTermSignature { const DOF_TUPLE: [i64; 1] = [3]; } -fn res_fn>( - isometry: Isometry3, - isometry_prior_mean: Isometry3, +fn res_fn + IsSingleScalar>( + isometry: Isometry3, + isometry_prior_mean: Isometry3, ) -> Scalar::Vector<6> { - (isometry * &isometry_prior_mean.inverse()).log() + Isometry3::::group_mul(&isometry, &isometry_prior_mean.inverse()).log() } -impl IsResidualFn<6, 1, Isometry3, (Isometry3, MatF64<6, 6>)> for Isometry3PriorCostFn { +impl IsResidualFn<6, 1, Isometry3, (Isometry3, MatF64<6, 6>)> + for Isometry3PriorCostFn +{ fn eval( &self, - args: Isometry3, + args: Isometry3, var_kinds: [VarKind; 1], robust_kernel: Option, - isometry_prior: &(Isometry3, MatF64<6, 6>), + isometry_prior: &(Isometry3, MatF64<6, 6>), ) -> Term<6, 1> { - let isometry: Isometry3 = args; + let isometry: Isometry3 = args; let residual = res_fn(isometry, isometry_prior.0); - let dx_res_fn = |x: DualV<6>| -> DualV<6> { - let pp = Isometry3::::exp(&x).group_mul(&isometry.to_dual_c()); + let dx_res_fn = |x: DualVector<6>| -> DualVector<6> { + let pp = Isometry3::::exp(&x).group_mul(&isometry.to_dual_c()); res_fn( pp, - Isometry3::from_params(&DualV::c(*isometry_prior.0.params())), + Isometry3::from_params(&DualVector::from_real_vector(*isometry_prior.0.params())), ) }; let zeros: VecF64<6> = VecF64::<6>::zeros(); - (|| VectorValuedMapFromVector::static_fw_autodiff(dx_res_fn, zeros),).make_term( - var_kinds, - residual, - robust_kernel, - Some(isometry_prior.1), - ) + (|| VectorValuedMapFromVector::::static_fw_autodiff(dx_res_fn, zeros),) + .make_term(var_kinds, residual, robust_kernel, Some(isometry_prior.1)) } } diff --git a/crates/sophus_opt/src/example_problems/cost_fn/pose_graph.rs b/crates/sophus_opt/src/example_problems/cost_fn/pose_graph.rs index bd5a365..bed652b 100644 --- a/crates/sophus_opt/src/example_problems/cost_fn/pose_graph.rs +++ b/crates/sophus_opt/src/example_problems/cost_fn/pose_graph.rs @@ -4,30 +4,34 @@ use crate::robust_kernel; use crate::term::MakeTerm; use crate::term::Term; use crate::variables::VarKind; - -use sophus_calculus::types::scalar::IsScalar; -use sophus_lie::isometry2::Isometry2; +use sophus_core::linalg::scalar::IsSingleScalar; +use sophus_lie::groups::isometry2::Isometry2; /// residual function for a pose-pose constraint -pub fn res_fn>( - world_from_pose_a: Isometry2, - world_from_pose_b: Isometry2, - pose_a_from_pose_b: Isometry2, +pub fn res_fn( + world_from_pose_a: Isometry2, + world_from_pose_b: Isometry2, + pose_a_from_pose_b: Isometry2, ) -> S::Vector<3> { - (world_from_pose_a.inverse() * &world_from_pose_b * &pose_a_from_pose_b.inverse()).log() + (world_from_pose_a + .inverse() + .group_mul(&world_from_pose_b.group_mul(&pose_a_from_pose_b.inverse()))) + .log() } /// Cost function for 2d pose graph #[derive(Copy, Clone, Debug)] pub struct PoseGraphCostFn {} -impl IsResidualFn<12, 2, (Isometry2, Isometry2), Isometry2> for PoseGraphCostFn { +impl IsResidualFn<12, 2, (Isometry2, Isometry2), Isometry2> + for PoseGraphCostFn +{ fn eval( &self, - world_from_pose_x: (Isometry2, Isometry2), + 
world_from_pose_x: (Isometry2, Isometry2), var_kinds: [VarKind; 2], robust_kernel: Option, - obs: &Isometry2, + obs: &Isometry2, ) -> Term<12, 2> { let world_from_pose_a = world_from_pose_x.0; let world_from_pose_b = world_from_pose_x.1; @@ -38,13 +42,13 @@ impl IsResidualFn<12, 2, (Isometry2, Isometry2), Isometry2> for P || { -Isometry2::dx_log_a_exp_x_b_at_0( &world_from_pose_a.inverse(), - &(world_from_pose_b * &obs.inverse()), + &world_from_pose_b.group_mul(&obs.inverse()), ) }, || { Isometry2::dx_log_a_exp_x_b_at_0( &world_from_pose_a.inverse(), - &(world_from_pose_b * &obs.inverse()), + &world_from_pose_b.group_mul(&obs.inverse()), ) }, ) @@ -56,13 +60,13 @@ impl IsResidualFn<12, 2, (Isometry2, Isometry2), Isometry2> for P #[derive(Debug, Clone)] pub struct PoseGraphCostTermSignature { /// 2d relative pose constraint - pub pose_a_from_pose_b: Isometry2, + pub pose_a_from_pose_b: Isometry2, /// ids of the two poses pub entity_indices: [usize; 2], } impl IsTermSignature<2> for PoseGraphCostTermSignature { - type Constants = Isometry2; + type Constants = Isometry2; fn c_ref(&self) -> &Self::Constants { &self.pose_a_from_pose_b diff --git a/crates/sophus_opt/src/example_problems/cost_fn/reprojection.rs b/crates/sophus_opt/src/example_problems/cost_fn/reprojection.rs index 75767db..d3491a4 100644 --- a/crates/sophus_opt/src/example_problems/cost_fn/reprojection.rs +++ b/crates/sophus_opt/src/example_problems/cost_fn/reprojection.rs @@ -5,21 +5,21 @@ use crate::term::MakeTerm; use crate::term::Term; use crate::variables::IsVariable; use crate::variables::VarKind; - -use sophus_calculus::dual::dual_scalar::Dual; -use sophus_calculus::dual::dual_vector::DualV; -use sophus_calculus::maps::vector_valued_maps::VectorValuedMapFromVector; -use sophus_calculus::types::scalar::IsScalar; -use sophus_calculus::types::vector::IsVector; -use sophus_calculus::types::VecF64; -use sophus_lie::isometry3::Isometry3; -use sophus_sensor::perspective_camera::PinholeCamera; +use sophus_core::calculus::dual::dual_scalar::DualScalar; +use sophus_core::calculus::dual::dual_vector::DualVector; +use sophus_core::calculus::maps::vector_valued_maps::VectorValuedMapFromVector; +use sophus_core::linalg::scalar::IsScalar; +use sophus_core::linalg::scalar::IsSingleScalar; +use sophus_core::linalg::vector::IsVector; +use sophus_core::linalg::VecF64; +use sophus_lie::groups::isometry3::Isometry3; +use sophus_sensor::camera_enum::perspective_camera::PinholeCamera; /// Camera re-projection cost function #[derive(Copy, Clone)] pub struct ReprojectionCostFn {} -impl IsVariable for PinholeCamera { +impl IsVariable for PinholeCamera { const DOF: usize = 4; fn update(&mut self, delta: nalgebra::DVectorView) { @@ -28,13 +28,15 @@ impl IsVariable for PinholeCamera { } } -fn res_fn>( - intrinscs: PinholeCamera, - world_from_camera: Isometry3, +fn res_fn>( + intrinscs: PinholeCamera, + world_from_camera: Isometry3, point_in_world: Scalar::Vector<3>, uv_in_image: Scalar::Vector<2>, ) -> Scalar::Vector<2> { - let point_in_cam = world_from_camera.inverse().transform(&point_in_world); + let point_in_cam = world_from_camera + .inverse() + .transform(&point_in_world.vector()); uv_in_image - intrinscs.cam_proj(&point_in_cam) } @@ -61,14 +63,14 @@ impl IsTermSignature<3> for ReprojTermSignature { const DOF_TUPLE: [i64; 3] = [4, 6, 3]; } -impl IsResidualFn<13, 3, (PinholeCamera, Isometry3, VecF64<3>), VecF64<2>> +impl IsResidualFn<13, 3, (PinholeCamera, Isometry3, VecF64<3>), VecF64<2>> for ReprojectionCostFn { fn eval( &self, (intrinsics, 
world_from_camera_pose, point_in_world): ( - PinholeCamera, - Isometry3, + PinholeCamera, + Isometry3, VecF64<3>, ), var_kinds: [VarKind; 3], @@ -84,43 +86,58 @@ impl IsResidualFn<13, 3, (PinholeCamera, Isometry3, VecF64<3>), VecF64 ); // calculate jacobian wrt intrinsics - let d0_res_fn = |x: DualV<4>| -> DualV<2> { + let d0_res_fn = |x: DualVector<4>| -> DualVector<2> { res_fn( - PinholeCamera::::from_params_and_size(&x, intrinsics.image_size()), + PinholeCamera::::from_params_and_size(&x, intrinsics.image_size()), world_from_camera_pose.to_dual_c(), - DualV::c(point_in_world), - DualV::c(*uv_in_image), + DualVector::from_real_vector(point_in_world), + DualVector::from_real_vector(*uv_in_image), ) }; // calculate jacobian wrt world_from_camera_pose - let d1_res_fn = |x: DualV<6>| -> DualV<2> { + let d1_res_fn = |x: DualVector<6>| -> DualVector<2> { res_fn( - PinholeCamera::::from_params_and_size( - &DualV::c(*intrinsics.params()), + PinholeCamera::::from_params_and_size( + &DualVector::from_real_vector(*intrinsics.params()), intrinsics.image_size(), ), - Isometry3::::exp(&x) * &world_from_camera_pose.to_dual_c(), - DualV::c(point_in_world), - DualV::c(*uv_in_image), + Isometry3::::exp(&x).group_mul(&world_from_camera_pose.to_dual_c()), + DualVector::from_real_vector(point_in_world), + DualVector::from_real_vector(*uv_in_image), ) }; // calculate jacobian wrt point_in_world - let d2_res_fn = |x: DualV<3>| -> DualV<2> { + let d2_res_fn = |x: DualVector<3>| -> DualVector<2> { res_fn( - PinholeCamera::::from_params_and_size( - &DualV::c(*intrinsics.params()), + PinholeCamera::::from_params_and_size( + &DualVector::from_real_vector(*intrinsics.params()), intrinsics.image_size(), ), world_from_camera_pose.to_dual_c(), x, - DualV::c(*uv_in_image), + DualVector::from_real_vector(*uv_in_image), ) }; ( - || VectorValuedMapFromVector::static_fw_autodiff(d0_res_fn, *intrinsics.params()), - || VectorValuedMapFromVector::static_fw_autodiff(d1_res_fn, VecF64::<6>::zeros()), - || VectorValuedMapFromVector::static_fw_autodiff(d2_res_fn, point_in_world), + || { + VectorValuedMapFromVector::::static_fw_autodiff( + d0_res_fn, + *intrinsics.params(), + ) + }, + || { + VectorValuedMapFromVector::::static_fw_autodiff( + d1_res_fn, + VecF64::<6>::zeros(), + ) + }, + || { + VectorValuedMapFromVector::::static_fw_autodiff( + d2_res_fn, + point_in_world, + ) + }, ) .make_term(var_kinds, residual, robust_kernel, None) } diff --git a/crates/sophus_opt/src/example_problems/pose_circle.rs b/crates/sophus_opt/src/example_problems/pose_circle.rs index 36ce489..dddd656 100644 --- a/crates/sophus_opt/src/example_problems/pose_circle.rs +++ b/crates/sophus_opt/src/example_problems/pose_circle.rs @@ -7,9 +7,9 @@ use crate::variables::VarKind; use crate::variables::VarPool; use crate::variables::VarPoolBuilder; -use sophus_calculus::types::vector::IsVector; -use sophus_calculus::types::VecF64; -use sophus_lie::isometry2::Isometry2; +use sophus_core::linalg::vector::IsVector; +use sophus_core::linalg::VecF64; +use sophus_lie::groups::isometry2::Isometry2; use std::collections::HashMap; use super::cost_fn::pose_graph::PoseGraphCostFn; @@ -18,11 +18,12 @@ use super::cost_fn::pose_graph::PoseGraphCostFn; #[derive(Debug, Clone)] pub struct PoseCircleProblem { /// true poses - pub true_world_from_robot: Vec>, + pub true_world_from_robot: Vec>, /// estimated poses - pub est_world_from_robot: Vec>, + pub est_world_from_robot: Vec>, /// pose-pose constraints - pub obs_pose_a_from_pose_b_poses: CostSignature<2, Isometry2, 
PoseGraphCostTermSignature>, + pub obs_pose_a_from_pose_b_poses: + CostSignature<2, Isometry2, PoseGraphCostTermSignature>, } impl Default for PoseCircleProblem { @@ -37,7 +38,7 @@ impl PoseCircleProblem { let mut true_world_from_robot_poses = vec![]; let mut est_world_from_robot_poses = vec![]; let mut obs_pose_a_from_pose_b_poses = - CostSignature::<2, Isometry2, PoseGraphCostTermSignature> { + CostSignature::<2, Isometry2, PoseGraphCostTermSignature> { family_names: ["poses".into(), "poses".into()], terms: vec![], }; @@ -49,7 +50,7 @@ impl PoseCircleProblem { let angle = frac * std::f64::consts::TAU; let x = radius * angle.cos(); let y = radius * angle.sin(); - let p = VecF64::<3>::from_c_array([x, y, 0.1 * angle]); + let p = VecF64::<3>::from_real_array([x, y, 0.1 * angle]); true_world_from_robot_poses.push(Isometry2::exp(&p)); } @@ -59,9 +60,12 @@ impl PoseCircleProblem { let true_world_from_pose_a = true_world_from_robot_poses[a_idx]; let true_world_from_pose_b = true_world_from_robot_poses[b_idx]; - let p = VecF64::<3>::from_c_array([0.001, 0.001, 0.0001]); - let pose_a_from_pose_b = - Isometry2::exp(&p) * &true_world_from_pose_a.inverse() * (&true_world_from_pose_b); + let p = VecF64::<3>::from_real_array([0.001, 0.001, 0.0001]); + let pose_a_from_pose_b = Isometry2::exp(&p).group_mul( + &true_world_from_pose_a + .inverse() + .group_mul(&true_world_from_pose_b), + ); obs_pose_a_from_pose_b_poses .terms @@ -82,8 +86,9 @@ impl PoseCircleProblem { let world_from_pose_a = est_world_from_robot_poses[a_idx]; let pose_a_from_pose_b = obs.pose_a_from_pose_b; - let p = VecF64::<3>::from_c_array([0.1, 0.1, 0.1]); - let world_from_pose_b = Isometry2::exp(&p) * &world_from_pose_a * &pose_a_from_pose_b; + let p = VecF64::<3>::from_real_array([0.1, 0.1, 0.1]); + let world_from_pose_b = + Isometry2::exp(&p).group_mul(&world_from_pose_a.group_mul(&pose_a_from_pose_b)); est_world_from_robot_poses.push(world_from_pose_b); } @@ -101,7 +106,7 @@ impl PoseCircleProblem { } /// Calculate the error of the current estimate - pub fn calc_error(&self, est_world_from_robot: &Vec>) -> f64 { + pub fn calc_error(&self, est_world_from_robot: &Vec>) -> f64 { let mut res_err = 0.0; for obs in self.obs_pose_a_from_pose_b_poses.terms.clone() { let residual = super::cost_fn::pose_graph::res_fn( @@ -120,7 +125,7 @@ impl PoseCircleProblem { let mut constants = HashMap::new(); constants.insert(0, ()); - let family: VarFamily> = VarFamily::new_with_const_ids( + let family: VarFamily> = VarFamily::new_with_const_ids( VarKind::Free, self.est_world_from_robot.clone(), constants, @@ -142,22 +147,16 @@ impl PoseCircleProblem { } } -mod tests { - - #[test] - fn simple_prior_opt_tests() { - use super::PoseCircleProblem; - use sophus_lie::isometry2::Isometry2; +#[test] +fn pose_circle_opt_tests() { + let pose_graph = PoseCircleProblem::new(2500); - let pose_graph = PoseCircleProblem::new(2500); + let res_err = pose_graph.calc_error(&pose_graph.est_world_from_robot); + assert!(res_err > 1.0, "{} > thr?", res_err); - let res_err = pose_graph.calc_error(&pose_graph.est_world_from_robot); - assert!(res_err > 1.0, "{} > thr?", res_err); + let up_var_pool = pose_graph.optimize(); + let refined_world_from_robot = up_var_pool.get_members::>("poses".into()); - let up_var_pool = pose_graph.optimize(); - let refined_world_from_robot = up_var_pool.get_members::>("poses".into()); - - let res_err = pose_graph.calc_error(&refined_world_from_robot); - assert!(res_err < 0.05, "{} < thr?", res_err); - } + let res_err = 
pose_graph.calc_error(&refined_world_from_robot); + assert!(res_err < 0.05, "{} < thr?", res_err); } diff --git a/crates/sophus_opt/src/example_problems/simple_prior.rs b/crates/sophus_opt/src/example_problems/simple_prior.rs index aecd436..b1180ad 100644 --- a/crates/sophus_opt/src/example_problems/simple_prior.rs +++ b/crates/sophus_opt/src/example_problems/simple_prior.rs @@ -9,19 +9,18 @@ use crate::nlls::OptParams; use crate::variables::VarFamily; use crate::variables::VarKind; use crate::variables::VarPoolBuilder; - -use sophus_calculus::types::vector::IsVector; -use sophus_calculus::types::MatF64; -use sophus_calculus::types::VecF64; -use sophus_lie::isometry2::Isometry2; -use sophus_lie::isometry3::Isometry3; +use sophus_core::linalg::vector::IsVector; +use sophus_core::linalg::MatF64; +use sophus_core::linalg::VecF64; +use sophus_lie::groups::isometry2::Isometry2; +use sophus_lie::groups::isometry3::Isometry3; /// Simple 2D isometry prior problem pub struct SimpleIso2PriorProblem { /// True world from robot isometry - pub true_world_from_robot: Isometry2, + pub true_world_from_robot: Isometry2, /// Estimated world from robot isometry - pub est_world_from_robot: Isometry2, + pub est_world_from_robot: Isometry2, } impl Default for SimpleIso2PriorProblem { @@ -32,11 +31,11 @@ impl Default for SimpleIso2PriorProblem { impl SimpleIso2PriorProblem { fn new() -> Self { - let p = VecF64::<3>::from_c_array([0.2, 0.0, 1.0]); - let true_world_from_robot = Isometry2::::exp(&p); + let p = VecF64::<3>::from_f64_array([0.2, 0.0, 1.0]); + let true_world_from_robot = Isometry2::::exp(&p); Self { true_world_from_robot, - est_world_from_robot: Isometry2::::identity(), + est_world_from_robot: Isometry2::::identity(), } } @@ -48,12 +47,12 @@ impl SimpleIso2PriorProblem { }]; let obs_pose_a_from_pose_b_poses = - CostSignature::<1, Isometry2, Isometry2PriorTermSignature> { + CostSignature::<1, Isometry2, Isometry2PriorTermSignature> { family_names: ["poses".into()], terms: cost_signature, }; - let family: VarFamily> = + let family: VarFamily> = VarFamily::new(VarKind::Free, vec![self.est_world_from_robot]); let families = VarPoolBuilder::new().add_family("poses", family).build(); @@ -75,7 +74,7 @@ impl SimpleIso2PriorProblem { initial_lm_nu: 1e-6, // if lm prior param is tiny }, ); - let refined_world_from_robot = up_families.get_members::>("poses".into()); + let refined_world_from_robot = up_families.get_members::>("poses".into()); approx::assert_abs_diff_eq!( self.true_world_from_robot.compact(), @@ -88,9 +87,9 @@ impl SimpleIso2PriorProblem { /// Simple 3D isometry prior problem pub struct SimpleIso3PriorProblem { /// True world from robot isometry - pub true_world_from_robot: Isometry3, + pub true_world_from_robot: Isometry3, /// Estimated world from robot isometry - pub est_world_from_robot: Isometry3, + pub est_world_from_robot: Isometry3, } impl Default for SimpleIso3PriorProblem { @@ -101,11 +100,11 @@ impl Default for SimpleIso3PriorProblem { impl SimpleIso3PriorProblem { fn new() -> Self { - let p = VecF64::<6>::from_c_array([0.2, 0.0, 1.0, 0.2, 0.0, 1.0]); - let true_world_from_robot = Isometry3::::exp(&p); + let p = VecF64::<6>::from_real_array([0.2, 0.0, 1.0, 0.2, 0.0, 1.0]); + let true_world_from_robot = Isometry3::::exp(&p); Self { true_world_from_robot, - est_world_from_robot: Isometry3::::identity(), + est_world_from_robot: Isometry3::::identity(), } } @@ -117,12 +116,12 @@ impl SimpleIso3PriorProblem { }]; let obs_pose_a_from_pose_b_poses = - CostSignature::<1, (Isometry3, MatF64<6, 
6>), Isometry3PriorTermSignature> { + CostSignature::<1, (Isometry3, MatF64<6, 6>), Isometry3PriorTermSignature> { family_names: ["poses".into()], terms: cost_signature, }; - let family: VarFamily> = + let family: VarFamily> = VarFamily::new(VarKind::Free, vec![self.est_world_from_robot]); let families = VarPoolBuilder::new().add_family("poses", family).build(); @@ -144,7 +143,7 @@ impl SimpleIso3PriorProblem { initial_lm_nu: 1e-6, // if lm prior param is tiny }, ); - let refined_world_from_robot = up_families.get_members::>("poses".into()); + let refined_world_from_robot = up_families.get_members::>("poses".into()); approx::assert_abs_diff_eq!( self.true_world_from_robot.compact(), @@ -154,14 +153,8 @@ impl SimpleIso3PriorProblem { } } -mod tests { - - #[test] - fn simple_prior_opt_tests() { - use super::SimpleIso2PriorProblem; - use super::SimpleIso3PriorProblem; - - SimpleIso2PriorProblem::new().test(); - SimpleIso3PriorProblem::new().test(); - } +#[test] +fn simple_prior_opt_tests() { + SimpleIso2PriorProblem::new().test(); + SimpleIso3PriorProblem::new().test(); } diff --git a/crates/sophus_opt/src/lib.rs b/crates/sophus_opt/src/lib.rs index 0c3b10e..3800d6c 100644 --- a/crates/sophus_opt/src/lib.rs +++ b/crates/sophus_opt/src/lib.rs @@ -1,3 +1,4 @@ +#![feature(portable_simd)] #![deny(missing_docs)] //! # Non-linear least squares optimization module diff --git a/crates/sophus_opt/src/solvers.rs b/crates/sophus_opt/src/solvers.rs index 0a7f553..dcb3500 100644 --- a/crates/sophus_opt/src/solvers.rs +++ b/crates/sophus_opt/src/solvers.rs @@ -206,9 +206,6 @@ pub fn solve(variables: &VarPool, costs: Vec>, nu: f64) -> VarPo // let inv_orig_marg_hessian = orig_marg_hessian.try_inverse().unwrap(); // track.delta = inv_orig_marg_hessian.clone() * track.gradient_free.clone(); -// // Eigen::Matrix inv_orig_marg_hessian = orig_marg_hessian.inverse(); -// // track.delta = * track.gradient_free; - // // outer prod // for j in 0..track.len { // let j_term = &evaluated_cost.terms[track.start_idx + j]; diff --git a/crates/sophus_opt/src/term.rs b/crates/sophus_opt/src/term.rs index dbcb7e8..8911e2b 100644 --- a/crates/sophus_opt/src/term.rs +++ b/crates/sophus_opt/src/term.rs @@ -3,9 +3,8 @@ use crate::block::NewBlockMatrix; use crate::robust_kernel; use crate::robust_kernel::IsRobustKernel; use crate::variables::VarKind; - -use sophus_calculus::types::MatF64; -use sophus_calculus::types::VecF64; +use sophus_core::linalg::MatF64; +use sophus_core::linalg::VecF64; /// Evaluated cost term #[derive(Debug, Clone)] diff --git a/crates/sophus_opt/src/variables.rs b/crates/sophus_opt/src/variables.rs index 86dea10..440625b 100644 --- a/crates/sophus_opt/src/variables.rs +++ b/crates/sophus_opt/src/variables.rs @@ -1,9 +1,8 @@ -use sophus_calculus::types::params::HasParams; -use sophus_calculus::types::VecF64; -use sophus_lie::isometry2::Isometry2; -use sophus_lie::isometry3::Isometry3; - use dyn_clone::DynClone; +use sophus_core::linalg::VecF64; +use sophus_core::params::HasParams; +use sophus_lie::groups::isometry2::Isometry2; +use sophus_lie::groups::isometry3::Isometry3; use std::collections::BTreeMap; use std::collections::HashMap; use std::fmt::Debug; @@ -133,7 +132,7 @@ impl IsVariable for VecF64 { } } -impl IsVariable for Isometry2 { +impl IsVariable for Isometry2 { const DOF: usize = 3; fn update(&mut self, delta: nalgebra::DVectorView) { @@ -141,11 +140,14 @@ impl IsVariable for Isometry2 { for d in 0..Self::DOF { delta_vec[d] = delta[d]; } - self.set_params((Isometry2::::exp(&delta_vec) * 
&self.clone()).params()); + self.set_params( + (Isometry2::::group_mul(&Isometry2::::exp(&delta_vec), &self.clone())) + .params(), + ); } } -impl IsVariable for Isometry3 { +impl IsVariable for Isometry3 { const DOF: usize = 6; fn update(&mut self, delta: nalgebra::DVectorView) { @@ -154,7 +156,7 @@ impl IsVariable for Isometry3 { delta_vec[d] = delta[d]; } self.set_params( - (Isometry3::::group_mul(&Isometry3::::exp(&delta_vec), &self.clone())) + (Isometry3::::group_mul(&Isometry3::::exp(&delta_vec), &self.clone())) .params(), ); } diff --git a/crates/sophus_pyo3/Cargo.toml b/crates/sophus_pyo3/Cargo.toml index fac0ca7..02bbda7 100644 --- a/crates/sophus_pyo3/Cargo.toml +++ b/crates/sophus_pyo3/Cargo.toml @@ -11,14 +11,13 @@ repository.workspace = true version.workspace = true [dependencies] -sophus_calculus.workspace = true +sophus_core.workspace = true sophus_lie.workspace = true -sophus_tensor.workspace = true nalgebra.workspace = true numpy.workspace = true [dependencies.pyo3] -version = "0.20.0" +version = "0.21.0" # "abi3-py38" tells pyo3 (and maturin) to build using the stable ABI with minimum Python version 3.8 features = ["abi3-py38", "multiple-pymethods"] diff --git a/crates/sophus_pyo3/src/lib.rs b/crates/sophus_pyo3/src/lib.rs index 435759c..0788058 100644 --- a/crates/sophus_pyo3/src/lib.rs +++ b/crates/sophus_pyo3/src/lib.rs @@ -1,10 +1,14 @@ +#![feature(portable_simd)] #![deny(missing_docs)] //! # Pyo3 module /// python wrapper pub mod pyo3; -use crate::pyo3::lie_groups::{PyIsometry2, PyIsometry3, PyRotation2, PyRotation3}; +use crate::pyo3::lie_groups::PyIsometry2; +use crate::pyo3::lie_groups::PyIsometry3; +use crate::pyo3::lie_groups::PyRotation2; +use crate::pyo3::lie_groups::PyRotation3; use numpy::pyo3::prelude::*; /// A Python module implemented in Rust. The name of this function must match diff --git a/crates/sophus_pyo3/src/pyo3/errors.rs b/crates/sophus_pyo3/src/pyo3/errors.rs index c99f6d4..856600f 100644 --- a/crates/sophus_pyo3/src/pyo3/errors.rs +++ b/crates/sophus_pyo3/src/pyo3/errors.rs @@ -1,5 +1,6 @@ use numpy::PyArray1; -use pyo3::{exceptions::PyOSError, PyErr}; +use pyo3::exceptions::PyOSError; +use pyo3::PyErr; use std::fmt; /// Error for mismatched array dimensions diff --git a/crates/sophus_pyo3/src/pyo3/lie_groups.rs b/crates/sophus_pyo3/src/pyo3/lie_groups.rs index a75d3ae..0757a86 100644 --- a/crates/sophus_pyo3/src/pyo3/lie_groups.rs +++ b/crates/sophus_pyo3/src/pyo3/lie_groups.rs @@ -1,19 +1,17 @@ use crate::pyo3::errors::check_array1_dim_impl; use crate::pyo3::errors::PyArray1DimMismatch; - -use sophus_calculus::types::params::HasParams; -use sophus_lie::isometry2::Isometry2; -use sophus_lie::isometry3::Isometry3; -use sophus_lie::rotation2::Rotation2; -use sophus_lie::rotation3::Rotation3; -use sophus_lie::traits::IsTranslationProductGroup; - use numpy::PyArray1; use numpy::PyArray2; use pyo3::pyclass; use pyo3::pymethods; use pyo3::Py; use pyo3::Python; +use sophus_core::params::HasParams; +use sophus_lie::groups::isometry2::Isometry2; +use sophus_lie::groups::isometry3::Isometry3; +use sophus_lie::groups::rotation2::Rotation2; +use sophus_lie::groups::rotation3::Rotation3; +use sophus_lie::traits::IsTranslationProductGroup; macro_rules! check_array1_dim { ($array:expr, $expected:expr) => { @@ -122,7 +120,7 @@ macro_rules! 
crate_py_lie_group_class { let point_slice = read_only_point.as_slice().unwrap(); let point_vec = nalgebra::SVector::::from_column_slice(point_slice); - let result = <$rust_group>::dx_exp_x_times_point_at_0(&point_vec); + let result = <$rust_group>::dx_exp_x_times_point_at_0(point_vec); Ok(PyArray1::from_slice(py, result.as_slice()) .reshape([$params, $point]) .unwrap() @@ -179,7 +177,7 @@ macro_rules! crate_py_lie_group_class { fn group_mul(&self, other: &$py_group) -> Self { Self { - inner: self.inner * &other.inner, + inner: self.inner.group_mul(&other.inner), } } @@ -287,7 +285,7 @@ macro_rules! crate_py_lie_group_class { fn __mul__(&self, other: &$py_group) -> Self { Self { - inner: self.inner * &other.inner, + inner: self.inner.group_mul(&other.inner), } } @@ -298,10 +296,10 @@ macro_rules! crate_py_lie_group_class { }; } -crate_py_lie_group_class!(PyRotation2, Rotation2::, "Rotation2", 1, 2, 2, 2); -crate_py_lie_group_class!(PyIsometry2, Isometry2::, "Isometry2", 3, 4, 2, 3); -crate_py_lie_group_class!(PyRotation3, Rotation3::, "Rotation3", 3, 4, 3, 3); -crate_py_lie_group_class!(PyIsometry3, Isometry3::, "Isometry3", 6, 7, 3, 4); +crate_py_lie_group_class!(PyRotation2, Rotation2::, "Rotation2", 1, 2, 2, 2); +crate_py_lie_group_class!(PyIsometry2, Isometry2::, "Isometry2", 3, 4, 2, 3); +crate_py_lie_group_class!(PyRotation3, Rotation3::, "Rotation3", 3, 4, 3, 3); +crate_py_lie_group_class!(PyIsometry3, Isometry3::, "Isometry3", 6, 7, 3, 4); macro_rules! augment_py_product_group_class { ($py_product_group: ident, $rust_group:ty, $py_factor_group: ident, $point:literal) => { @@ -361,5 +359,5 @@ macro_rules! augment_py_product_group_class { }; } -augment_py_product_group_class!(PyIsometry2, Isometry2, PyRotation2, 2); -augment_py_product_group_class!(PyIsometry3, Isometry3, PyRotation3, 3); +augment_py_product_group_class!(PyIsometry2, Isometry2, PyRotation2, 2); +augment_py_product_group_class!(PyIsometry3, Isometry3, PyRotation3, 3); diff --git a/crates/sophus_sensor/Cargo.toml b/crates/sophus_sensor/Cargo.toml index 983521a..aab90c6 100644 --- a/crates/sophus_sensor/Cargo.toml +++ b/crates/sophus_sensor/Cargo.toml @@ -11,10 +11,11 @@ repository.workspace = true version.workspace = true [dependencies] -sophus_calculus.workspace = true +sophus_core.workspace = true sophus_image.workspace = true approx.workspace = true assertables.workspace = true nalgebra.workspace = true ndarray.workspace = true +num-traits.workspace = true diff --git a/crates/sophus_sensor/src/affine.rs b/crates/sophus_sensor/src/affine.rs deleted file mode 100644 index 9cc0566..0000000 --- a/crates/sophus_sensor/src/affine.rs +++ /dev/null @@ -1,61 +0,0 @@ -use crate::traits::IsCameraDistortionImpl; - -use sophus_calculus::types::matrix::IsMatrix; -use sophus_calculus::types::params::ParamsImpl; -use sophus_calculus::types::scalar::IsScalar; -use sophus_calculus::types::vector::IsVector; -use sophus_calculus::types::MatF64; -use sophus_calculus::types::VecF64; - -use std::marker::PhantomData; - -/// Affine "distortion" implementation -/// -/// This is not a distortion in the traditional sense, but rather a simple affine transformation. 
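For reference while reviewing the move of this file: the affine model maps a point (x, y) on the z = 1 plane to pixels as u = fx*x + cx, v = fy*y + cy, and undistort inverts it exactly. The same arithmetic restated as a standalone sketch (plain f64 helpers with hypothetical names, not the crate's trait-based API):

fn affine_distort(params: &[f64; 4], p: &[f64; 2]) -> [f64; 2] {
    // params must satisfy fx != 0 and fy != 0, matching are_params_valid below
    let [fx, fy, cx, cy] = *params;
    [fx * p[0] + cx, fy * p[1] + cy]
}

fn affine_undistort(params: &[f64; 4], q: &[f64; 2]) -> [f64; 2] {
    let [fx, fy, cx, cy] = *params;
    [(q[0] - cx) / fx, (q[1] - cy) / fy]
}

fn main() {
    let k = [600.0, 600.0, 320.0, 240.0];
    let p = [0.25, -0.5];
    // round trip: undistort(distort(p)) == p
    assert_eq!(affine_undistort(&k, &affine_distort(&k, &p)), p);
}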
-#[derive(Debug, Clone, Copy)] -pub struct AffineDistortionImpl> { - phantom: PhantomData, -} - -impl> ParamsImpl for AffineDistortionImpl { - fn are_params_valid(params: &S::Vector<4>) -> bool { - params.real()[0] != 0.0 && params.real()[1] != 0.0 - } - - fn params_examples() -> Vec> { - vec![S::Vector::<4>::from_c_array([1.0, 1.0, 0.0, 0.0])] - } - - fn invalid_params_examples() -> Vec> { - vec![ - S::Vector::<4>::from_c_array([0.0, 1.0, 0.0, 0.0]), - S::Vector::<4>::from_c_array([1.0, 0.0, 0.0, 0.0]), - ] - } -} - -impl> IsCameraDistortionImpl for AffineDistortionImpl { - fn distort( - params: &S::Vector<4>, - proj_point_in_camera_z1_plane: &S::Vector<2>, - ) -> S::Vector<2> { - S::Vector::<2>::from_array([ - proj_point_in_camera_z1_plane.get(0) * params.get(0) + params.get(2), - proj_point_in_camera_z1_plane.get(1) * params.get(1) + params.get(3), - ]) - } - - fn undistort(params: &S::Vector<4>, distorted_point: &S::Vector<2>) -> S::Vector<2> { - S::Vector::<2>::from_array([ - (distorted_point.get(0) - params.get(2)) / params.get(0), - (distorted_point.get(1) - params.get(3)) / params.get(1), - ]) - } - - fn dx_distort_x( - params: &VecF64<4>, - _proj_point_in_camera_z1_plane: &VecF64<2>, - ) -> MatF64<2, 2> { - MatF64::<2, 2>::from_array2([[params[0], 0.0], [0.0, params[1]]]) - } -} diff --git a/crates/sophus_sensor/src/camera.rs b/crates/sophus_sensor/src/camera.rs new file mode 100644 index 0000000..c68049a --- /dev/null +++ b/crates/sophus_sensor/src/camera.rs @@ -0,0 +1,123 @@ +use super::traits::IsCameraDistortionImpl; +use super::traits::IsProjection; +use sophus_core::linalg::bool_mask::BoolMask; +use sophus_core::linalg::scalar::IsScalar; +use sophus_image::image_view::ImageSize; + +/// A generic camera model +#[derive(Debug, Copy, Clone)] +pub struct Camera< + S: IsScalar, + const DISTORT: usize, + const PARAMS: usize, + const BATCH: usize, + Distort: IsCameraDistortionImpl, + Proj: IsProjection, +> { + params: S::Vector, + phantom: std::marker::PhantomData<(Distort, Proj)>, + image_size: ImageSize, +} + +impl< + S: IsScalar, + const DISTORT: usize, + const PARAMS: usize, + const BATCH: usize, + Distort: IsCameraDistortionImpl, + Proj: IsProjection, + > Camera +{ + /// Creates a new camera + pub fn new(params: &S::Vector, image_size: ImageSize) -> Self { + Self::from_params_and_size(params, image_size) + } + + /// Creates a new camera from parameters and image size + pub fn from_params_and_size(params: &S::Vector, size: ImageSize) -> Self { + assert!( + Distort::are_params_valid(params).all(), + "Invalid parameters for {:?}", + params + ); + Self { + params: params.clone(), + phantom: std::marker::PhantomData, + image_size: size, + } + } + + /// Returns the image size + pub fn image_size(&self) -> ImageSize { + self.image_size + } + + /// Distortion - maps a point in the camera z=1 plane to a distorted point + pub fn distort(&self, proj_point_in_camera_z1_plane: &S::Vector<2>) -> S::Vector<2> { + Distort::distort(&self.params, proj_point_in_camera_z1_plane) + } + + /// Undistortion - maps a distorted pixel to a point in the camera z=1 plane + pub fn undistort(&self, pixel: &S::Vector<2>) -> S::Vector<2> { + Distort::undistort(&self.params, pixel) + } + + /// Derivative of the distortion w.r.t. 
the point in the camera z=1 plane + pub fn dx_distort_x(&self, proj_point_in_camera_z1_plane: &S::Vector<2>) -> S::Matrix<2, 2> { + Distort::dx_distort_x(&self.params, proj_point_in_camera_z1_plane) + } + + /// Projects a 3D point in the camera frame to a pixel in the image + pub fn cam_proj(&self, point_in_camera: &S::Vector<3>) -> S::Vector<2> { + self.distort(&Proj::proj(point_in_camera)) + } + + /// Unprojects a pixel in the image to a 3D point in the camera frame - assuming z=1 + pub fn cam_unproj(&self, point_in_camera: &S::Vector<2>) -> S::Vector<3> { + self.cam_unproj_with_z(point_in_camera, S::ones()) + } + + /// Unprojects a pixel in the image to a 3D point in the camera frame + pub fn cam_unproj_with_z(&self, point_in_camera: &S::Vector<2>, z: S) -> S::Vector<3> { + Proj::unproj(&self.undistort(point_in_camera), z) + } + + /// Sets the camera parameters + pub fn set_params(&mut self, params: &S::Vector) { + self.params = params.clone(); + } + + /// Returns the camera parameters + pub fn params(&self) -> &S::Vector { + &self.params + } + + /// Returns true if the camera is empty + pub fn is_empty(&self) -> bool { + self.image_size.width == 0 || self.image_size.height == 0 + } + + /// Examples of valid parameters + pub fn params_examples() -> Vec> { + Distort::params_examples() + } + + /// Examples of invalid parameters + pub fn invalid_params_examples() -> Vec> { + Distort::invalid_params_examples() + } +} + +impl< + S: IsScalar, + const DISTORT: usize, + const PARAMS: usize, + const BATCH: usize, + Distort: IsCameraDistortionImpl, + Proj: IsProjection, + > Default for Camera +{ + fn default() -> Self { + Self::from_params_and_size(&Distort::identity_params(), ImageSize::default()) + } +} diff --git a/crates/sophus_sensor/src/camera_enum.rs b/crates/sophus_sensor/src/camera_enum.rs new file mode 100644 index 0000000..58bf8ac --- /dev/null +++ b/crates/sophus_sensor/src/camera_enum.rs @@ -0,0 +1,4 @@ +/// general camera - either perspective or orthographic +pub mod general_camera; +/// perspective camera +pub mod perspective_camera; diff --git a/crates/sophus_sensor/src/camera_enum/general_camera.rs b/crates/sophus_sensor/src/camera_enum/general_camera.rs new file mode 100644 index 0000000..82efd43 --- /dev/null +++ b/crates/sophus_sensor/src/camera_enum/general_camera.rs @@ -0,0 +1,77 @@ +use crate::camera_enum::perspective_camera::PerspectiveCameraEnum; +use crate::projections::orthographic::OrthographicCamera; +use crate::traits::IsCameraEnum; +use sophus_core::linalg::scalar::IsScalar; +use sophus_image::image_view::ImageSize; + +/// Generalized camera enum +#[derive(Debug, Clone)] +pub enum GeneralCameraEnum, const BATCH: usize> { + /// Perspective camera enum + Perspective(PerspectiveCameraEnum), + /// Orthographic camera + Orthographic(OrthographicCamera), +} + +impl, const BATCH: usize> GeneralCameraEnum { + /// Create a new perspective camera instance + pub fn new_perspective(model: PerspectiveCameraEnum) -> Self { + Self::Perspective(model) + } +} + +impl, const BATCH: usize> IsCameraEnum + for GeneralCameraEnum +{ + fn new_pinhole(params: &S::Vector<4>, image_size: ImageSize) -> Self { + Self::Perspective(PerspectiveCameraEnum::new_pinhole(params, image_size)) + } + + fn new_kannala_brandt(params: &S::Vector<8>, image_size: ImageSize) -> Self { + Self::Perspective(PerspectiveCameraEnum::new_kannala_brandt( + params, image_size, + )) + } + + fn cam_proj(&self, point_in_camera: &S::Vector<3>) -> S::Vector<2> { + match self { + GeneralCameraEnum::Perspective(camera) => 
camera.cam_proj(point_in_camera), + GeneralCameraEnum::Orthographic(camera) => camera.cam_proj(point_in_camera), + } + } + + fn cam_unproj_with_z(&self, point_in_camera: &S::Vector<2>, z: S) -> S::Vector<3> { + match self { + GeneralCameraEnum::Perspective(camera) => camera.cam_unproj_with_z(point_in_camera, z), + GeneralCameraEnum::Orthographic(camera) => camera.cam_unproj_with_z(point_in_camera, z), + } + } + + fn distort(&self, point_in_camera: &S::Vector<2>) -> S::Vector<2> { + match self { + GeneralCameraEnum::Perspective(camera) => camera.distort(point_in_camera), + GeneralCameraEnum::Orthographic(camera) => camera.distort(point_in_camera), + } + } + + fn undistort(&self, point_in_camera: &S::Vector<2>) -> S::Vector<2> { + match self { + GeneralCameraEnum::Perspective(camera) => camera.undistort(point_in_camera), + GeneralCameraEnum::Orthographic(camera) => camera.undistort(point_in_camera), + } + } + + fn dx_distort_x(&self, point_in_camera: &S::Vector<2>) -> S::Matrix<2, 2> { + match self { + GeneralCameraEnum::Perspective(camera) => camera.dx_distort_x(point_in_camera), + GeneralCameraEnum::Orthographic(camera) => camera.dx_distort_x(point_in_camera), + } + } + + fn image_size(&self) -> ImageSize { + match self { + GeneralCameraEnum::Perspective(camera) => camera.image_size(), + GeneralCameraEnum::Orthographic(camera) => camera.image_size(), + } + } +} diff --git a/crates/sophus_sensor/src/camera_enum/perspective_camera.rs b/crates/sophus_sensor/src/camera_enum/perspective_camera.rs new file mode 100644 index 0000000..a1971b8 --- /dev/null +++ b/crates/sophus_sensor/src/camera_enum/perspective_camera.rs @@ -0,0 +1,95 @@ +use crate::camera::Camera; +use crate::distortions::affine::AffineDistortionImpl; +use crate::distortions::kannala_brandt::KannalaBrandtDistortionImpl; +use crate::projections::perspective::PerspectiveProjection; +use crate::traits::IsCameraEnum; +use crate::traits::IsPerspectiveCameraEnum; +use sophus_core::linalg::scalar::IsScalar; +use sophus_core::linalg::vector::IsVector; +use sophus_image::image_view::ImageSize; + +/// Pinhole camera +pub type PinholeCamera = + Camera, PerspectiveProjection>; +/// Kannala-Brandt camera +pub type KannalaBrandtCamera = + Camera, PerspectiveProjection>; + +/// Perspective camera enum +#[derive(Debug, Clone)] +pub enum PerspectiveCameraEnum, const BATCH: usize> { + /// Pinhole camera + Pinhole(PinholeCamera), + /// Kannala-Brandt camera + KannalaBrandt(KannalaBrandtCamera), +} + +impl, const BATCH: usize> IsCameraEnum + for PerspectiveCameraEnum +{ + fn new_pinhole(params: &S::Vector<4>, image_size: ImageSize) -> Self { + Self::Pinhole(PinholeCamera::from_params_and_size(params, image_size)) + } + fn image_size(&self) -> ImageSize { + match self { + PerspectiveCameraEnum::Pinhole(camera) => camera.image_size(), + PerspectiveCameraEnum::KannalaBrandt(camera) => camera.image_size(), + } + } + + fn new_kannala_brandt(params: &S::Vector<8>, image_size: ImageSize) -> Self { + Self::KannalaBrandt(KannalaBrandtCamera::from_params_and_size( + params, image_size, + )) + } + + fn cam_proj(&self, point_in_camera: &S::Vector<3>) -> S::Vector<2> { + match self { + PerspectiveCameraEnum::Pinhole(camera) => camera.cam_proj(point_in_camera), + PerspectiveCameraEnum::KannalaBrandt(camera) => camera.cam_proj(point_in_camera), + } + } + + fn cam_unproj_with_z(&self, point_in_camera: &S::Vector<2>, z: S) -> S::Vector<3> { + match self { + PerspectiveCameraEnum::Pinhole(camera) => camera.cam_unproj_with_z(point_in_camera, z), + 
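The match-per-method forwarding in `GeneralCameraEnum` above (and in `PerspectiveCameraEnum` below) is plain enum dispatch; presumably trait objects are not usable here because `Camera` is generic over const parameters. A toy reduction of the pattern, with hypothetical names:

```rust
// Each trait method is forwarded to the active variant with a `match`,
// giving static dispatch over a closed set of camera models.
enum ToyCamera {
    A(f64),
    B(f64),
}

impl ToyCamera {
    fn focal(&self) -> f64 {
        match self {
            ToyCamera::A(f) => *f,
            ToyCamera::B(f) => 2.0 * *f,
        }
    }
}

fn main() {
    assert_eq!(ToyCamera::A(600.0).focal(), 600.0);
    assert_eq!(ToyCamera::B(600.0).focal(), 1200.0);
}
```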
PerspectiveCameraEnum::KannalaBrandt(camera) => { + camera.cam_unproj_with_z(point_in_camera, z) + } + } + } + + fn distort(&self, point_in_camera: &S::Vector<2>) -> S::Vector<2> { + match self { + PerspectiveCameraEnum::Pinhole(camera) => camera.distort(point_in_camera), + PerspectiveCameraEnum::KannalaBrandt(camera) => camera.distort(point_in_camera), + } + } + + fn undistort(&self, point_in_camera: &S::Vector<2>) -> S::Vector<2> { + match self { + PerspectiveCameraEnum::Pinhole(camera) => camera.undistort(point_in_camera), + PerspectiveCameraEnum::KannalaBrandt(camera) => camera.undistort(point_in_camera), + } + } + + fn dx_distort_x(&self, point_in_camera: &S::Vector<2>) -> S::Matrix<2, 2> { + match self { + PerspectiveCameraEnum::Pinhole(camera) => camera.dx_distort_x(point_in_camera), + PerspectiveCameraEnum::KannalaBrandt(camera) => camera.dx_distort_x(point_in_camera), + } + } +} + +impl, const BATCH: usize> IsPerspectiveCameraEnum + for PerspectiveCameraEnum +{ + fn pinhole_params(&self) -> S::Vector<4> { + match self { + PerspectiveCameraEnum::Pinhole(camera) => camera.params().clone(), + PerspectiveCameraEnum::KannalaBrandt(camera) => { + camera.params().get_fixed_subvec::<4>(0) + } + } + } +} diff --git a/crates/sophus_sensor/src/distortion_table.rs b/crates/sophus_sensor/src/distortion_table.rs index 845b375..7f0feec 100644 --- a/crates/sophus_sensor/src/distortion_table.rs +++ b/crates/sophus_sensor/src/distortion_table.rs @@ -1,9 +1,14 @@ -use sophus_calculus::region::IsRegion; -use sophus_calculus::region::Region; -use sophus_calculus::types::VecF64; +use crate::dyn_camera::DynCamera; +use nalgebra::SVector; +use sophus_core::calculus::region::IsRegion; +use sophus_core::calculus::region::Region; +use sophus_core::linalg::vector::IsVector; +use sophus_core::linalg::VecF64; use sophus_image::arc_image::ArcImage2F32; use sophus_image::image_view::IsImageView; use sophus_image::interpolation::interpolate; +use sophus_image::mut_image::MutImage2F32; +use sophus_image::mut_image_view::IsMutImageView; /// A table of distortion values. 
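`pinhole_params` above relies on a shared parameter layout: for both models the leading four entries are `fx, fy, cx, cy`, which is why the Kannala-Brandt arm can return `get_fixed_subvec::<4>(0)`. A plain-array illustration of that layout (names illustrative, not part of the patch):

```rust
// The leading four Kannala-Brandt parameters are the pinhole block.
fn pinhole_from_kb(kb: [f64; 8]) -> [f64; 4] {
    // [fx, fy, cx, cy, k0, k1, k2, k3] -> [fx, fy, cx, cy]
    [kb[0], kb[1], kb[2], kb[3]]
}

fn main() {
    let kb = [1000.0, 1000.0, 320.0, 280.0, 0.1, 0.01, 0.001, 0.0001];
    assert_eq!(pinhole_from_kb(kb), [1000.0, 1000.0, 320.0, 280.0]);
}
```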
#[derive(Debug, Clone)] @@ -38,3 +43,71 @@ impl DistortTable { VecF64::<2>::new(p2[0] as f64, p2[1] as f64) } } + +/// Returns the undistortion lookup table +pub fn undistort_table(cam: &DynCamera) -> MutImage2F32 { + let mut table = MutImage2F32::from_image_size(cam.image_size()); + let w = cam.image_size().width; + let h = cam.image_size().height; + for v in 0..h { + for u in 0..w { + let pixel = cam.undistort(&VecF64::<2>::from_f64_array([u as f64, v as f64])); + *table.mut_pixel(u, v) = pixel.cast(); + } + } + table +} + +/// Returns the distortion lookup table +pub fn distort_table(cam: &DynCamera) -> DistortTable { + // first we find min and max values in the proj plane + // just test the 4 corners might not be enough + // so we will test the image boundary + + let mut region = Region::<2>::empty(); + + let w = cam.image_size().width; + let h = cam.image_size().height; + + for u in 0..cam.image_size().width { + // top border + let v = 0; + let point_in_proj = cam.undistort(&VecF64::<2>::from_f64_array([u as f64, v as f64])); + region.extend(&point_in_proj); + // bottom border + let v = cam.image_size().height - 1; + let point_in_proj = cam.undistort(&VecF64::<2>::new(u as f64, v as f64)); + region.extend(&point_in_proj); + } + for v in 0..cam.image_size().height { + // left border + let u = 0; + let point_in_proj = cam.undistort(&VecF64::<2>::new(u as f64, v as f64)); + region.extend(&point_in_proj); + // right border + let u = cam.image_size().width - 1; + let point_in_proj = cam.undistort(&VecF64::<2>::new(u as f64, v as f64)); + region.extend(&point_in_proj); + } + let region = Region::<2>::from_min_max(region.min().cast() * 2.0, region.max().cast() * 2.0); + + let mut distort_table = DistortTable { + table: ArcImage2F32::from_image_size_and_val(cam.image_size(), SVector::::zeros()), + region, + }; + + let mut table = MutImage2F32::from_image_size(cam.image_size()); + + for v in 0..h { + for u in 0..w { + let point_proj = VecF64::<2>::new( + distort_table.offset().x + (u as f64) * distort_table.incr().x, + distort_table.offset().y + (v as f64) * distort_table.incr().y, + ); + let pixel = cam.distort(&point_proj); + *table.mut_pixel(u, v) = SVector::::new(pixel.cast().x, pixel.cast().y); + } + } + distort_table.table = table.into(); + distort_table +} diff --git a/crates/sophus_sensor/src/distortions.rs b/crates/sophus_sensor/src/distortions.rs new file mode 100644 index 0000000..945a54e --- /dev/null +++ b/crates/sophus_sensor/src/distortions.rs @@ -0,0 +1,4 @@ +/// Affine distortion - for pinhole cameras +pub mod affine; +/// Kannala-Brandt distortion - for fisheye cameras +pub mod kannala_brandt; diff --git a/crates/sophus_sensor/src/distortions/affine.rs b/crates/sophus_sensor/src/distortions/affine.rs new file mode 100644 index 0000000..b7ab9e4 --- /dev/null +++ b/crates/sophus_sensor/src/distortions/affine.rs @@ -0,0 +1,65 @@ +use crate::traits::IsCameraDistortionImpl; +use sophus_core::linalg::bool_mask::BoolMask; +use sophus_core::linalg::matrix::IsMatrix; +use sophus_core::linalg::scalar::IsScalar; +use sophus_core::linalg::vector::IsVector; +use sophus_core::params::ParamsImpl; +use std::marker::PhantomData; + +/// Affine "distortion" implementation +/// +/// This is not a distortion in the traditional sense, but rather a simple affine transformation. 
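The `distort_table` built above stores, at cell `(u, v)`, the distorted pixel for the plane point `offset + (u * incr.x, v * incr.y)`; a lookup therefore maps a query point back to grid coordinates before interpolating. A minimal nearest-cell sketch of that grid-to-plane mapping follows; the patch itself interpolates bilinearly via `interpolate`, and all names here are hypothetical.

```rust
// Sketch of the affine grid <-> plane mapping behind the distort table.
struct Grid2 {
    offset: [f64; 2],
    incr: [f64; 2],
    width: usize,
    data: Vec<[f64; 2]>, // row-major, len = width * height
}

impl Grid2 {
    // Assumes p lies inside the table's region.
    fn lookup_nearest(&self, p: [f64; 2]) -> [f64; 2] {
        let u = ((p[0] - self.offset[0]) / self.incr[0]).round() as usize;
        let v = ((p[1] - self.offset[1]) / self.incr[1]).round() as usize;
        self.data[v * self.width + u]
    }
}

fn main() {
    // 2x2 grid over [0,1]^2 storing f(p) = 2 * p
    let grid = Grid2 {
        offset: [0.0, 0.0],
        incr: [1.0, 1.0],
        width: 2,
        data: vec![[0.0, 0.0], [2.0, 0.0], [0.0, 2.0], [2.0, 2.0]],
    };
    assert_eq!(grid.lookup_nearest([0.9, 0.1]), [2.0, 0.0]);
}
```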
+#[derive(Debug, Clone, Copy)] +pub struct AffineDistortionImpl, const BATCH: usize> { + phantom: PhantomData, +} + +impl, const BATCH: usize> ParamsImpl + for AffineDistortionImpl +{ + fn are_params_valid(_params: &S::Vector<4>) -> S::Mask { + S::Mask::all_true() + } + + fn params_examples() -> Vec> { + vec![S::Vector::<4>::from_f64_array([1.0, 1.0, 0.0, 0.0])] + } + + fn invalid_params_examples() -> Vec> { + vec![ + S::Vector::<4>::from_f64_array([0.0, 1.0, 0.0, 0.0]), + S::Vector::<4>::from_f64_array([1.0, 0.0, 0.0, 0.0]), + ] + } +} + +impl, const BATCH: usize> IsCameraDistortionImpl + for AffineDistortionImpl +{ + fn distort( + params: &S::Vector<4>, + proj_point_in_camera_z1_plane: &S::Vector<2>, + ) -> S::Vector<2> { + S::Vector::<2>::from_array([ + proj_point_in_camera_z1_plane.get_elem(0) * params.get_elem(0) + params.get_elem(2), + proj_point_in_camera_z1_plane.get_elem(1) * params.get_elem(1) + params.get_elem(3), + ]) + } + + fn undistort(params: &S::Vector<4>, distorted_point: &S::Vector<2>) -> S::Vector<2> { + S::Vector::<2>::from_array([ + (distorted_point.get_elem(0) - params.get_elem(2)) / params.get_elem(0), + (distorted_point.get_elem(1) - params.get_elem(3)) / params.get_elem(1), + ]) + } + + fn dx_distort_x( + params: &S::Vector<4>, + _proj_point_in_camera_z1_plane: &S::Vector<2>, + ) -> S::Matrix<2, 2> { + S::Matrix::<2, 2>::from_array2([ + [params.get_elem(0), S::zeros()], + [S::zeros(), params.get_elem(1)], + ]) + } +} diff --git a/crates/sophus_sensor/src/distortions/kannala_brandt.rs b/crates/sophus_sensor/src/distortions/kannala_brandt.rs new file mode 100644 index 0000000..bf41286 --- /dev/null +++ b/crates/sophus_sensor/src/distortions/kannala_brandt.rs @@ -0,0 +1,223 @@ +use crate::traits::IsCameraDistortionImpl; +use sophus_core::linalg::bool_mask::BoolMask; +use sophus_core::linalg::matrix::IsMatrix; +use sophus_core::linalg::scalar::IsScalar; +use sophus_core::linalg::vector::IsVector; +use sophus_core::params::ParamsImpl; +use std::marker::PhantomData; + +/// Kannala-Brandt distortion implementation +#[derive(Debug, Clone, Copy)] +pub struct KannalaBrandtDistortionImpl, const BATCH: usize> { + phantom: PhantomData, +} + +impl, const BATCH: usize> ParamsImpl + for KannalaBrandtDistortionImpl +{ + fn are_params_valid(_params: &S::Vector<8>) -> S::Mask { + S::Mask::all_true() + } + + fn params_examples() -> Vec> { + vec![S::Vector::<8>::from_f64_array([ + 1.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, + ])] + } + + fn invalid_params_examples() -> Vec> { + vec![ + S::Vector::<8>::from_f64_array([0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]), + S::Vector::<8>::from_f64_array([1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]), + ] + } +} + +impl, const BATCH: usize> IsCameraDistortionImpl + for KannalaBrandtDistortionImpl +{ + fn distort( + params: &S::Vector<8>, + proj_point_in_camera_z1_plane: &S::Vector<2>, + ) -> S::Vector<2> { + let k0 = params.get_elem(4); + let k1 = params.get_elem(5); + let k2 = params.get_elem(6); + let k3 = params.get_elem(7); + + let radius_sq = proj_point_in_camera_z1_plane.get_elem(0) + * proj_point_in_camera_z1_plane.get_elem(0) + + proj_point_in_camera_z1_plane.get_elem(1) * proj_point_in_camera_z1_plane.get_elem(1); + + let radius = radius_sq.clone().sqrt(); + let radius_inverse = S::from_f64(1.0) / radius.clone(); + let theta = radius.atan2(S::from_f64(1.0)); + let theta2 = theta.clone() * theta.clone(); + let theta4 = theta2.clone() * theta2.clone(); + let theta6 = theta2.clone() * theta4.clone(); + let theta8 = theta4.clone() * theta4.clone(); + + 
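The `distort` body below evaluates theta = atan2(r, 1) and the odd polynomial r_d = theta * (1 + k0 theta^2 + k1 theta^4 + k2 theta^6 + k3 theta^8), then rescales the plane point by r_d / r, falling back to a scaling of one near the optical axis. A standalone f64 cross-check of the same model, not part of the patch:

```rust
// Reference implementation of the Kannala-Brandt forward model.
fn kb_distort(p: [f64; 8], x: f64, y: f64) -> [f64; 2] {
    let r2 = x * x + y * y;
    let scaling = if r2 <= 1e-8 {
        1.0 // matches the `near_zero` select in the patch
    } else {
        let r = r2.sqrt();
        let th = r.atan2(1.0);
        let th2 = th * th;
        // Horner form of 1 + k0 th^2 + k1 th^4 + k2 th^6 + k3 th^8
        let poly = 1.0 + th2 * (p[4] + th2 * (p[5] + th2 * (p[6] + th2 * p[7])));
        th * poly / r
    };
    [scaling * x * p[0] + p[2], scaling * y * p[1] + p[3]]
}

fn main() {
    let p = [1000.0, 1000.0, 320.0, 280.0, 0.1, 0.01, 0.001, 0.0001];
    // The principal point maps to itself.
    assert_eq!(kb_distort(p, 0.0, 0.0), [320.0, 280.0]);
}
```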
let r_distorted = + theta * (S::from_f64(1.0) + k0 * theta2 + k1 * theta4 + k2 * theta6 + k3 * theta8); + let scaling = r_distorted * radius_inverse; + + let near_zero = radius_sq.less_equal(&S::from_f64(1e-8)); + + let scaling = S::ones().select(&near_zero, scaling); + + S::Vector::<2>::from_array([ + scaling.clone() * proj_point_in_camera_z1_plane.get_elem(0) * params.get_elem(0) + + params.get_elem(2), + scaling * proj_point_in_camera_z1_plane.get_elem(1) * params.get_elem(1) + + params.get_elem(3), + ]) + } + + fn undistort(params: &S::Vector<8>, distorted_point: &S::Vector<2>) -> S::Vector<2> { + let fu = params.get_elem(0); + let fv = params.get_elem(1); + let u0 = params.get_elem(2); + let v0 = params.get_elem(3); + + let k0 = params.get_elem(4); + let k1 = params.get_elem(5); + let k2 = params.get_elem(6); + let k3 = params.get_elem(7); + + let un = (distorted_point.get_elem(0) - u0) / fu; + let vn = (distorted_point.get_elem(1) - v0) / fv; + let rth2 = un.clone() * un.clone() + vn.clone() * vn.clone(); + + let rth2_near_zero = rth2.less_equal(&S::from_f64(1e-8)); + let point_z1_plane0 = S::Vector::<2>::from_array([un.clone(), vn.clone()]); + + let rth = rth2.sqrt(); + + let mut th = rth.clone().sqrt(); + + let mut iters = 0; + loop { + let th2 = th.clone() * th.clone(); + let th4 = th2.clone() * th2.clone(); + let th6 = th2.clone() * th4.clone(); + let th8 = th4.clone() * th4.clone(); + + let thd = th.clone() + * (S::from_f64(1.0) + + k0.clone() * th2.clone() + + k1.clone() * th4.clone() + + k2.clone() * th6.clone() + + k3.clone() * th8.clone()); + let d_thd_wtr_th = S::from_f64(1.0) + + S::from_f64(3.0) * k0.clone() * th2 + + S::from_f64(5.0) * k1.clone() * th4 + + S::from_f64(7.0) * k2.clone() * th6 + + S::from_f64(9.0) * k3.clone() * th8; + + let step = (thd - rth.clone()) / d_thd_wtr_th; + th -= step.clone(); + + if (step + .real_part() + .abs() + .less_equal(&S::RealScalar::from_f64(1e-8))) + .all() + { + break; + } + + iters += 1; + + if iters >= 20 { + // warn!("undistort: max iters ({}) reached, step: {}", iters, step); + break; + } + } + + let radius_undistorted = th.tan(); + + let radius_undistorted_near_zero = radius_undistorted.less_equal(&S::from_f64(0.0)); + + let sign = S::from_f64(-1.0).select(&radius_undistorted_near_zero, S::ones()); + + point_z1_plane0.select( + &rth2_near_zero, + S::Vector::<2>::from_array([ + sign.clone() * radius_undistorted.clone() * un / rth.clone(), + sign * radius_undistorted * vn / rth, + ]), + ) + } + + fn dx_distort_x( + params: &S::Vector<8>, + proj_point_in_camera_z1_plane: &S::Vector<2>, + ) -> S::Matrix<2, 2> { + let a = proj_point_in_camera_z1_plane.get_elem(0); + let b = proj_point_in_camera_z1_plane.get_elem(1); + let fx = params.get_elem(0); + let fy = params.get_elem(1); + + let k = params.get_fixed_rows::<4>(4); + + let radius_sq = a.clone() * a.clone() + b.clone() * b.clone(); + + let near_zero = radius_sq.less_equal(&S::from_f64(1e-8)); + + let dx0 = + S::Matrix::<2, 2>::from_array2([[fx.clone(), S::zeros()], [S::zeros(), fy.clone()]]); + + let c0 = a.clone() * a.clone(); + let c1 = b.clone() * b.clone(); + let c2 = c0.clone() + c1.clone(); + let c2_sqrt = c2.clone().sqrt(); + let c3 = + c2_sqrt.clone() * c2_sqrt.clone() * c2_sqrt.clone() * c2_sqrt.clone() * c2_sqrt.clone(); + let c4 = c2.clone() + S::from_f64(1.0); + let c5 = c2_sqrt.clone().atan(); + let c6 = c5.clone() * c5.clone(); // c5^2 + let c7 = c6.clone() * k.get_elem(0); + let c8 = c6.clone() * c6.clone(); // c5^4 + let c9 = c8.clone() * k.get_elem(1); + let c10 
= c8.clone() * c6.clone(); // c5^6 + let c11 = c10.clone() * k.get_elem(2); + let c12 = c8.clone() * c8.clone() * k.get_elem(3); // c5^8 * k[3] + let c13 = S::from_f64(1.0) + * c4.clone() + * c5 + * (c11.clone() + c12.clone() + c7.clone() + c9.clone() + S::from_f64(1.0)); + let c14 = c13.clone() * c3.clone(); + let c15 = c2_sqrt.clone() * c2_sqrt.clone() * c2_sqrt.clone(); + let c16 = c13.clone() * c15.clone(); + let c17 = S::from_f64(1.0) * c11 + + S::from_f64(1.0) * c12 + + S::from_f64(2.0) + * c6.clone() + * (S::from_f64(4.0) * c10 * k.get_elem(3) + + S::from_f64(2.0) * c6 * k.get_elem(1) + + S::from_f64(3.0) * c8 * k.get_elem(2) + + k.get_elem(0)) + + S::from_f64(1.0) * c7 + + S::from_f64(1.0) * c9 + + S::from_f64(1.0); + let c18 = c17.clone() * c2.clone() * c2.clone(); + let c19 = S::from_f64(1.0) / c4; + let c20 = c19.clone() / (c2.clone() * c2.clone() * c2.clone()); + let c21 = a * b * c19 * (-c13 * c2 + c15 * c17) / c3; + + let dx = S::Matrix::<2, 2>::from_array2([ + [ + c20.clone() + * fx.clone() + * (-c0.clone() * c16.clone() + c0 * c18.clone() + c14.clone()), + c21.clone() * fx, + ], + [ + c21 * fy.clone(), + c20 * fy * (-c1.clone() * c16 + c1 * c18 + c14), + ], + ]); + + dx0.select(&near_zero, dx) + } +} diff --git a/crates/sophus_sensor/src/dyn_camera.rs b/crates/sophus_sensor/src/dyn_camera.rs index 716510d..7ead209 100644 --- a/crates/sophus_sensor/src/dyn_camera.rs +++ b/crates/sophus_sensor/src/dyn_camera.rs @@ -1,99 +1,111 @@ -use crate::distortion_table::DistortTable; -use crate::general_camera::GeneralCameraEnum; -use crate::perspective_camera::PerspectiveCameraEnum; +use crate::camera_enum::general_camera::GeneralCameraEnum; +use crate::camera_enum::perspective_camera::PerspectiveCameraEnum; use crate::traits::IsCameraEnum; - -use sophus_calculus::types::MatF64; -use sophus_calculus::types::VecF64; +use crate::traits::IsPerspectiveCameraEnum; +use sophus_core::linalg::scalar::IsScalar; use sophus_image::image_view::ImageSize; -use sophus_image::mut_image::MutImage2F32; /// Dynamic camera facade #[derive(Debug, Clone)] -pub struct DynCameraFacade { +pub struct DynCameraFacade< + S: IsScalar, + const BATCH: usize, + CameraType: IsCameraEnum, +> { camera_type: CameraType, + phantom: std::marker::PhantomData, } /// Dynamic generalized camera (perspective or orthographic) -pub type DynGeneralCamera = DynCameraFacade; +pub type DynGeneralCamera = + DynCameraFacade>; /// Dynamic perspective camera -pub type DynCamera = DynCameraFacade; +pub type DynCamera = + DynCameraFacade>; -impl DynCameraFacade { +impl, const BATCH: usize, CameraType: IsCameraEnum> + DynCameraFacade +{ /// Create a new dynamic camera facade from a camera model pub fn from_model(camera_type: CameraType) -> Self { - Self { camera_type } + Self { + camera_type, + phantom: std::marker::PhantomData, + } } /// Create a pinhole camera instance - pub fn new_pinhole(params: &VecF64<4>, image_size: ImageSize) -> Self { + pub fn new_pinhole(params: &S::Vector<4>, image_size: ImageSize) -> Self { Self::from_model(CameraType::new_pinhole(params, image_size)) } /// Create a Kannala-Brandt camera instance - pub fn new_kannala_brandt(params: &VecF64<8>, image_size: ImageSize) -> Self { + pub fn new_kannala_brandt(params: &S::Vector<8>, image_size: ImageSize) -> Self { Self::from_model(CameraType::new_kannala_brandt(params, image_size)) } /// Projects a 3D point in the camera frame to a pixel in the image - pub fn cam_proj(&self, point_in_camera: &VecF64<3>) -> VecF64<2> { + pub fn cam_proj(&self, point_in_camera: 
&S::Vector<3>) -> S::Vector<2> { self.camera_type.cam_proj(point_in_camera) } /// Unprojects a pixel in the image to a 3D point in the camera frame - assuming z=1 - pub fn cam_unproj(&self, pixel: &VecF64<2>) -> VecF64<3> { - self.cam_unproj_with_z(pixel, 1.0) + pub fn cam_unproj(&self, pixel: &S::Vector<2>) -> S::Vector<3> { + self.cam_unproj_with_z(pixel, S::ones()) } /// Unprojects a pixel in the image to a 3D point in the camera frame - pub fn cam_unproj_with_z(&self, pixel: &VecF64<2>, z: f64) -> VecF64<3> { + pub fn cam_unproj_with_z(&self, pixel: &S::Vector<2>, z: S) -> S::Vector<3> { self.camera_type.cam_unproj_with_z(pixel, z) } /// Distortion - maps a point in the camera z=1 plane to a distorted point - pub fn distort(&self, proj_point_in_camera_z1_plane: &VecF64<2>) -> VecF64<2> { + pub fn distort(&self, proj_point_in_camera_z1_plane: &S::Vector<2>) -> S::Vector<2> { self.camera_type.distort(proj_point_in_camera_z1_plane) } /// Undistortion - maps a distorted pixel to a point in the camera z=1 plane - pub fn undistort(&self, pixel: &VecF64<2>) -> VecF64<2> { + pub fn undistort(&self, pixel: &S::Vector<2>) -> S::Vector<2> { self.camera_type.undistort(pixel) } /// Derivative of the distortion w.r.t. the point in the camera z=1 plane - pub fn dx_distort_x(&self, point_in_camera: &VecF64<2>) -> MatF64<2, 2> { + pub fn dx_distort_x(&self, point_in_camera: &S::Vector<2>) -> S::Matrix<2, 2> { self.camera_type.dx_distort_x(point_in_camera) } - /// Returns undistortion lookup table - pub fn undistort_table(&self) -> MutImage2F32 { - self.camera_type.undistort_table() + /// Returns the image size + pub fn image_size(&self) -> ImageSize { + self.camera_type.image_size() } +} - /// Returns distortion lookup table - pub fn distort_table(&self) -> DistortTable { - self.camera_type.distort_table() +impl, const BATCH: usize> DynCamera { + /// Returns the pinhole parameters + pub fn pinhole_params(&self) -> S::Vector<4> { + self.camera_type.pinhole_params() } } -mod tests { - - #[test] - fn camera_prop_tests() { - use approx::assert_abs_diff_eq; - use approx::assert_relative_eq; - - use crate::distortion_table::DistortTable; - use sophus_calculus::maps::vector_valued_maps::VectorValuedMapFromVector; - use sophus_image::image_view::ImageSize; - use sophus_image::image_view::IsImageView; - use sophus_image::interpolation::interpolate; - use sophus_image::mut_image::MutImage2F32; - - use super::DynCamera; - use super::VecF64; - let mut cameras: Vec = vec![]; - cameras.push(DynCamera::new_pinhole( +#[test] +fn dyn_camera_tests() { + use crate::distortion_table::distort_table; + use crate::distortion_table::undistort_table; + use crate::distortion_table::DistortTable; + use approx::assert_abs_diff_eq; + use approx::assert_relative_eq; + use sophus_core::calculus::maps::vector_valued_maps::VectorValuedMapFromVector; + use sophus_core::linalg::VecF64; + use sophus_image::image_view::ImageSize; + use sophus_image::image_view::IsImageView; + use sophus_image::interpolation::interpolate; + use sophus_image::mut_image::MutImage2F32; + + type DynCameraF64 = DynCamera; + + { + let mut cameras: Vec = vec![]; + cameras.push(DynCameraF64::new_pinhole( &VecF64::<4>::new(600.0, 600.0, 319.5, 239.5), ImageSize { width: 640, @@ -119,7 +131,7 @@ mod tests { VecF64::<2>::new(639.0, 479.0), ]; - let table: MutImage2F32 = camera.undistort_table(); + let table: MutImage2F32 = undistort_table(&camera); for pixel in pixels_in_image.clone() { for d in [1.0, 0.1, 0.5, 1.1, 3.0, 15.0] { @@ -147,7 +159,7 @@ mod tests { 
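The Kannala-Brandt `undistort` exercised by these tests inverts r_d(theta) with a capped Newton iteration, using d r_d / d theta = 1 + 3 k0 theta^2 + 5 k1 theta^4 + 7 k2 theta^6 + 9 k3 theta^8. A standalone sketch of that solve with the same seed and stopping rule; names are illustrative and the coefficients match the KB test parameters used in this file:

```rust
// Find theta with r_d(theta) = rth, where
// r_d(theta) = theta * (1 + k0 th^2 + k1 th^4 + k2 th^6 + k3 th^8).
fn kb_solve_theta(k: [f64; 4], rth: f64) -> f64 {
    let mut th = rth.sqrt(); // same seed as the patch
    for _ in 0..20 {
        let th2 = th * th;
        let th4 = th2 * th2;
        let th6 = th2 * th4;
        let th8 = th4 * th4;
        let thd = th * (1.0 + k[0] * th2 + k[1] * th4 + k[2] * th6 + k[3] * th8);
        // d r_d / d theta
        let d = 1.0
            + 3.0 * k[0] * th2
            + 5.0 * k[1] * th4
            + 7.0 * k[2] * th6
            + 9.0 * k[3] * th8;
        let step = (thd - rth) / d;
        th -= step;
        if step.abs() <= 1e-8 {
            break;
        }
    }
    th
}

fn main() {
    let k = [0.1, 0.01, 0.001, 0.0001]; // k0..k3
    let th_true = 0.5_f64;
    let th2 = th_true * th_true;
    let rth = th_true
        * (1.0 + k[0] * th2 + k[1] * th2.powi(2) + k[2] * th2.powi(3) + k[3] * th2.powi(4));
    assert!((kb_solve_theta(k, rth) - th_true).abs() < 1e-7);
}
```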
assert_relative_eq!(dx, numeric_dx, epsilon = 1e-4); } - let table: DistortTable = camera.distort_table(); + let table: DistortTable = distort_table(&camera); for pixel in pixels_in_image { let proj = camera.undistort(&pixel); @@ -162,14 +174,8 @@ mod tests { } } - #[test] - fn camera_pinhole_distort_test() { - use approx::assert_relative_eq; - use sophus_image::image_view::ImageSize; - - use super::DynCamera; - use super::VecF64; - let camera: DynCamera = DynCamera::new_pinhole( + { + let camera: DynCameraF64 = DynCameraF64::new_pinhole( &VecF64::<4>::new(600.0, 600.0, 319.5, 239.5), ImageSize { width: 640, @@ -208,14 +214,8 @@ mod tests { } } - #[test] - fn camera_kannala_brandt_distort_test() { - use approx::assert_relative_eq; - use sophus_image::image_view::ImageSize; - - use super::DynCamera; - use super::VecF64; - let camera: DynCamera = DynCamera::new_kannala_brandt( + { + let camera: DynCameraF64 = DynCameraF64::new_kannala_brandt( &VecF64::<8>::from_vec(vec![1000.0, 1000.0, 320.0, 280.0, 0.1, 0.01, 0.001, 0.0001]), ImageSize { width: 640, diff --git a/crates/sophus_sensor/src/general_camera.rs b/crates/sophus_sensor/src/general_camera.rs deleted file mode 100644 index 38064f4..0000000 --- a/crates/sophus_sensor/src/general_camera.rs +++ /dev/null @@ -1,93 +0,0 @@ -use crate::distortion_table::DistortTable; -use crate::ortho_camera::OrthoCamera; -use crate::perspective_camera::PerspectiveCameraEnum; -use crate::traits::IsCameraEnum; - -use sophus_calculus::types::MatF64; -use sophus_calculus::types::VecF64; -use sophus_image::image_view::ImageSize; -use sophus_image::mut_image::MutImage2F32; - -/// Generalized camera enum -#[derive(Debug, Clone)] -pub enum GeneralCameraEnum { - /// Perspective camera enum - Perspective(PerspectiveCameraEnum), - /// Orthographic camera - Ortho(OrthoCamera), -} - -impl GeneralCameraEnum { - /// Create a new perspective camera instance - pub fn new_perspective(model: PerspectiveCameraEnum) -> Self { - Self::Perspective(model) - } -} - -impl IsCameraEnum for GeneralCameraEnum { - fn new_pinhole(params: &VecF64<4>, image_size: ImageSize) -> Self { - Self::Perspective(PerspectiveCameraEnum::new_pinhole(params, image_size)) - } - - fn new_kannala_brandt(params: &VecF64<8>, image_size: ImageSize) -> Self { - Self::Perspective(PerspectiveCameraEnum::new_kannala_brandt( - params, image_size, - )) - } - - fn cam_proj(&self, point_in_camera: &VecF64<3>) -> VecF64<2> { - match self { - GeneralCameraEnum::Perspective(camera) => camera.cam_proj(point_in_camera), - GeneralCameraEnum::Ortho(camera) => camera.cam_proj(point_in_camera), - } - } - - fn cam_unproj_with_z(&self, point_in_camera: &VecF64<2>, z: f64) -> VecF64<3> { - match self { - GeneralCameraEnum::Perspective(camera) => camera.cam_unproj_with_z(point_in_camera, z), - GeneralCameraEnum::Ortho(camera) => camera.cam_unproj_with_z(point_in_camera, z), - } - } - - fn distort(&self, point_in_camera: &VecF64<2>) -> VecF64<2> { - match self { - GeneralCameraEnum::Perspective(camera) => camera.distort(point_in_camera), - GeneralCameraEnum::Ortho(camera) => camera.distort(point_in_camera), - } - } - - fn undistort(&self, point_in_camera: &VecF64<2>) -> VecF64<2> { - match self { - GeneralCameraEnum::Perspective(camera) => camera.undistort(point_in_camera), - GeneralCameraEnum::Ortho(camera) => camera.undistort(point_in_camera), - } - } - - fn dx_distort_x(&self, point_in_camera: &VecF64<2>) -> MatF64<2, 2> { - match self { - GeneralCameraEnum::Perspective(camera) => camera.dx_distort_x(point_in_camera), - 
GeneralCameraEnum::Ortho(camera) => camera.dx_distort_x(point_in_camera), - } - } - - fn undistort_table(&self) -> MutImage2F32 { - match self { - GeneralCameraEnum::Perspective(camera) => camera.undistort_table(), - GeneralCameraEnum::Ortho(camera) => camera.undistort_table(), - } - } - - fn image_size(&self) -> ImageSize { - match self { - GeneralCameraEnum::Perspective(camera) => camera.image_size(), - GeneralCameraEnum::Ortho(camera) => camera.image_size(), - } - } - - fn distort_table(&self) -> DistortTable { - match self { - GeneralCameraEnum::Perspective(camera) => camera.distort_table(), - GeneralCameraEnum::Ortho(camera) => camera.distort_table(), - } - } -} diff --git a/crates/sophus_sensor/src/generic_camera.rs b/crates/sophus_sensor/src/generic_camera.rs deleted file mode 100644 index b05ff86..0000000 --- a/crates/sophus_sensor/src/generic_camera.rs +++ /dev/null @@ -1,211 +0,0 @@ -use nalgebra::SVector; - -use super::distortion_table::DistortTable; -use super::traits::IsCameraDistortionImpl; -use super::traits::IsProjection; -use sophus_calculus::region; -use sophus_calculus::region::IsRegion; -use sophus_calculus::region::Region; -use sophus_calculus::types::scalar::IsScalar; -use sophus_calculus::types::vector::IsVector; -use sophus_calculus::types::MatF64; -use sophus_calculus::types::VecF64; -use sophus_image::arc_image::ArcImage2F32; -use sophus_image::image_view::ImageSize; -use sophus_image::mut_image::MutImage2F32; -use sophus_image::mut_image_view::IsMutImageView; - -/// A generic camera model -#[derive(Debug, Copy, Clone)] -pub struct Camera< - S: IsScalar<1>, - const DISTORT: usize, - const PARAMS: usize, - Distort: IsCameraDistortionImpl, - Proj: IsProjection, -> { - params: S::Vector, - phantom: std::marker::PhantomData<(Distort, Proj)>, - image_size: ImageSize, -} - -impl< - S: IsScalar<1>, - const DISTORT: usize, - const PARAMS: usize, - Distort: IsCameraDistortionImpl, - Proj: IsProjection, - > Camera -{ - /// Creates a new camera - pub fn new(params: &S::Vector, image_size: ImageSize) -> Self { - Self::from_params_and_size(params, image_size) - } - - /// Creates a new camera from parameters and image size - pub fn from_params_and_size(params: &S::Vector, size: ImageSize) -> Self { - assert!( - Distort::are_params_valid(params), - "Invalid parameters for {:?}", - params - ); - Self { - params: params.clone(), - phantom: std::marker::PhantomData, - image_size: size, - } - } - - /// Returns the image size - pub fn image_size(&self) -> ImageSize { - self.image_size - } - - /// Distortion - maps a point in the camera z=1 plane to a distorted point - pub fn distort(&self, proj_point_in_camera_z1_plane: &S::Vector<2>) -> S::Vector<2> { - Distort::distort(&self.params, proj_point_in_camera_z1_plane) - } - - /// Undistortion - maps a distorted pixel to a point in the camera z=1 plane - pub fn undistort(&self, pixel: &S::Vector<2>) -> S::Vector<2> { - Distort::undistort(&self.params, pixel) - } - - /// Derivative of the distortion w.r.t. 
the point in the camera z=1 plane - pub fn dx_distort_x(&self, proj_point_in_camera_z1_plane: &VecF64<2>) -> MatF64<2, 2> { - Distort::dx_distort_x(self.params.real(), proj_point_in_camera_z1_plane) - } - - /// Projects a 3D point in the camera frame to a pixel in the image - pub fn cam_proj(&self, point_in_camera: &S::Vector<3>) -> S::Vector<2> { - self.distort(&Proj::proj(point_in_camera)) - } - - /// Unprojects a pixel in the image to a 3D point in the camera frame - assuming z=1 - pub fn cam_unproj(&self, point_in_camera: &S::Vector<2>) -> S::Vector<3> { - self.cam_unproj_with_z(point_in_camera, 1.0.into()) - } - - /// Unprojects a pixel in the image to a 3D point in the camera frame - pub fn cam_unproj_with_z(&self, point_in_camera: &S::Vector<2>, z: S) -> S::Vector<3> { - Proj::unproj(&self.undistort(point_in_camera), z) - } - - /// Sets the camera parameters - pub fn set_params(&mut self, params: &S::Vector) { - self.params = params.clone(); - } - - /// Returns the camera parameters - pub fn params(&self) -> &S::Vector { - &self.params - } - - /// Returns true if the camera is empty - pub fn is_empty(&self) -> bool { - self.image_size.width == 0 || self.image_size.height == 0 - } - - /// Examples of valid parameters - pub fn params_examples() -> Vec> { - Distort::params_examples() - } - - /// Examples of invalid parameters - pub fn invalid_params_examples() -> Vec> { - Distort::invalid_params_examples() - } -} - -impl< - const DISTORT: usize, - const PARAMS: usize, - Distort: IsCameraDistortionImpl, - Proj: IsProjection, - > Camera -{ - /// Returns the undistortion lookup table - pub fn undistort_table(&self) -> MutImage2F32 { - let mut table = MutImage2F32::from_image_size(self.image_size); - let w = self.image_size.width; - let h = self.image_size.height; - for v in 0..h { - for u in 0..w { - let pixel = self.undistort(&VecF64::<2>::new(u as f64, v as f64)); - *table.mut_pixel(u, v) = pixel.cast(); - } - } - table - } - - /// Returns the distortion lookup table - pub fn distort_table(&self) -> DistortTable { - // first we find min and max values in the proj plane - // just test the 4 corners might not be enough - // so we will test the image boundary - - let mut region = Region::<2>::empty(); - - let w = self.image_size.width; - let h = self.image_size.height; - - for u in 0..self.image_size.width { - // top border - let v = 0; - let point_in_proj = self.undistort(&VecF64::<2>::new(u as f64, v as f64)); - region.extend(&point_in_proj); - // bottom border - let v = self.image_size.height - 1; - let point_in_proj = self.undistort(&VecF64::<2>::new(u as f64, v as f64)); - region.extend(&point_in_proj); - } - for v in 0..self.image_size.height { - // left border - let u = 0; - let point_in_proj = self.undistort(&VecF64::<2>::new(u as f64, v as f64)); - region.extend(&point_in_proj); - // right border - let u = self.image_size.width - 1; - let point_in_proj = self.undistort(&VecF64::<2>::new(u as f64, v as f64)); - region.extend(&point_in_proj); - } - let region = - region::Region::<2>::from_min_max(region.min().cast() * 2.0, region.max().cast() * 2.0); - - let mut distort_table = DistortTable { - table: ArcImage2F32::from_image_size_and_val( - self.image_size, - SVector::::zeros(), - ), - region, - }; - - let mut table = MutImage2F32::from_image_size(self.image_size); - - for v in 0..h { - for u in 0..w { - let point_proj = VecF64::<2>::new( - distort_table.offset().x + (u as f64) * distort_table.incr().x, - distort_table.offset().y + (v as f64) * distort_table.incr().y, - ); - let 
pixel = self.distort(&point_proj); - *table.mut_pixel(u, v) = SVector::::new(pixel.cast().x, pixel.cast().y); - } - } - distort_table.table = table.into(); - distort_table - } -} - -impl< - S: IsScalar<1>, - const DISTORT: usize, - const PARAMS: usize, - Distort: IsCameraDistortionImpl, - Proj: IsProjection, - > Default for Camera -{ - fn default() -> Self { - Self::from_params_and_size(&Distort::identity_params(), ImageSize::default()) - } -} diff --git a/crates/sophus_sensor/src/kannala_brandt.rs b/crates/sophus_sensor/src/kannala_brandt.rs deleted file mode 100644 index 1939410..0000000 --- a/crates/sophus_sensor/src/kannala_brandt.rs +++ /dev/null @@ -1,211 +0,0 @@ -use std::marker::PhantomData; - -use nalgebra::RowVector2; - -use sophus_calculus::types::params::ParamsImpl; -use sophus_calculus::types::scalar::IsScalar; -use sophus_calculus::types::vector::IsVector; -use sophus_calculus::types::MatF64; -use sophus_calculus::types::VecF64; - -use super::affine::AffineDistortionImpl; -use super::traits::IsCameraDistortionImpl; - -/// Kannala-Brandt distortion implementation -#[derive(Debug, Clone, Copy)] -pub struct KannalaBrandtDistortionImpl> { - phantom: PhantomData, -} - -impl> ParamsImpl for KannalaBrandtDistortionImpl { - fn are_params_valid(params: &S::Vector<8>) -> bool { - params.real()[0] != 0.0 && params.real()[1] != 0.0 - } - - fn params_examples() -> Vec> { - vec![S::Vector::<8>::from_c_array([ - 1.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, - ])] - } - - fn invalid_params_examples() -> Vec> { - vec![ - S::Vector::<8>::from_c_array([0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]), - S::Vector::<8>::from_c_array([1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]), - ] - } -} - -impl> IsCameraDistortionImpl for KannalaBrandtDistortionImpl { - fn distort( - params: &S::Vector<8>, - proj_point_in_camera_z1_plane: &S::Vector<2>, - ) -> S::Vector<2> { - let k0 = params.get(4); - let k1 = params.get(5); - let k2 = params.get(6); - let k3 = params.get(7); - - let radius_sq = proj_point_in_camera_z1_plane.get(0) * proj_point_in_camera_z1_plane.get(0) - + proj_point_in_camera_z1_plane.get(1) * proj_point_in_camera_z1_plane.get(1); - - if radius_sq.real() > 1e-8 { - let radius = radius_sq.sqrt(); - let radius_inverse = S::c(1.0) / radius.clone(); - let theta = radius.atan2(1.0.into()); - let theta2 = theta.clone() * theta.clone(); - let theta4 = theta2.clone() * theta2.clone(); - let theta6 = theta2.clone() * theta4.clone(); - let theta8 = theta4.clone() * theta4.clone(); - - let r_distorted = - theta * (S::c(1.0) + k0 * theta2 + k1 * theta4 + k2 * theta6 + k3 * theta8); - let scaling = r_distorted * radius_inverse; - return S::Vector::<2>::from_array([ - scaling.clone() * proj_point_in_camera_z1_plane.get(0) * params.get(0) - + params.get(2), - scaling * proj_point_in_camera_z1_plane.get(1) * params.get(1) + params.get(3), - ]); - } - let pinhole_params = params.get_fixed_rows::<4>(0); - AffineDistortionImpl::::distort(&pinhole_params, proj_point_in_camera_z1_plane) - } - - fn undistort(params: &S::Vector<8>, distorted_point: &S::Vector<2>) -> S::Vector<2> { - let fu = params.get(0); - let fv = params.get(1); - let u0 = params.get(2); - let v0 = params.get(3); - - let k0 = params.get(4); - let k1 = params.get(5); - let k2 = params.get(6); - let k3 = params.get(7); - - let un = (distorted_point.get(0) - u0) / fu; - let vn = (distorted_point.get(1) - v0) / fv; - let rth2 = un.clone() * un.clone() + vn.clone() * vn.clone(); - - if rth2.real() < 1e-8 { - return S::Vector::<2>::from_array([un, vn]); - } - - let 
rth = rth2.sqrt(); - - let mut th = rth.clone().sqrt(); - - let mut iters = 0; - loop { - let th2 = th.clone() * th.clone(); - let th4 = th2.clone() * th2.clone(); - let th6 = th2.clone() * th4.clone(); - let th8 = th4.clone() * th4.clone(); - - let thd = th.clone() - * (S::c(1.0) - + k0.clone() * th2.clone() - + k1.clone() * th4.clone() - + k2.clone() * th6.clone() - + k3.clone() * th8.clone()); - let d_thd_wtr_th = S::c(1.0) - + S::c(3.0) * k0.clone() * th2 - + S::c(5.0) * k1.clone() * th4 - + S::c(7.0) * k2.clone() * th6 - + S::c(9.0) * k3.clone() * th8; - - let step = (thd - rth.clone()) / d_thd_wtr_th; - th = th - step.clone(); - - if step.real().abs() < 1e-8 { - break; - } - - iters += 1; - - const MAX_ITERS: usize = 20; - const HIGH_ITERS: usize = MAX_ITERS / 2; - - if iters == HIGH_ITERS { - // debug!( - // "undistort: did not converge in {} iterations, step: {}", - // iters, step - // ); - } - - if iters > HIGH_ITERS { - // trace!( - // "undistort: did not converge in {} iterations, step: {}", - // iters, - // step - // ); - } - - if iters >= 20 { - // warn!("undistort: max iters ({}) reached, step: {}", iters, step); - break; - } - } - - let radius_undistorted = th.tan(); - - if radius_undistorted.real() < 0.0 { - S::Vector::<2>::from_array([ - -radius_undistorted.clone() * un / rth.clone(), - -radius_undistorted * vn / rth, - ]) - } else { - S::Vector::<2>::from_array([ - radius_undistorted.clone() * un / rth.clone(), - radius_undistorted * vn / rth, - ]) - } - } - - fn dx_distort_x(params: &VecF64<8>, proj_point_in_camera_z1_plane: &VecF64<2>) -> MatF64<2, 2> { - let a = proj_point_in_camera_z1_plane[0]; - let b = proj_point_in_camera_z1_plane[1]; - let fx = params[0]; - let fy = params[1]; - - let k = params.fixed_rows::<4>(4).into_owned(); - - let radius_sq = a * a + b * b; - - if radius_sq < 1e-8 { - return MatF64::<2, 2>::from_diagonal(&VecF64::<2>::new(fx, fy)); - } - - let c0 = a.powi(2); - let c1 = b.powi(2); - let c2 = c0 + c1; - let c3 = c2.powf(5.0 / 2.0); - let c4 = c2 + 1.0; - let c5 = c2.sqrt().atan(); - let c6 = c5.powi(2); - let c7 = c6 * k[0]; - let c8 = c5.powi(4); - let c9 = c8 * k[1]; - let c10 = c5.powi(6); - let c11 = c10 * k[2]; - let c12 = c5.powi(8) * k[3]; - let c13 = 1.0 * c4 * c5 * (c11 + c12 + c7 + c9 + 1.0); - let c14 = c13 * c3; - let c15 = c2.powf(3.0 / 2.0); - let c16 = c13 * c15; - let c17 = 1.0 * c11 - + 1.0 * c12 - + 2.0 * c6 * (4.0 * c10 * k[3] + 2.0 * c6 * k[1] + 3.0 * c8 * k[2] + k[0]) - + 1.0 * c7 - + 1.0 * c9 - + 1.0; - let c18 = c17 * c2.powi(2); - let c19 = 1.0 / c4; - let c20 = c19 / c2.powi(3); - let c21 = a * b * c19 * (-c13 * c2 + c15 * c17) / c3; - - MatF64::<2, 2>::from_rows(&[ - RowVector2::new(c20 * fx * (-c0 * c16 + c0 * c18 + c14), c21 * fx), - RowVector2::new(c21 * fy, c20 * fy * (-c1 * c16 + c1 * c18 + c14)), - ]) - } -} diff --git a/crates/sophus_sensor/src/lib.rs b/crates/sophus_sensor/src/lib.rs index 20d249c..9116ee5 100644 --- a/crates/sophus_sensor/src/lib.rs +++ b/crates/sophus_sensor/src/lib.rs @@ -1,22 +1,25 @@ +#![feature(portable_simd)] #![deny(missing_docs)] //! 
# Sensor (aka camera) module -/// Affine distortion - for pinhole cameras -pub mod affine; /// Distortion lookup table pub mod distortion_table; + /// A type-erased camera struct pub mod dyn_camera; -/// A generalized camera enum -pub mod general_camera; -/// A generic camera struct -pub mod generic_camera; -/// Kannala-Brandt distortion - for fisheye cameras -pub mod kannala_brandt; -/// Orthographic camera -pub mod ortho_camera; -/// Perspective camera -pub mod perspective_camera; + +/// A generic camera model +pub mod camera; + +/// Projection models +pub mod camera_enum; + +/// Projection models +pub mod projections; + +/// Distortion models +pub mod distortions; + /// Sensor traits pub mod traits; diff --git a/crates/sophus_sensor/src/ortho_camera.rs b/crates/sophus_sensor/src/ortho_camera.rs deleted file mode 100644 index ea067ae..0000000 --- a/crates/sophus_sensor/src/ortho_camera.rs +++ /dev/null @@ -1,33 +0,0 @@ -use std::marker::PhantomData; - -use sophus_calculus::types::scalar::IsScalar; -use sophus_calculus::types::vector::IsVector; -use sophus_calculus::types::MatF64; -use sophus_calculus::types::VecF64; - -use super::affine::AffineDistortionImpl; -use super::generic_camera::Camera; -use super::traits::IsProjection; - -/// Orthographic projection implementation -#[derive(Debug, Clone)] -pub struct ProjectionOrtho> { - phantom: PhantomData, -} - -impl> IsProjection for ProjectionOrtho { - fn proj(point_in_camera: &S::Vector<3>) -> S::Vector<2> { - point_in_camera.get_fixed_rows::<2>(0) - } - - fn unproj(point_in_camera: &S::Vector<2>, extension: S) -> S::Vector<3> { - S::Vector::<3>::from_array([point_in_camera.get(0), point_in_camera.get(1), extension]) - } - - fn dx_proj_x(_point_in_camera: &VecF64<3>) -> MatF64<2, 3> { - unimplemented!("dx_proj_x not implemented for ProjectionOrtho") - } -} - -/// Orthographic camera -pub type OrthoCamera = Camera, ProjectionOrtho>; diff --git a/crates/sophus_sensor/src/perspective_camera.rs b/crates/sophus_sensor/src/perspective_camera.rs deleted file mode 100644 index 44addab..0000000 --- a/crates/sophus_sensor/src/perspective_camera.rs +++ /dev/null @@ -1,136 +0,0 @@ -use crate::affine::AffineDistortionImpl; -use crate::distortion_table::DistortTable; -use crate::generic_camera::Camera; -use crate::kannala_brandt::KannalaBrandtDistortionImpl; -use crate::traits::IsCameraEnum; -use crate::traits::IsProjection; - -use sophus_calculus::types::matrix::IsMatrix; -use sophus_calculus::types::scalar::IsScalar; -use sophus_calculus::types::vector::IsVector; -use sophus_calculus::types::MatF64; -use sophus_calculus::types::VecF64; -use sophus_image::image_view::ImageSize; -use sophus_image::mut_image::MutImage2F32; - -/// Perspective camera projection - using z=1 plane -/// -/// Projects a 3D point in the camera frame to a 2D point in the z=1 plane -#[derive(Debug, Clone, Copy)] -pub struct ProjectionZ1; - -impl> IsProjection for ProjectionZ1 { - fn proj(point_in_camera: &S::Vector<3>) -> S::Vector<2> { - S::Vector::<2>::from_array([ - point_in_camera.get(0) / point_in_camera.get(2), - point_in_camera.get(1) / point_in_camera.get(2), - ]) - } - - fn unproj(point_in_camera: &S::Vector<2>, extension: S) -> S::Vector<3> { - S::Vector::<3>::from_array([ - point_in_camera.get(0) * extension.clone(), - point_in_camera.get(1) * extension.clone(), - extension, - ]) - } - - fn dx_proj_x(point_in_camera: &VecF64<3>) -> MatF64<2, 3> { - MatF64::<2, 3>::from_array2([ - [ - 1.0 / point_in_camera[2], - 0.0, - -point_in_camera[0] / (point_in_camera[2] * 
point_in_camera[2]), - ], - [ - 0.0, - 1.0 / point_in_camera[2], - -point_in_camera[1] / (point_in_camera[2] * point_in_camera[2]), - ], - ]) - } -} - -/// Pinhole camera -pub type PinholeCamera = Camera, ProjectionZ1>; -/// Kannala-Brandt camera -pub type KannalaBrandtCamera = Camera, ProjectionZ1>; - -/// Perspective camera enum -#[derive(Debug, Clone, Copy)] -pub enum PerspectiveCameraEnum { - /// Pinhole camera - Pinhole(PinholeCamera), - /// Kannala-Brandt camera - KannalaBrandt(KannalaBrandtCamera), -} - -impl IsCameraEnum for PerspectiveCameraEnum { - fn new_pinhole(params: &VecF64<4>, image_size: ImageSize) -> Self { - Self::Pinhole(PinholeCamera::from_params_and_size(params, image_size)) - } - - fn new_kannala_brandt(params: &VecF64<8>, image_size: ImageSize) -> Self { - Self::KannalaBrandt(KannalaBrandtCamera::from_params_and_size( - params, image_size, - )) - } - - fn cam_proj(&self, point_in_camera: &VecF64<3>) -> VecF64<2> { - match self { - PerspectiveCameraEnum::Pinhole(camera) => camera.cam_proj(point_in_camera), - PerspectiveCameraEnum::KannalaBrandt(camera) => camera.cam_proj(point_in_camera), - } - } - - fn cam_unproj_with_z(&self, point_in_camera: &VecF64<2>, z: f64) -> VecF64<3> { - match self { - PerspectiveCameraEnum::Pinhole(camera) => camera.cam_unproj_with_z(point_in_camera, z), - PerspectiveCameraEnum::KannalaBrandt(camera) => { - camera.cam_unproj_with_z(point_in_camera, z) - } - } - } - - fn distort(&self, point_in_camera: &VecF64<2>) -> VecF64<2> { - match self { - PerspectiveCameraEnum::Pinhole(camera) => camera.distort(point_in_camera), - PerspectiveCameraEnum::KannalaBrandt(camera) => camera.distort(point_in_camera), - } - } - - fn undistort(&self, point_in_camera: &VecF64<2>) -> VecF64<2> { - match self { - PerspectiveCameraEnum::Pinhole(camera) => camera.undistort(point_in_camera), - PerspectiveCameraEnum::KannalaBrandt(camera) => camera.undistort(point_in_camera), - } - } - - fn dx_distort_x(&self, point_in_camera: &VecF64<2>) -> MatF64<2, 2> { - match self { - PerspectiveCameraEnum::Pinhole(camera) => camera.dx_distort_x(point_in_camera), - PerspectiveCameraEnum::KannalaBrandt(camera) => camera.dx_distort_x(point_in_camera), - } - } - - fn undistort_table(&self) -> MutImage2F32 { - match self { - PerspectiveCameraEnum::Pinhole(camera) => camera.undistort_table(), - PerspectiveCameraEnum::KannalaBrandt(camera) => camera.undistort_table(), - } - } - - fn image_size(&self) -> ImageSize { - match self { - PerspectiveCameraEnum::Pinhole(camera) => camera.image_size(), - PerspectiveCameraEnum::KannalaBrandt(camera) => camera.image_size(), - } - } - - fn distort_table(&self) -> DistortTable { - match self { - PerspectiveCameraEnum::Pinhole(camera) => camera.distort_table(), - PerspectiveCameraEnum::KannalaBrandt(camera) => camera.distort_table(), - } - } -} diff --git a/crates/sophus_sensor/src/projections.rs b/crates/sophus_sensor/src/projections.rs new file mode 100644 index 0000000..fa4c0d5 --- /dev/null +++ b/crates/sophus_sensor/src/projections.rs @@ -0,0 +1,4 @@ +/// Orthographic camera +pub mod orthographic; +/// Perspective camera +pub mod perspective; diff --git a/crates/sophus_sensor/src/projections/orthographic.rs b/crates/sophus_sensor/src/projections/orthographic.rs new file mode 100644 index 0000000..494b90b --- /dev/null +++ b/crates/sophus_sensor/src/projections/orthographic.rs @@ -0,0 +1,36 @@ +use crate::camera::Camera; +use crate::distortions::affine::AffineDistortionImpl; +use crate::traits::IsProjection; +use 
sophus_core::linalg::scalar::IsScalar; +use sophus_core::linalg::vector::IsVector; +use std::marker::PhantomData; + +/// Orthographic projection implementation +#[derive(Debug, Clone)] +pub struct OrthographisProjection, const BATCH: usize> { + phantom: PhantomData, +} + +impl, const BATCH: usize> IsProjection + for OrthographisProjection +{ + fn proj(point_in_camera: &S::Vector<3>) -> S::Vector<2> { + point_in_camera.get_fixed_rows::<2>(0) + } + + fn unproj(point_in_camera: &S::Vector<2>, extension: S) -> S::Vector<3> { + S::Vector::<3>::from_array([ + point_in_camera.get_elem(0), + point_in_camera.get_elem(1), + extension, + ]) + } + + fn dx_proj_x(_point_in_camera: &S::Vector<3>) -> S::Matrix<2, 3> { + unimplemented!("dx_proj_x not implemented for ProjectionOrtho") + } +} + +/// Orthographic camera +pub type OrthographicCamera = + Camera, OrthographisProjection>; diff --git a/crates/sophus_sensor/src/projections/perspective.rs b/crates/sophus_sensor/src/projections/perspective.rs new file mode 100644 index 0000000..9b2d62c --- /dev/null +++ b/crates/sophus_sensor/src/projections/perspective.rs @@ -0,0 +1,44 @@ +use crate::traits::IsProjection; +use sophus_core::linalg::matrix::IsMatrix; +use sophus_core::linalg::scalar::IsScalar; +use sophus_core::linalg::vector::IsVector; + +/// Perspective camera projection - using z=1 plane +/// +/// Projects a 3D point in the camera frame to a 2D point in the z=1 plane +#[derive(Debug, Clone, Copy)] +pub struct PerspectiveProjection; + +impl, const BATCH: usize> IsProjection for PerspectiveProjection { + fn proj(point_in_camera: &S::Vector<3>) -> S::Vector<2> { + S::Vector::<2>::from_array([ + point_in_camera.get_elem(0) / point_in_camera.get_elem(2), + point_in_camera.get_elem(1) / point_in_camera.get_elem(2), + ]) + } + + fn unproj(point_in_camera: &S::Vector<2>, extension: S) -> S::Vector<3> { + S::Vector::<3>::from_array([ + point_in_camera.get_elem(0) * extension.clone(), + point_in_camera.get_elem(1) * extension.clone(), + extension, + ]) + } + + fn dx_proj_x(point_in_camera: &S::Vector<3>) -> S::Matrix<2, 3> { + S::Matrix::<2, 3>::from_array2([ + [ + S::ones() / point_in_camera.get_elem(2), + S::zeros(), + -point_in_camera.get_elem(0) + / (point_in_camera.get_elem(2) * point_in_camera.get_elem(2)), + ], + [ + S::zeros(), + S::ones() / point_in_camera.get_elem(2), + -point_in_camera.get_elem(1) + / (point_in_camera.get_elem(2) * point_in_camera.get_elem(2)), + ], + ]) + } +} diff --git a/crates/sophus_sensor/src/traits.rs b/crates/sophus_sensor/src/traits.rs index 42bf15f..820556e 100644 --- a/crates/sophus_sensor/src/traits.rs +++ b/crates/sophus_sensor/src/traits.rs @@ -1,23 +1,21 @@ -use crate::distortion_table::DistortTable; - -use sophus_calculus::types::params::ParamsImpl; -use sophus_calculus::types::scalar::IsScalar; -use sophus_calculus::types::vector::IsVector; -use sophus_calculus::types::vector::IsVectorLike; -use sophus_calculus::types::MatF64; -use sophus_calculus::types::VecF64; +use sophus_core::linalg::scalar::IsScalar; +use sophus_core::linalg::vector::IsVector; +use sophus_core::params::ParamsImpl; use sophus_image::image_view::ImageSize; -use sophus_image::mut_image::MutImage2F32; /// Camera distortion implementation trait -pub trait IsCameraDistortionImpl, const DISTORT: usize, const PARAMS: usize>: - ParamsImpl +pub trait IsCameraDistortionImpl< + S: IsScalar, + const DISTORT: usize, + const PARAMS: usize, + const BATCH: usize, +>: ParamsImpl { /// identity parameters fn identity_params() -> S::Vector { - let mut params = 
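`identity_params` here zeroes the parameter vector and sets the first two entries to one; with the `[fx, fy, cx, cy, ...]` layout that makes the distortion the identity map. A plain f64 illustration of what it produces for the 4-parameter affine model (standalone sketch, not the trait's code):

```rust
fn main() {
    // zeros(), then set fx and fy to one
    let mut params = [0.0_f64; 4];
    params[0] = 1.0; // fx
    params[1] = 1.0; // fy

    // u = fx * x + cx, v = fy * y + cy reduces to the identity
    let (x, y) = (0.3, -0.7);
    let uv = (x * params[0] + params[2], y * params[1] + params[3]);
    assert_eq!(uv, (x, y));
}
```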
S::Vector::::zero(); - params.set_c(0, 1.0); - params.set_c(1, 1.0); + let mut params = S::Vector::::zeros(); + params.set_elem(0, S::ones()); + params.set_elem(1, S::ones()); params } @@ -32,13 +30,13 @@ pub trait IsCameraDistortionImpl, const DISTORT: usize, const PAR /// Derivative of the distortion w.r.t. the point in the camera z=1 plane fn dx_distort_x( - params: &VecF64, - proj_point_in_camera_z1_plane: &VecF64<2>, - ) -> MatF64<2, 2>; + params: &S::Vector, + proj_point_in_camera_z1_plane: &S::Vector<2>, + ) -> S::Matrix<2, 2>; } /// Camera projection implementation trait -pub trait IsProjection> { +pub trait IsProjection, const BATCH: usize> { /// Projects a 3D point in the camera frame to a 2D point in the z=1 plane fn proj(point_in_camera: &S::Vector<3>) -> S::Vector<2>; @@ -46,31 +44,33 @@ pub trait IsProjection> { fn unproj(point_in_camera: &S::Vector<2>, extension: S) -> S::Vector<3>; /// Derivative of the projection w.r.t. the point in the camera frame - fn dx_proj_x(point_in_camera: &VecF64<3>) -> MatF64<2, 3>; + fn dx_proj_x(point_in_camera: &S::Vector<3>) -> S::Matrix<2, 3>; } /// Camera trait -pub trait IsCameraEnum { +pub trait IsCameraEnum, const BATCH: usize> { /// Creates a new pinhole camera - fn new_pinhole(params: &VecF64<4>, image_size: ImageSize) -> Self; + fn new_pinhole(params: &S::Vector<4>, image_size: ImageSize) -> Self; /// Creates a new Kannala-Brandt camera - fn new_kannala_brandt(params: &VecF64<8>, image_size: ImageSize) -> Self; + fn new_kannala_brandt(params: &S::Vector<8>, image_size: ImageSize) -> Self; /// Returns the image size fn image_size(&self) -> ImageSize; /// Projects a 3D point in the camera frame to a pixel in the image - fn cam_proj(&self, point_in_camera: &VecF64<3>) -> VecF64<2>; + fn cam_proj(&self, point_in_camera: &S::Vector<3>) -> S::Vector<2>; /// Unprojects a pixel in the image to a 3D point in the camera frame - fn cam_unproj_with_z(&self, pixel: &VecF64<2>, z: f64) -> VecF64<3>; + fn cam_unproj_with_z(&self, pixel: &S::Vector<2>, z: S) -> S::Vector<3>; /// Distortion - maps a point in the camera z=1 plane to a distorted point - fn distort(&self, proj_point_in_camera_z1_plane: &VecF64<2>) -> VecF64<2>; + fn distort(&self, proj_point_in_camera_z1_plane: &S::Vector<2>) -> S::Vector<2>; /// Undistortion - maps a distorted pixel to a point in the camera z=1 plane - fn undistort(&self, pixel: &VecF64<2>) -> VecF64<2>; - /// Returns the undistortion lookup table - fn undistort_table(&self) -> MutImage2F32; - /// Returns the distortion lookup table - fn distort_table(&self) -> DistortTable; + fn undistort(&self, pixel: &S::Vector<2>) -> S::Vector<2>; /// Derivative of the distortion w.r.t. 
the point in the camera z=1 plane - fn dx_distort_x(&self, proj_point_in_camera_z1_plane: &VecF64<2>) -> MatF64<2, 2>; + fn dx_distort_x(&self, proj_point_in_camera_z1_plane: &S::Vector<2>) -> S::Matrix<2, 2>; +} + +/// Dynamic camera trait +pub trait IsPerspectiveCameraEnum, const BATCH: usize> { + /// Return the first four parameters: fx, fy, cx, cy + fn pinhole_params(&self) -> S::Vector<4>; } diff --git a/crates/sophus_tensor/src/arc_tensor.rs b/crates/sophus_tensor/src/arc_tensor.rs deleted file mode 100644 index 8aa36c3..0000000 --- a/crates/sophus_tensor/src/arc_tensor.rs +++ /dev/null @@ -1,599 +0,0 @@ -use ndarray::Dimension; - -use crate::element::BatchMat; -use crate::element::BatchScalar; -use crate::element::BatchVec; -use crate::element::IsStaticTensor; -use crate::element::IsTensorScalar; -use crate::element::SMat; -use crate::element::SVec; -use crate::mut_tensor::InnerScalarToVec; -use crate::mut_tensor::InnerVecToMat; -use crate::mut_tensor::MutTensor; -use crate::view::IsTensorLike; -use crate::view::IsTensorView; -use crate::view::TensorView; - -use std::marker::PhantomData; - -/// Arc tensor - a tensor with shared ownership -/// -/// See TensorView for more details of the tensor structure -#[derive(Debug, Clone)] -pub struct ArcTensor< - const TOTAL_RANK: usize, - const DRANK: usize, - const SRANK: usize, - Scalar: IsTensorScalar + 'static, - STensor: IsStaticTensor + 'static, - const ROWS: usize, - const COLS: usize, - const BATCH_SIZE: usize, -> where - ndarray::Dim<[ndarray::Ix; DRANK]>: Dimension, -{ - /// ndarray of tensors with shape [D1, D2, ...] - pub array: ndarray::ArcArray>, - phantom: PhantomData<(Scalar, STensor)>, -} - -// /// Tensor view of scalars -// pub type TensorViewX<'a, const DRANK: usize, Scalar> = -// TensorView<'a, DRANK, DRANK, 0, Scalar, Scalar, 1, 1, 1>; - -// /// Tensor view of batched scalars -// pub type TensorViewXB< -// 'a, -// const TOTAL_RANK: usize, -// const DRANK: usize, -// const SRANK: usize, -// Scalar, -// const B: usize, -// > = TensorView<'a, TOTAL_RANK, DRANK, SRANK, Scalar, BatchScalar, 1, 1, B>; - -// /// Tensor view of vectors with shape [R] -// pub type TensorViewXR< -// 'a, -// const TOTAL_RANK: usize, -// const DRANK: usize, -// const SRANK: usize, -// Scalar, -// const R: usize, -// > = TensorView<'a, TOTAL_RANK, DRANK, SRANK, Scalar, SVec, R, 1, 1>; - -// /// Tensor view of batched vectors with shape [R x B] -// pub type TensorViewXRB< -// 'a, -// const TOTAL_RANK: usize, -// const DRANK: usize, -// const SRANK: usize, -// Scalar, -// const R: usize, -// const B: usize, -// > = TensorView<'a, TOTAL_RANK, DRANK, SRANK, Scalar, BatchVec, R, 1, B>; - -// /// Tensor view of matrices with shape [R x C] -// pub type TensorViewXRC< -// 'a, -// const TOTAL_RANK: usize, -// const DRANK: usize, -// const SRANK: usize, -// Scalar, -// const R: usize, -// const C: usize, -// > = TensorView<'a, TOTAL_RANK, DRANK, SRANK, Scalar, SMat, R, C, 1>; - -// /// Tensor view of batched matrices with shape [R x C x B] -// pub type TensorViewXRCB< -// 'a, -// const TOTAL_RANK: usize, -// const DRANK: usize, -// const SRANK: usize, -// Scalar, -// const R: usize, -// const C: usize, -// const B: usize, -// > = TensorView<'a, TOTAL_RANK, DRANK, SRANK, Scalar, BatchMat, R, C, B>; - -// /// rank-1 tensor view of scalars with shape [D0] -// pub type TensorViewD<'a, Scalar> = TensorViewX<'a, 1, Scalar>; - -// /// rank-2 tensor view of scalars with shape [D0 x D1] -// pub type TensorViewDD<'a, Scalar> = TensorViewX<'a, 2, Scalar>; - -// /// rank-2 
tensor view of batched scalars with shape [D0 x B] -// pub type TensorViewDB<'a, Scalar, const B: usize> = TensorViewXB<'a, 2, 1, 1, Scalar, B>; - -// /// rank-2 tensor view of vectors with shape [D0 x R] -// pub type TensorViewDR<'a, Scalar, const R: usize> = TensorViewXR<'a, 2, 1, 1, Scalar, R>; - -// /// rank-3 tensor view of scalars with shape [D0 x R x B] -// pub type TensorViewDDD<'a, Scalar> = TensorViewX<'a, 3, Scalar>; - -// /// rank-3 tensor view of batched scalars with shape [D0 x D1 x B] -// pub type TensorViewDDB<'a, Scalar, const B: usize> = TensorViewXB<'a, 3, 2, 1, Scalar, B>; - -// /// rank-3 tensor view of vectors with shape [D0 x D1 x R] -// pub type TensorViewDDR<'a, Scalar, const R: usize> = TensorViewXR<'a, 3, 2, 1, Scalar, R>; - -// /// rank-3 tensor view of batched vectors with shape [D0 x R x B] -// pub type TensorViewDRB<'a, Scalar, const R: usize, const B: usize> = -// TensorViewXRB<'a, 3, 1, 2, Scalar, R, B>; - -// /// rank-3 tensor view of matrices with shape [D0 x R x C] -// pub type TensorViewDRC<'a, Scalar, const R: usize, const C: usize> = -// TensorViewXRC<'a, 3, 1, 2, Scalar, R, C>; - -// /// rank-4 tensor view of scalars with shape [D0 x D1 x D2 x D3] -// pub type TensorViewDDDD<'a, Scalar> = TensorViewX<'a, 4, Scalar>; - -// /// rank-4 tensor view of batched scalars with shape [D0 x D1 x D2 x B] -// pub type TensorViewDDDB<'a, Scalar, const B: usize> = TensorViewXB<'a, 4, 3, 1, Scalar, B>; - -// /// rank-4 tensor view of vectors with shape [D0 x D1 x D2 x R] -// pub type TensorViewDDDR<'a, Scalar, const R: usize> = TensorViewXR<'a, 4, 3, 1, Scalar, R>; - -// /// rank-4 tensor view of batched vectors with shape [D0 x D1 x R x B] -// pub type TensorViewDDRB<'a, Scalar, const R: usize, const B: usize> = -// TensorViewXRB<'a, 4, 2, 2, Scalar, R, B>; - -// /// rank-4 tensor view of matrices with shape [D0 x R x C x B] -// pub type TensorViewDDRC<'a, Scalar, const R: usize, const C: usize> = -// TensorViewXRC<'a, 4, 2, 2, Scalar, R, C>; - -// /// rank-4 tensor view of batched matrices with shape [D0 x R x C x B] -// pub type TensorViewDRCB<'a, Scalar, const R: usize, const C: usize, const B: usize> = -// TensorViewXRCB<'a, 4, 1, 3, Scalar, R, C, B>; - -/// rank-1 tensor of scalars -pub type ArcTensorX = - ArcTensor; - -/// rank-2 tensor of batched scalars -pub type ArcTensorXB< - const TOTAL_RANK: usize, - const DRANK: usize, - const SRANK: usize, - Scalar, - const B: usize, -> = ArcTensor, 1, 1, B>; - -/// rank-2 tensor of vectors with shape R -pub type ArcTensorXR< - const TOTAL_RANK: usize, - const DRANK: usize, - const SRANK: usize, - Scalar, - const R: usize, -> = ArcTensor, R, 1, 1>; - -/// rank-2 tensor of batched vectors with shape [R x B] -pub type ArcTensorXRB< - const TOTAL_RANK: usize, - const DRANK: usize, - const SRANK: usize, - Scalar, - const R: usize, - const B: usize, -> = ArcTensor, R, 1, B>; - -/// rank-2 tensor of matrices with shape [R x C] -pub type ArcTensorXRC< - const TOTAL_RANK: usize, - const DRANK: usize, - const SRANK: usize, - Scalar, - const R: usize, - const C: usize, -> = ArcTensor, R, C, 1>; - -/// rank-2 tensor of batched matrices with shape [R x C x B] -pub type ArcTensorXRCB< - const TOTAL_RANK: usize, - const DRANK: usize, - const SRANK: usize, - Scalar, - const R: usize, - const C: usize, - const B: usize, -> = ArcTensor, R, C, B>; - -/// rank-1 tensor of scalars with shape D0 -pub type ArcTensorD = ArcTensorX; - -/// rank-2 tensor of scalars with shape [D0 x D1] -pub type ArcTensorDD = ArcTensorX<2, Scalar>; - -/// 
-/// rank-1 tensor of scalars with shape D0
-pub type ArcTensorD<Scalar> = ArcTensorX<1, Scalar>;
-
-/// rank-2 tensor of scalars with shape [D0 x D1]
-pub type ArcTensorDD<Scalar> = ArcTensorX<2, Scalar>;
-
-/// rank-2 tensor of batched scalars with shape [D0 x B]
-pub type ArcTensorDB<Scalar, const B: usize> = ArcTensorXB<2, 1, 1, Scalar, B>;
-
-/// rank-2 tensor of vectors with shape [D0 x R]
-pub type ArcTensorDR<Scalar, const R: usize> = ArcTensorXR<2, 1, 1, Scalar, R>;
-
-/// rank-3 tensor of scalars with shape [D0 x D1 x D2]
-pub type ArcTensorRRR<Scalar> = ArcTensorX<3, Scalar>;
-
-/// rank-3 tensor of batched scalars with shape [D0 x D1 x B]
-pub type ArcTensorDDB<Scalar, const B: usize> = ArcTensorXB<3, 2, 1, Scalar, B>;
-
-/// rank-3 tensor of vectors with shape [D0 x D1 x R]
-pub type ArcTensorDDR<Scalar, const R: usize> = ArcTensorXR<3, 2, 1, Scalar, R>;
-
-/// rank-3 tensor of batched vectors with shape [D0 x R x B]
-pub type ArcTensorRBD<Scalar, const R: usize, const B: usize> =
-    ArcTensorXRB<3, 1, 2, Scalar, R, B>;
-
-/// rank-3 tensor of matrices with shape [D0 x R x C]
-pub type ArcTensorDRC<Scalar, const R: usize, const C: usize> =
-    ArcTensorXRC<3, 1, 2, Scalar, R, C>;
-
-/// rank-4 tensor of scalars with shape [D0 x D1 x D2 x D3]
-pub type ArcTensorDDDD<Scalar> = ArcTensorX<4, Scalar>;
-
-/// rank-4 tensor of batched scalars with shape [D0 x D1 x D2 x B]
-pub type ArcTensorDDDB<Scalar, const B: usize> = ArcTensorXB<4, 3, 1, Scalar, B>;
-
-/// rank-4 tensor of vectors with shape [D0 x D1 x D2 x R]
-pub type ArcTensorDDDR<Scalar, const R: usize> = ArcTensorXR<4, 3, 1, Scalar, R>;
-
-/// rank-4 tensor of batched vectors with shape [D0 x D1 x R x B]
-pub type ArcTensorDDRB<Scalar, const R: usize, const B: usize> =
-    ArcTensorXRB<4, 2, 2, Scalar, R, B>;
-
-/// rank-4 tensor of matrices with shape [D0 x D1 x R x C]
-pub type ArcTensorDDRC<Scalar, const R: usize, const C: usize> =
-    ArcTensorXRC<4, 2, 2, Scalar, R, C>;
-
-/// rank-4 tensor of batched matrices with shape [D0 x R x C x B]
-pub type ArcTensorDRCB<Scalar, const R: usize, const C: usize, const B: usize> =
-    ArcTensorXRCB<4, 1, 3, Scalar, R, C, B>;
-
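// Decoder for the alias suffixes above, as the shape doc comments suggest:
// one letter per axis, with D = dynamic (runtime) dimension, R = static rows,
// C = static columns, B = batch lanes. For example, ArcTensorDDR<f32, 3> is a
// two-dimensional array of 3-vectors and expands to
// ArcTensor<3, 2, 1, f32, SVec<f32, 3>, 3, 1, 1>, i.e. TOTAL_RANK = DRANK + SRANK.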
-macro_rules! arc_tensor_is_tensor_view {
-    ($scalar_rank:literal, $srank:literal, $drank:literal) => {
-
-
-        impl<
-                'a,
-                Scalar: IsTensorScalar + 'static,
-                STensor: IsStaticTensor<Scalar, $srank, ROWS, COLS, BATCH_SIZE> + 'static,
-                const ROWS: usize,
-                const COLS: usize,
-                const BATCH_SIZE: usize,
-            > IsTensorLike<
-                'a, $scalar_rank, $drank, $srank, Scalar, STensor, ROWS, COLS, BATCH_SIZE
-            > for ArcTensor<$scalar_rank, $drank, $srank, Scalar, STensor, ROWS, COLS, BATCH_SIZE>
-        {
-            fn elem_view<'b: 'a>(
-                &'b self,
-            ) -> ndarray::ArrayView<'a, STensor, ndarray::Dim<[ndarray::Ix; $drank]>> {
-                self.view().elem_view
-            }
-
-            fn get(&self, idx: [usize; $drank]) -> STensor {
-                self.view().get(idx)
-            }
-
-            fn dims(&self) -> [usize; $drank] {
-                self.view().dims()
-            }
-
-            fn scalar_view<'b: 'a>(
-                &'b self,
-            ) -> ndarray::ArrayView<'a, Scalar, ndarray::Dim<[ndarray::Ix; $scalar_rank]>> {
-                self.view().scalar_view
-            }
-
-            fn scalar_get(&'a self, idx: [usize; $scalar_rank]) -> Scalar {
-                self.view().scalar_get(idx)
-            }
-
-            fn scalar_dims(&self) -> [usize; $scalar_rank] {
-                self.view().scalar_dims()
-            }
-
-            fn to_mut_tensor(
-                &self,
-            ) -> MutTensor<$scalar_rank, $drank, $srank, Scalar, STensor, ROWS, COLS, BATCH_SIZE> {
-                MutTensor {
-                    mut_array: self.elem_view().to_owned(),
-                    phantom: PhantomData::default(),
-                }
-            }
-        }
-
-        impl<
-                'a,
-                Scalar: IsTensorScalar + 'static,
-                STensor: IsStaticTensor<Scalar, $srank, ROWS, COLS, BATCH_SIZE> + 'static,
-                const ROWS: usize,
-                const COLS: usize,
-                const BATCH_SIZE: usize,
-            >
-            ArcTensor<$scalar_rank, $drank, $srank, Scalar, STensor, ROWS, COLS, BATCH_SIZE>
-        {
-
-            /// create a new tensor from a tensor view
-            pub fn make_copy_from(
-                v: &TensorView<$scalar_rank, $drank, $srank, Scalar, STensor, ROWS, COLS, BATCH_SIZE>
-            ) -> Self
-            {
-                Self::from_mut_tensor(IsTensorLike::to_mut_tensor(v))
-            }
-
-            /// create a new tensor from a mutable tensor
-            pub fn from_mut_tensor(
-                tensor:
-                    MutTensor<$scalar_rank, $drank, $srank, Scalar, STensor, ROWS, COLS, BATCH_SIZE>,
-            ) -> Self {
-                Self {
-                    array: tensor.mut_array.into(),
-                    phantom: PhantomData {},
-                }
-            }
-
-            /// create a new tensor from a shape - all elements are zero
-            pub fn from_shape(size: [usize; $drank]) -> Self {
-                Self::from_mut_tensor(
-                    MutTensor::<
-                        $scalar_rank, $drank, $srank, Scalar, STensor, ROWS, COLS, BATCH_SIZE
-                    >::from_shape(size))
-            }
-
-            /// create a new tensor from a shape and a value
-            pub fn from_shape_and_val(
-                shape: [usize; $drank],
-                val: STensor,
-            ) -> Self {
-                Self::from_mut_tensor(
-                    MutTensor::<
-                        $scalar_rank, $drank, $srank, Scalar, STensor, ROWS, COLS, BATCH_SIZE
-                    >::from_shape_and_val(shape, val),
-                )
-            }
-
-            /// return a tensor view
-            pub fn view<'b: 'a>(&'b self)
-                -> TensorView<
-                    'a, $scalar_rank, $drank, $srank, Scalar, STensor, ROWS, COLS, BATCH_SIZE>
-            {
-                TensorView::<
-                    'a, $scalar_rank, $drank, $srank, Scalar, STensor, ROWS, COLS, BATCH_SIZE
-                >::new(
-                    self.array.view()
-                )
-            }
-
-            /// create a new tensor from a unary operation applied to a tensor view
-            pub fn from_map<
-                'b,
-                const OTHER_HRANK: usize, const OTHER_SRANK: usize,
-                OtherScalar: IsTensorScalar + 'static,
-                OtherSTensor: IsStaticTensor<
-                    OtherScalar, OTHER_SRANK, OTHER_ROWS, OTHER_COLS, OTHER_BATCHES
-                > + 'static,
-                const OTHER_ROWS: usize, const OTHER_COLS: usize, const OTHER_BATCHES: usize,
-                V: IsTensorView::<
-                    'b,
-                    OTHER_HRANK, $drank, OTHER_SRANK,
-                    OtherScalar, OtherSTensor,
-                    OTHER_ROWS, OTHER_COLS, OTHER_BATCHES
-                >,
-                F: FnMut(&OtherSTensor) -> STensor
-            >(
-                view: &'b V,
-                op: F,
-            )
-            -> Self
-            where
-                ndarray::Dim<[ndarray::Ix; OTHER_HRANK]>: ndarray::Dimension,
-                ndarray::Dim<[ndarray::Ix; $drank]>: ndarray::Dimension,
-            {
-                Self::from_mut_tensor(
-                    MutTensor::<
-                        $scalar_rank, $drank, $srank, Scalar, STensor, ROWS, COLS, BATCH_SIZE
-                    >::from_map(view, op),
-                )
-            }
-
-            /// create a new tensor from a binary operation applied to two tensor views
-            pub fn from_map2<
-                'b,
-                const OTHER_HRANK: usize, const OTHER_SRANK: usize,
-                OtherScalar: IsTensorScalar + 'static,
-                OtherSTensor: IsStaticTensor<
-                    OtherScalar, OTHER_SRANK, OTHER_ROWS, OTHER_COLS, OTHER_BATCHES
-                > + 'static,
-                const OTHER_ROWS: usize, const OTHER_COLS: usize, const OTHER_BATCHES: usize,
-                V: IsTensorView::<
-                    'b,
-                    OTHER_HRANK, $drank, OTHER_SRANK,
-                    OtherScalar, OtherSTensor,
-                    OTHER_ROWS, OTHER_COLS, OTHER_BATCHES
-                >,
-                const OTHER_HRANK2: usize, const OTHER_SRANK2: usize,
-                OtherScalar2: IsTensorScalar + 'static,
-                OtherSTensor2: IsStaticTensor<
-                    OtherScalar2, OTHER_SRANK2, OTHER_ROWS2, OTHER_COLS2, OTHER_BATCHES2
-                > + 'static,
-                const OTHER_ROWS2: usize, const OTHER_COLS2: usize, const OTHER_BATCHES2: usize,
-                V2: IsTensorView::<'b,
-                    OTHER_HRANK2, $drank, OTHER_SRANK2,
-                    OtherScalar2, OtherSTensor2,
-                    OTHER_ROWS2, OTHER_COLS2, OTHER_BATCHES2
-                >,
-                F: FnMut(&OtherSTensor, &OtherSTensor2) -> STensor
-            >(
-                view: &'b V,
-                view2: &'b V2,
-                op: F,
-            )
-            -> Self where
-                ndarray::Dim<[ndarray::Ix; OTHER_HRANK]>: ndarray::Dimension,
-                ndarray::Dim<[ndarray::Ix; OTHER_HRANK2]>: ndarray::Dimension
-            {
-                Self::from_mut_tensor(
-                    MutTensor::<
-                        $scalar_rank, $drank, $srank, Scalar, STensor, ROWS, COLS, BATCH_SIZE
-                    >::from_map2(view, view2, op),
-                )
-            }
-        }
-    };
-}
-
-impl<Scalar: IsTensorScalar + 'static, const ROWS: usize> InnerVecToMat<3, 1, 2, 4, 2, Scalar, ROWS>
-    for ArcTensorXR<3, 2, 1, Scalar, ROWS>
-{
-    fn inner_vec_to_mat(self) -> ArcTensorXRC<4, 2, 2, Scalar, ROWS, 1> {
-        ArcTensorXRC::<4, 2, 2, Scalar, ROWS, 1> {
-            array: self.array,
-            phantom: PhantomData,
-        }
-    }
-
-    type Output = ArcTensorXRC<4, 2, 2, Scalar, ROWS, 1>;
-}
-
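// Note on the conversion above: in nalgebra, SVec<Scalar, ROWS> is the same
// type as SMat<Scalar, ROWS, 1>, so inner_vec_to_mat merely moves `self.array`
// and re-labels each R-vector element as an R x 1 matrix: a rank-3 to rank-4
// reinterpretation with no copying.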
-impl<Scalar: IsTensorScalar + 'static> InnerScalarToVec<2, 0, 2, 3, 1, Scalar>
-    for ArcTensorX<2, Scalar>
-{
-    fn inner_scalar_to_vec(self) -> ArcTensorXR<3, 2, 1, Scalar, 1> {
-        ArcTensorXR::<3, 2, 1, Scalar, 1> {
-            array: self.array.map(|x| SVec::<Scalar, 1>::new(*x)).to_shared(),
-            phantom: PhantomData,
-        }
-    }
-
-    type Output = ArcTensorXR<3, 2, 1, Scalar, 1>;
-}
-
-// Instantiations: (scalar_rank, srank, drank), with scalar_rank = srank + drank.
-arc_tensor_is_tensor_view!(1, 0, 1);
-arc_tensor_is_tensor_view!(2, 0, 2);
-arc_tensor_is_tensor_view!(2, 1, 1);
-arc_tensor_is_tensor_view!(3, 0, 3);
-arc_tensor_is_tensor_view!(3, 1, 2);
-arc_tensor_is_tensor_view!(3, 2, 1);
-arc_tensor_is_tensor_view!(4, 0, 4);
-arc_tensor_is_tensor_view!(4, 1, 3);
-arc_tensor_is_tensor_view!(4, 2, 2);
-arc_tensor_is_tensor_view!(4, 3, 1);
-
-#[cfg(test)]
-mod tests {
-    use crate::element::SVec;
-
-    #[test]
-    fn from_mut_tensor() {
-        use super::*;
-
-        use crate::mut_tensor::MutTensorDDDR;
-        use crate::mut_tensor::MutTensorDDR;
-        use crate::mut_tensor::MutTensorDR;
-
-        {
-            let shape = [4];
-            let mut_img = MutTensorDR::from_shape_and_val(shape, SVec::<f32, 1>::new(0.5f32));
-            let copy = MutTensorDR::make_copy_from(&mut_img.view());
-            assert_eq!(copy.view().dims(), shape);
-            let img = ArcTensorDR::from_mut_tensor(copy);
-            assert_eq!(img.view().dims(), shape);
-            let mut_img2 = ArcTensorDR::from_mut_tensor(mut_img.clone());
-            assert_eq!(
-                mut_img2.view().elem_view().as_slice().unwrap(),
-                mut_img.view().elem_view().as_slice().unwrap()
-            );
-        }
-        {
-            let shape = [4, 2];
-            let mut_img = MutTensorDDR::from_shape_and_val(shape, SVec::<f32, 1>::new(0.5f32));
-            let copy = MutTensorDDR::make_copy_from(&mut_img.view());
-            assert_eq!(copy.dims(), shape);
-            let img = ArcTensorDDR::from_mut_tensor(copy);
-            assert_eq!(img.dims(), shape);
-            assert_eq!(
-                img.view().elem_view().as_slice().unwrap(),
-                mut_img.view().elem_view().as_slice().unwrap()
-            );
-        }
-        {
-            let shape = [3, 2, 7];
-            let mut_img = MutTensorDDDR::from_shape_and_val(shape, SVec::<f32, 1>::new(0.5f32));
-            let copy = MutTensorDDDR::make_copy_from(&mut_img.view());
-            assert_eq!(copy.dims(), shape);
-            let img = ArcTensorDDDR::from_mut_tensor(copy);
-            assert_eq!(img.dims(), shape);
-            assert_eq!(
-                img.view().elem_view().as_slice().unwrap(),
-                mut_img.view().elem_view().as_slice().unwrap()
-            );
-        }
-    }
-
-    #[test]
-    fn shared_ownership() {
-        use super::*;
-
-        use crate::mut_tensor::MutTensorDDDR;
-        use crate::mut_tensor::MutTensorDDR;
-        use crate::mut_tensor::MutTensorDR;
-        {
-            let shape = [4];
-            let mut_img = MutTensorDR::from_shape_and_val(shape, SVec::<f32, 1>::new(0.5f32));
-            let img = ArcTensorDR::from_mut_tensor(mut_img);
-
-            let img2 = img.clone();
-            assert_eq!(
-                img.view().elem_view().as_slice().unwrap(),
-                img2.view().elem_view().as_slice().unwrap()
-            );
-
-            let mut_img2 = img2.to_mut_tensor();
-            assert_ne!(
-                mut_img2.view().elem_view().as_slice().unwrap().as_ptr(),
-                img2.view().elem_view().as_slice().unwrap().as_ptr()
-            );
-        }
-        {
-            let shape = [4, 6];
-            let mut_img = MutTensorDDR::from_shape_and_val(shape, SVec::<f32, 1>::new(0.5f32));
-            let img = ArcTensorDDR::from_mut_tensor(mut_img);
-
-            let img2 = img.clone();
-            let mut_img2 = img2.to_mut_tensor();
-            assert_ne!(
-                mut_img2.view().elem_view().as_slice().unwrap().as_ptr(),
-                img2.view().elem_view().as_slice().unwrap().as_ptr()
-            );
-        }
-        {
-            let shape = [4, 6, 7];
-            let mut_img = MutTensorDDDR::from_shape_and_val(shape, SVec::<f32, 1>::new(0.5f32));
-            let img = ArcTensorDDDR::from_mut_tensor(mut_img);
-
-            let img2 = img.clone();
-            let mut_img2 = img2.to_mut_tensor();
-            assert_ne!(
-                mut_img2.view().elem_view().as_slice().unwrap().as_ptr(),
-                img2.view().elem_view().as_slice().unwrap().as_ptr()
-            );
-        }
-    }
-
-    #[test]
-    fn multi_threading() {
-        use crate::arc_tensor::ArcTensorDDRC;
-        use crate::mut_tensor::MutTensorDDRC;
-        use std::thread;
-
-        let shape = [4, 6];
-        let mut_img = MutTensorDDRC::from_shape_and_val(shape, SVec::<u16, 3>::new(10, 20, 300));
-        let img = ArcTensorDDRC::from_mut_tensor(mut_img);
-
-        thread::scope(|s| {
-            s.spawn(|| {
-                println!("{:?}", img);
-            });
-            s.spawn(|| {
-                println!("{:?}", img);
-            });
-        });
-    }
-}
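The tests above pin down the ownership contract; a minimal sketch against the deleted API (constructor and view names as used in the tests, which the sophus_core replacement appears to carry over):

let a = ArcTensorDR::from_shape_and_val([4], SVec::<f32, 1>::new(0.5));
let b = a.clone(); // cheap: clones the ArcArray handle, storage is shared
let m = b.to_mut_tensor(); // deep copy: detaches from the shared storage
assert_ne!(
    m.view().elem_view().as_slice().unwrap().as_ptr(),
    b.view().elem_view().as_slice().unwrap().as_ptr()
);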
diff --git a/crates/sophus_tensor/src/element.rs b/crates/sophus_tensor/src/element.rs
deleted file mode 100644
index 8d627a5..0000000
--- a/crates/sophus_tensor/src/element.rs
+++ /dev/null
@@ -1,374 +0,0 @@
-use simba::simd::AutoSimd;
-use std::fmt::Debug;
-
-pub use typenum::generic_const_mappings::Const;
-
-/// Number category
-#[derive(Debug, Copy, Clone, PartialEq, Eq)]
-pub enum NumberCategory {
-    /// Real number such as f32 or f64
-    Real,
-    /// Unsigned integer such as u8, u16, u32, or u64
-    Unsigned,
-    /// Signed integer such as i8, i16, i32, or i64
-    Signed,
-}
-
-/// Trait for scalar and batch scalar types
-pub trait IsTensorScalarLike: Copy + Clone + Debug {
-    /// Get the number category
-    fn number_category() -> NumberCategory;
-}
-
-/// Trait for scalar types
-pub trait IsTensorScalar: IsTensorScalarLike + num_traits::Zero + nalgebra::Scalar {}
-
-impl IsTensorScalarLike for f32 {
-    fn number_category() -> NumberCategory {
-        NumberCategory::Real
-    }
-}
-impl IsTensorScalarLike for f64 {
-    fn number_category() -> NumberCategory {
-        NumberCategory::Real
-    }
-}
-impl IsTensorScalarLike for u8 {
-    fn number_category() -> NumberCategory {
-        NumberCategory::Unsigned
-    }
-}
-impl IsTensorScalarLike for u16 {
-    fn number_category() -> NumberCategory {
-        NumberCategory::Unsigned
-    }
-}
-impl IsTensorScalarLike for u32 {
-    fn number_category() -> NumberCategory {
-        NumberCategory::Unsigned
-    }
-}
-impl IsTensorScalarLike for u64 {
-    fn number_category() -> NumberCategory {
-        NumberCategory::Unsigned
-    }
-}
-impl IsTensorScalarLike for i8 {
-    fn number_category() -> NumberCategory {
-        NumberCategory::Signed
-    }
-}
-impl IsTensorScalarLike for i16 {
-    fn number_category() -> NumberCategory {
-        NumberCategory::Signed
-    }
-}
-impl IsTensorScalarLike for i32 {
-    fn number_category() -> NumberCategory {
-        NumberCategory::Signed
-    }
-}
-impl IsTensorScalarLike for i64 {
-    fn number_category() -> NumberCategory {
-        NumberCategory::Signed
-    }
-}
-
-impl IsTensorScalar for f32 {}
-impl IsTensorScalar for f64 {}
-impl IsTensorScalar for u8 {}
-impl IsTensorScalar for u16 {}
-impl IsTensorScalar for u32 {}
-impl IsTensorScalar for u64 {}
-impl IsTensorScalar for i8 {}
-impl IsTensorScalar for i16 {}
-impl IsTensorScalar for i32 {}
-impl IsTensorScalar for i64 {}
-
-/// Trait for batch scalar types
-pub trait IsBatchScalar: IsTensorScalarLike {}
-
-impl<Scalar: IsTensorScalar + 'static, const BATCH_SIZE: usize> IsTensorScalarLike
-    for AutoSimd<[Scalar; BATCH_SIZE]>
-{
-    fn number_category() -> NumberCategory {
-        Scalar::number_category()
-    }
-}
-impl<Scalar: IsTensorScalar + 'static, const BATCH_SIZE: usize> IsBatchScalar
-    for AutoSimd<[Scalar; BATCH_SIZE]>
-{
-}
-
-/// Static vector
-pub type SVec<ScalarLike, const ROWS: usize> = nalgebra::SVector<ScalarLike, ROWS>;
-/// Static matrix
-pub type SMat<ScalarLike, const ROWS: usize, const COLS: usize> =
-    nalgebra::SMatrix<ScalarLike, ROWS, COLS>;
-
-/// Batch scalar
-pub type BatchScalar<ScalarLike, const BATCH_SIZE: usize> = AutoSimd<[ScalarLike; BATCH_SIZE]>;
-/// Batch vector
-pub type BatchVec<ScalarLike, const ROWS: usize, const BATCH_SIZE: usize> =
-    nalgebra::SVector<BatchScalar<ScalarLike, BATCH_SIZE>, ROWS>;
-/// Batch matrix
-pub type BatchMat<ScalarLike, const ROWS: usize, const COLS: usize, const BATCH_SIZE: usize> =
-    nalgebra::SMatrix<BatchScalar<ScalarLike, BATCH_SIZE>, ROWS, COLS>;
-
-/// Trait for static tensors
-pub trait IsStaticTensor<
-    Scalar: IsTensorScalar + 'static,
-    const SRANK: usize,
-    const ROWS: usize,
-    const COLS: usize,
-    const BATCH_SIZE: usize,
->: Copy + Clone + Debug
-{
-    /// Create a tensor from a slice
-    fn from_slice(slice: &[Scalar]) -> Self;
-
-    /// Create a zero tensor
-    fn zero() -> Self;
-
-    /// Get the number category
-    fn number_category() -> NumberCategory {
-        Scalar::number_category()
-    }
-
-    /// Returns ith scalar element
-    fn scalar(&self, idx: [usize; SRANK]) -> &Scalar;
-
-    /// Get the rank
-    fn rank(&self) -> usize {
-        SRANK
-    }
-
-    /// Get the number of batches
-    fn num_batches(&self) -> usize {
-        BATCH_SIZE
-    }
-
-    /// Get the number of rows
-    fn num_rows(&self) -> usize {
-        ROWS
-    }
-
-    /// Get the number of columns
-    fn num_cols(&self) -> usize {
-        COLS
-    }
-
-    /// Get the compile time shape as an array
-    fn sdims() -> [usize; SRANK];
-
-    /// Number of scalar elements
-    fn num_scalars() -> usize {
-        BATCH_SIZE * ROWS * COLS
-    }
-
-    /// Get the stride as an array
-    fn strides() -> [usize; SRANK];
-}
-
-// Rank 0 tensors
-//
-// a scalar
-impl<Scalar: IsTensorScalar + 'static> IsStaticTensor<Scalar, 0, 1, 1, 1> for Scalar {
-    fn zero() -> Self {
-        Scalar::zero()
-    }
-
-    fn scalar(&self, _idx: [usize; 0]) -> &Scalar {
-        self
-    }
-
-    fn sdims() -> [usize; 0] {
-        []
-    }
-
-    fn strides() -> [usize; 0] {
-        []
-    }
-
-    fn from_slice(slice: &[Scalar]) -> Self {
-        slice[0]
-    }
-}
-
-// RANK 1 TENSORS
-//
-// A batch of scalars
-impl<Scalar: IsTensorScalar + 'static, const BATCH_SIZE: usize>
-    IsStaticTensor<Scalar, 1, 1, 1, BATCH_SIZE> for BatchScalar<Scalar, BATCH_SIZE>
-{
-    fn zero() -> Self {
-        todo!()
-    }
-
-    fn scalar(&self, idx: [usize; 1]) -> &Scalar {
-        &self.0[idx[0]]
-    }
-
-    fn sdims() -> [usize; 1] {
-        [BATCH_SIZE]
-    }
-
-    fn strides() -> [usize; 1] {
-        [1]
-    }
-
-    fn from_slice(_slice: &[Scalar]) -> Self {
-        todo!("BatchScalar::from_slice")
-    }
-}
-
-// A vector
-impl<Scalar: IsTensorScalar + 'static, const ROWS: usize> IsStaticTensor<Scalar, 1, ROWS, 1, 1>
-    for SVec<Scalar, ROWS>
-{
-    fn zero() -> Self {
-        Self::zeros()
-    }
-
-    fn scalar(&self, idx: [usize; 1]) -> &Scalar {
-        &self[idx[0]]
-    }
-
-    fn sdims() -> [usize; 1] {
-        [ROWS]
-    }
-
-    fn strides() -> [usize; 1] {
-        [1]
-    }
-
-    fn from_slice(slice: &[Scalar]) -> Self {
-        let mut v = Self::zeros();
-        v.copy_from_slice(slice);
-        v
-    }
-}
-
-// RANK 2 TENSORS
-//
-// A batch of vectors
-impl<Scalar: IsTensorScalar + 'static, const BATCH_SIZE: usize, const ROWS: usize>
-    IsStaticTensor<Scalar, 2, ROWS, 1, BATCH_SIZE> for SVec<BatchScalar<Scalar, BATCH_SIZE>, ROWS>
-{
-    fn zero() -> Self {
-        //Self::zeros()
-        todo!()
-    }
-
-    fn scalar(&self, idx: [usize; 2]) -> &Scalar {
-        &self[idx[1]].0[idx[0]]
-    }
-
-    fn sdims() -> [usize; 2] {
-        [BATCH_SIZE, ROWS]
-    }
-
-    fn strides() -> [usize; 2] {
-        [1, BATCH_SIZE]
-    }
-
-    fn from_slice(_slice: &[Scalar]) -> Self {
-        todo!("SVec<BatchScalar<Scalar, BATCH_SIZE>, ROWS>::from_slice")
-    }
-}
-
-// a matrix
-impl<Scalar: IsTensorScalar + 'static, const ROWS: usize, const COLS: usize>
-    IsStaticTensor<Scalar, 2, ROWS, COLS, 1> for SMat<Scalar, ROWS, COLS>
-{
-    fn zero() -> Self {
-        Self::zeros()
-    }
-
-    fn scalar(&self, idx: [usize; 2]) -> &Scalar {
-        &self[(idx[0], idx[1])]
-    }
-
-    fn sdims() -> [usize; 2] {
-        [ROWS, COLS]
-    }
-
-    fn strides() -> [usize; 2] {
-        [1, ROWS]
-    }
-
-    fn from_slice(slice: &[Scalar]) -> Self {
-        let mut v = Self::zeros();
-        v.copy_from_slice(slice);
-        v
-    }
-}
-
-// RANK 3 TENSORS
-
-// a batch of matrices
-impl<
-        Scalar: IsTensorScalar + 'static,
-        const BATCH_SIZE: usize,
-        const ROWS: usize,
-        const COLS: usize,
-    > IsStaticTensor<Scalar, 3, ROWS, COLS, BATCH_SIZE>
-    for SMat<BatchScalar<Scalar, BATCH_SIZE>, ROWS, COLS>
-{
-    fn zero() -> Self {
-        todo!()
-    }
-    fn scalar(&self, idx: [usize; 3]) -> &Scalar {
-        &self[(idx[1], idx[2])].0[idx[0]]
-    }
-
-    fn sdims() -> [usize; 3] {
-        [ROWS, COLS, BATCH_SIZE]
-    }
-
-    fn strides() -> [usize; 3] {
-        [1, BATCH_SIZE, BATCH_SIZE * ROWS]
-    }
-
-    fn from_slice(_slice: &[Scalar]) -> Self {
-        todo!("SMat<BatchScalar<Scalar, BATCH_SIZE>, ROWS, COLS>::from_slice")
-    }
-}
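// Layout implied by scalar()/strides() in the rank-3 impl above: index order
// is [batch, row, col] with the batch axis innermost. For
// SMat<BatchScalar<Scalar, B>, R, C>, scalar (b, r, c) sits at flat offset
// b + r * B + c * B * R, matching strides() == [1, B, B * R].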
-
-/// Format of a static tensor
-#[derive(Debug, Copy, Clone, PartialEq, Eq)]
-pub struct STensorFormat {
-    /// Number category
-    pub number_category: NumberCategory,
-    /// Number of bytes per scalar
-    pub num_bytes_per_scalar: usize,
-    /// batch size
-    pub batch_size: usize,
-    /// number of rows
-    pub num_rows: usize,
-    /// number of columns
-    pub num_cols: usize,
-}
-
-impl STensorFormat {
-    /// Create a new tensor format struct
-    pub fn new<
-        Scalar: IsTensorScalar + 'static,
-        const ROWS: usize,
-        const COLS: usize,
-        const BATCH_SIZE: usize,
-    >() -> Self {
-        STensorFormat {
-            number_category: Scalar::number_category(),
-            num_rows: ROWS,
-            num_cols: COLS,
-            batch_size: BATCH_SIZE,
-            num_bytes_per_scalar: std::mem::size_of::<Scalar>(),
-        }
-    }
-
-    /// Number of bytes
-    pub fn num_bytes(&self) -> usize {
-        self.batch_size * self.num_rows * self.num_cols * self.num_bytes_per_scalar
-    }
-}
diff --git a/rust-toolchain.toml b/rust-toolchain.toml
new file mode 100644
index 0000000..5d56faf
--- /dev/null
+++ b/rust-toolchain.toml
@@ -0,0 +1,2 @@
+[toolchain]
+channel = "nightly"
diff --git a/sophus-rs.code-workspace b/sophus-rs.code-workspace
index 8f5f33f..0b9beda 100644
--- a/sophus-rs.code-workspace
+++ b/sophus-rs.code-workspace
@@ -6,7 +6,11 @@
     ],
     "settings": {
         "rust-analyzer.linkedProjects": [
-            "./Cargo.toml"
-        ]
+            "./Cargo.toml",
+        ],
+        "rust-analyzer.cargo.target": null,
+        "rust-analyzer.diagnostics.styleLints.enable": true,
+        "rust-analyzer.imports.granularity.group": "item",
+        "rust-analyzer.imports.prefix": "crate",
     }
-}
+}
\ No newline at end of file
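A closing sketch of the deleted STensorFormat API (names as defined in element.rs above; num_bytes multiplies batch size, rows, columns, and bytes per scalar):

let fmt = STensorFormat::new::<f32, 3, 1, 4>(); // batches of four f32 3-vectors
assert_eq!(fmt.num_bytes(), 4 * 3 * 1 * std::mem::size_of::<f32>());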