diff --git a/Cargo.toml b/Cargo.toml index eb33238..3dd5331 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -21,17 +21,17 @@ include = [ keywords = ["robotics", "optimization"] license = "MIT OR Apache-2.0" repository = "https://github.com/farm-ng/sophus-rs/" -version = "0.6.0" +version = "0.6.1" [workspace.dependencies] -sophus = {path = "crates/sophus", version = "0.6.0"} -sophus_core = {path = "crates/sophus_core", version = "0.6.0"} -sophus_image = {path = "crates/sophus_image", version = "0.6.0"} -sophus_lie = {path = "crates/sophus_lie", version = "0.6.0"} -sophus_opt = {path = "crates/sophus_opt", version = "0.6.0"} -sophus_pyo3 = {path = "crates/sophus_pyo3", version = "0.6.0"} -sophus_sensor = {path = "crates/sophus_sensor", version = "0.6.0"} -sophus_viewer = {path = "crates/sophus_viewer", version = "0.6.0"} +sophus = {path = "crates/sophus", version = "0.6.1"} +sophus_core = {path = "crates/sophus_core", version = "0.6.1"} +sophus_image = {path = "crates/sophus_image", version = "0.6.1"} +sophus_lie = {path = "crates/sophus_lie", version = "0.6.1"} +sophus_opt = {path = "crates/sophus_opt", version = "0.6.1"} +sophus_pyo3 = {path = "crates/sophus_pyo3", version = "0.6.1"} +sophus_sensor = {path = "crates/sophus_sensor", version = "0.6.1"} +sophus_viewer = {path = "crates/sophus_viewer", version = "0.6.1"} approx = "0.5" as-any = "0.3" @@ -43,13 +43,14 @@ dyn-clone = "1.0" eframe = {version = "0.27", features = ["wgpu"]} egui_extras = "0.27" env_logger = "0.11" -faer = "0.18" +faer = "0.19" hollywood = {version = "0.7.0"} image = {version = "0.25", features = [ "jpeg", "png", "tiff", ]} +linked-hash-map = "0.5" log = "0.4" nalgebra = {version = "0.32", features = ["rand"]} ndarray = {version = "0.15", features = ["approx-0_5"]} diff --git a/crates/sophus/examples/pose_graph.rs b/crates/sophus/examples/pose_graph.rs index 3db214c..eb6209a 100644 --- a/crates/sophus/examples/pose_graph.rs +++ b/crates/sophus/examples/pose_graph.rs @@ -7,9 +7,7 @@ use sophus::viewer::actor::run_viewer_on_main_thread; use sophus::viewer::actor::ViewerBuilder; use sophus::viewer::actor::ViewerCamera; use sophus::viewer::actor::ViewerConfig; -use sophus::viewer::renderable::*; -use sophus::viewer::scene_renderer::interaction::WgpuClippingPlanes; -use sophus::viewer::SimpleViewer; +use sophus::viewer::renderables::*; use sophus_core::linalg::VecF64; use sophus_image::ImageSize; use sophus_lie::Isometry2; @@ -17,6 +15,14 @@ use sophus_lie::Isometry3; use sophus_sensor::camera_enum::perspective_camera::PerspectiveCameraEnum; use sophus_sensor::dyn_camera::DynCamera; use sophus_sensor::KannalaBrandtCamera; +use sophus_viewer::interactions::WgpuClippingPlanes; +use sophus_viewer::simple_viewer::SimpleViewer; + +use crate::color::Color; +use crate::renderable3d::Line3; +use crate::renderable3d::Lines3; +use crate::renderable3d::Renderable3d; +use crate::renderable3d::View3dPacket; #[actor(ContentGeneratorMessage, NullInRequestMessage)] type ContentGenerator = Actor< @@ -50,7 +56,7 @@ pub struct ContentGeneratorState {} #[actor_outputs] pub struct ContentGeneratorOutbound { /// curves - pub packets: OutboundChannel>>, + pub packets: OutboundChannel>, } fn make_axes(world_from_local_poses: Vec>) -> Vec { @@ -105,18 +111,50 @@ impl HasOnMessage for ContentGeneratorMessage { ContentGeneratorMessage::ClockTick(_time_in_seconds) => { let pose_graph = PoseCircleProblem::new(25); - let renderables = vec![ - Renderable::Lines3(Lines3 { + // Camera / view pose parameters + let intrinsics = KannalaBrandtCamera::::new( + 
&VecF64::<8>::from_array([600.0, 600.0, 320.0, 240.0, 0.0, 0.0, 0.0, 0.0]), + ImageSize { + width: 640, + height: 480, + }, + ); + let scene_from_camera = Isometry3::from_t(&VecF64::<3>::new(0.0, 0.0, -50.0)); + let clipping_planes = WgpuClippingPlanes { + near: 0.1, + far: 1000.0, + }; + + let initial_camera = ViewerCamera { + intrinsics: DynCamera::::from_model( + PerspectiveCameraEnum::KannalaBrandt(intrinsics), + ), + clipping_planes, + scene_from_camera, + }; + + let mut packet = View3dPacket { + view_label: "view0".to_owned(), + renderables3d: vec![], + initial_camera, + }; + + packet.renderables3d = vec![ + Renderable3d::Lines3(Lines3 { name: "true".into(), lines: make_axes(pose_graph.true_world_from_robot.clone()), }), - Renderable::Lines3(Lines3 { + Renderable3d::Lines3(Lines3 { name: "est".into(), lines: make_axes(pose_graph.est_world_from_robot.clone()), }), ]; - outbound.packets.send(Stream { msg: renderables }); + outbound.packets.send(Stream { + msg: Packets { + packets: vec![Packet::View3d(packet)], + }, + }); } } } @@ -129,28 +167,7 @@ impl IsInboundMessageNew for ContentGeneratorMessage { } pub async fn run_viewer_example() { - // Camera / view pose parameters - let intrinsics = KannalaBrandtCamera::::new( - &VecF64::<8>::from_array([600.0, 600.0, 320.0, 240.0, 0.0, 0.0, 0.0, 0.0]), - ImageSize { - width: 640, - height: 480, - }, - ); - let scene_from_camera = Isometry3::from_t(&VecF64::<3>::new(0.0, 0.0, -50.0)); - let clipping_planes = WgpuClippingPlanes { - near: 0.1, - far: 1000.0, - }; - let camera = ViewerCamera { - intrinsics: DynCamera::::from_model(PerspectiveCameraEnum::KannalaBrandt( - intrinsics, - )), - clipping_planes, - scene_from_camera, - }; - - let mut builder = ViewerBuilder::from_config(ViewerConfig { camera }); + let mut builder = ViewerBuilder::from_config(ViewerConfig {}); // Pipeline configuration let pipeline = Hollywood::configure(&mut |context| { @@ -164,7 +181,7 @@ pub async fn run_viewer_example() { ContentGeneratorState::default(), ); // 3. 
The viewer actor - let mut viewer = EguiActor::, (), Isometry3, (), ()>::from_builder( + let mut viewer = EguiActor::, (), ()>::from_builder( context, &builder, ); diff --git a/crates/sophus/examples/viewer_ex.rs b/crates/sophus/examples/viewer_ex.rs index 05a211f..2814449 100644 --- a/crates/sophus/examples/viewer_ex.rs +++ b/crates/sophus/examples/viewer_ex.rs @@ -1,20 +1,27 @@ use hollywood::actors::egui::EguiActor; use hollywood::actors::egui::Stream; use hollywood::prelude::*; -use nalgebra::SVector; -use sophus::image::arc_image::ArcImage4F32; +use sophus::examples::viewer_example::make_example_image; use sophus::image::ImageSize; -use sophus::prelude::*; use sophus::viewer::actor::run_viewer_on_main_thread; use sophus::viewer::actor::ViewerBuilder; use sophus::viewer::actor::ViewerCamera; use sophus::viewer::actor::ViewerConfig; -use sophus::viewer::renderable::*; -use sophus::viewer::scene_renderer::interaction::WgpuClippingPlanes; -use sophus::viewer::SimpleViewer; -use sophus_core::linalg::VecF64; +use sophus::viewer::renderables::*; use sophus_lie::Isometry3; -use sophus_sensor::DynCamera; +use sophus_viewer::renderables::color::Color; +use sophus_viewer::renderables::renderable2d::Points2; +use sophus_viewer::renderables::renderable2d::Renderable2d; +use sophus_viewer::renderables::renderable2d::View2dPacket; +use sophus_viewer::renderables::renderable3d::View3dPacket; +use sophus_viewer::simple_viewer::SimpleViewer; + +use crate::frame::Frame; +use crate::renderable2d::Lines2; +use crate::renderable3d::Lines3; +use crate::renderable3d::Mesh3; +use crate::renderable3d::Points3; +use crate::renderable3d::Renderable3d; #[actor(ContentGeneratorMessage, NullInRequestMessage)] type ContentGenerator = Actor< @@ -47,7 +54,7 @@ pub enum ContentGeneratorMessage { pub struct ContentGeneratorOutRequest { /// Check time-stamp of receiver pub scene_from_camera_request: - OutRequestChannel<(), Isometry3, ContentGeneratorMessage>, + OutRequestChannel, ContentGeneratorMessage>, } impl IsOutRequestHub for ContentGeneratorOutRequest { @@ -80,33 +87,70 @@ impl HasActivate for ContentGeneratorOutRequest { #[derive(Clone, Debug)] pub struct ContentGeneratorState { pub counter: u32, - pub show: bool, - pub intrinsics: DynCamera, - pub scene_from_camera: Isometry3, -} - -impl Default for ContentGeneratorState { - fn default() -> Self { - ContentGeneratorState { - counter: 0, - show: false, - intrinsics: DynCamera::new_kannala_brandt( - &VecF64::<8>::from_array([600.0, 600.0, 320.0, 240.0, 0.0, 0.0, 0.0, 0.0]), - ImageSize { - width: 640, - height: 480, - }, - ), - scene_from_camera: Isometry3::from_t(&VecF64::<3>::new(0.0, 0.0, -5.0)), - } - } } /// Outbound hub for the ContentGenerator. 
#[actor_outputs] pub struct ContentGeneratorOutbound { /// curves - pub packets: OutboundChannel>>, + pub packets: OutboundChannel>, +} + +fn create_view2_packet(image_size: ImageSize) -> Packet { + let img = make_example_image(image_size); + + let mut packet_2d = View2dPacket { + view_label: "view_2d".to_owned(), + renderables3d: vec![], + renderables2d: vec![], + frame: Some(Frame::from_image(&img)), + }; + + let points2 = Points2::make("points2", &[[16.0, 12.0], [32.0, 24.0]], &Color::red(), 5.0); + packet_2d.renderables2d.push(Renderable2d::Points2(points2)); + + let lines2 = Lines2::make( + "lines2", + &[[[0.0, 0.0], [100.0, 100.0]]], + &Color::red(), + 5.0, + ); + packet_2d.renderables2d.push(Renderable2d::Lines2(lines2)); + + Packet::View2d(packet_2d) +} + +fn create_view3_packet(initial_camera: ViewerCamera) -> Packet { + let mut packet_3d = View3dPacket { + view_label: "view_3d".to_owned(), + renderables3d: vec![], + initial_camera: initial_camera.clone(), + }; + + let trig_points = [[0.0, 0.0, -0.1], [0.0, 1.0, 0.0], [1.0, 0.0, 0.0]]; + + let points3 = Points3::make("points3", &trig_points, &Color::red(), 5.0); + packet_3d.renderables3d.push(Renderable3d::Points3(points3)); + + let lines3 = Lines3::make( + "lines3", + &[ + [[0.0, 0.0, 0.0], [1.0, 0.0, 0.0]], + [[0.0, 0.0, 0.0], [0.0, 1.0, 0.0]], + [[0.0, 0.0, 0.0], [0.0, 0.0, 1.0]], + ], + &Color::green(), + 5.0, + ); + packet_3d.renderables3d.push(Renderable3d::Lines3(lines3)); + + let blue = Color::blue(); + let mesh = Mesh3::make("mesh", &[(trig_points, blue)]); + packet_3d + .renderables3d + .push(renderable3d::Renderable3d::Mesh3(mesh)); + + Packet::View3d(packet_3d) } impl HasOnMessage for ContentGeneratorMessage { @@ -116,304 +160,32 @@ impl HasOnMessage for ContentGeneratorMessage { _prop: &Self::Prop, state: &mut Self::State, outbound: &Self::OutboundHub, - request: &ContentGeneratorOutRequest, + _request: &ContentGeneratorOutRequest, ) { match &self { ContentGeneratorMessage::ClockTick(_time_in_seconds) => { - let mut renderables = vec![]; + let initial_camera = ViewerCamera::default(); - let trig_points = [ - SVector::::new(0.0, 0.0, -0.1), - SVector::::new(0.0, 1.0, 0.0), - SVector::::new(1.0, 0.0, 0.0), - ]; - - let ttrig_points = [ - SVector::::new(0.1, 0.0, 0.3), - SVector::::new(0.1, 1.0, 0.3), - SVector::::new(1.1, 0.0, 0.3), - ]; + let mut packets = Packets { packets: vec![] }; if state.counter == 0 { - let points2 = vec![ - Point2 { - p: SVector::::new(16.0, 12.0), - color: Color { - r: 1.0, - g: 1.0, - b: 0.0, - a: 1.0, - }, - point_size: 5.0, - }, - Point2 { - p: SVector::::new(32.0, 24.0), - color: Color { - r: 1.0, - g: 1.0, - b: 0.0, - a: 1.0, - }, - point_size: 5.0, - }, - ]; - - let points3 = vec![ - Point3 { - p: trig_points[0], - color: Color { - r: 0.0, - g: 1.0, - b: 0.0, - a: 1.0, - }, - point_size: 5.0, - }, - Point3 { - p: trig_points[1], - color: Color { - r: 0.0, - g: 1.0, - b: 0.0, - a: 1.0, - }, - point_size: 5.0, - }, - Point3 { - p: trig_points[2], - color: Color { - r: 0.0, - g: 1.0, - b: 0.0, - a: 1.0, - }, - point_size: 5.0, - }, - ]; - - let mesh = vec![Triangle3 { - p0: trig_points[0], - p1: trig_points[1], - p2: trig_points[2], - color: Color { - r: 1.0, - g: 0.0, - b: 0.0, - a: 0.5, - }, - }]; - - let textured_mesh = vec![TexturedTriangle3 { - p0: ttrig_points[0], - p1: ttrig_points[1], - p2: ttrig_points[2], - tex0: SVector::::new(0.0, 0.0), - tex1: SVector::::new(0.0, 0.0), - tex2: SVector::::new(0.0, 0.0), - }]; - - let line3 = vec![ - Line3 { - p0: SVector::::new(0.0, 0.0, 0.0), - 
p1: SVector::::new(1.0, 0.0, 0.0), - color: Color { - r: 1.0, - g: 0.0, - b: 0.0, - a: 1.0, - }, - line_width: 5.0, - }, - Line3 { - p0: SVector::::new(0.0, 0.0, 0.0), - p1: SVector::::new(0.0, 1.0, 0.0), - color: Color { - r: 0.0, - g: 1.0, - b: 0.0, - a: 1.0, - }, - line_width: 5.0, - }, - Line3 { - p0: SVector::::new(0.0, 0.0, 0.0), - p1: SVector::::new(0.0, 0.0, 1.0), - color: Color { - r: 0.0, - g: 0.0, - b: 1.0, - a: 1.0, - }, - line_width: 5.0, - }, - ]; - - renderables.push(Renderable::Points2(Points2 { - name: "points2".to_owned(), - points: points2, - })); - renderables.push(Renderable::Points3(Points3 { - name: "points3".to_owned(), - points: points3, - })); - renderables.push(Renderable::Mesh3(Mesh3 { - name: "mesh".to_owned(), - mesh, - })); - - renderables.push(Renderable::TexturedMesh3(TexturedMesh3 { - name: "tex_mesh".to_owned(), - mesh: textured_mesh, - texture: ArcImage4F32::from_image_size_and_val( - ImageSize { - width: 2, - height: 2, - }, - nalgebra::SVector::::new(0.0, 0.0, 0.0, 0.0), - ), - })); - - renderables.push(Renderable::Lines3(Lines3 { - name: "line3".to_owned(), - lines: line3, - })); + packets + .packets + .push(create_view2_packet(initial_camera.intrinsics.image_size())); + + packets.packets.push(create_view3_packet(initial_camera)); } - let proj_line2 = vec![ - Line2 { - p0: state - .intrinsics - .cam_proj( - &state - .scene_from_camera - .inverse() - .transform(&trig_points[0].cast()), - ) - .cast(), - p1: state - .intrinsics - .cam_proj( - &state - .scene_from_camera - .inverse() - .transform(&trig_points[1].cast()), - ) - .cast(), - color: Color { - r: 0.0, - g: 1.0, - b: 0.0, - a: 0.5, - }, - line_width: 2.0, - }, - Line2 { - p0: state - .intrinsics - .cam_proj( - &state - .scene_from_camera - .inverse() - .transform(&trig_points[1].cast()), - ) - .cast(), - p1: state - .intrinsics - .cam_proj( - &state - .scene_from_camera - .inverse() - .transform(&trig_points[2].cast()), - ) - .cast(), - color: Color { - r: 0.0, - g: 1.0, - b: 0.0, - a: 0.5, - }, - line_width: 2.0, - }, - Line2 { - p0: state - .intrinsics - .cam_proj( - &state - .scene_from_camera - .inverse() - .transform(&trig_points[2].cast()), - ) - .cast(), - p1: state - .intrinsics - .cam_proj( - &state - .scene_from_camera - .inverse() - .transform(&trig_points[0].cast()), - ) - .cast(), - color: Color { - r: 0.0, - g: 1.0, - b: 0.0, - a: 0.5, - }, - line_width: 2.0, - }, - ]; - - renderables.push(Renderable::Lines2(Lines2 { - name: "proj_line".to_owned(), - lines: proj_line2, - })); state.counter += 1; - if state.counter > 20 { - state.show = !state.show; - state.counter = 0; - } + outbound.packets.send(Stream { msg: packets }); - let lines = if state.show { - vec![ - Line2 { - p0: SVector::::new(16.0, 12.0), - p1: SVector::::new(32.0, 24.0), - color: Color { - r: 1.0, - g: 0.0, - b: 0.0, - a: 1.0, - }, - line_width: 5.0, - }, - Line2 { - p0: SVector::::new(-0.5, -0.5), - p1: SVector::::new(0.5, 0.5), - color: Color { - r: 1.0, - g: 0.0, - b: 0.0, - a: 1.0, - }, - line_width: 5.0, - }, - ] - } else { - vec![] - }; - - renderables.push(Renderable::Lines2(Lines2 { - name: "l".to_owned(), - lines, - })); - - outbound.packets.send(Stream { msg: renderables }); - - request.scene_from_camera_request.send_request(()); + // request + // .scene_from_camera_request + // .send_request("view_2d".to_owned()); } ContentGeneratorMessage::SceneFromCamera(reply) => { - state.scene_from_camera = reply.reply; + println!("{}", reply.reply); } } } @@ -432,26 +204,7 @@ impl IsInboundMessageNew>> for 
ContentGeneratorMe } pub async fn run_viewer_example() { - // Camera / view pose parameters - let intrinsics = DynCamera::new_kannala_brandt( - &VecF64::<8>::from_array([600.0, 600.0, 320.0, 240.0, 0.0, 0.0, 0.0, 0.0]), - ImageSize { - width: 640, - height: 480, - }, - ); - let scene_from_camera = Isometry3::from_t(&VecF64::<3>::new(0.0, 0.0, -5.0)); - let clipping_planes = WgpuClippingPlanes { - near: 0.1, - far: 1000.0, - }; - let camera = ViewerCamera { - intrinsics: intrinsics.clone(), - clipping_planes, - scene_from_camera, - }; - - let mut builder = ViewerBuilder::from_config(ViewerConfig { camera }); + let mut builder = ViewerBuilder::from_config(ViewerConfig {}); // Pipeline configuration let pipeline = Hollywood::configure(&mut |context| { @@ -462,15 +215,10 @@ pub async fn run_viewer_example() { let mut content_generator = ContentGenerator::from_prop_and_state( context, NullProp {}, - ContentGeneratorState { - counter: 0, - show: false, - intrinsics: intrinsics.clone(), - scene_from_camera, - }, + ContentGeneratorState { counter: 0 }, ); // 3. The viewer actor - let mut viewer = EguiActor::, (), Isometry3, (), ()>::from_builder( + let mut viewer = EguiActor::, (), ()>::from_builder( context, &builder, ); diff --git a/crates/sophus/src/examples.rs b/crates/sophus/src/examples.rs new file mode 100644 index 0000000..8fd9007 --- /dev/null +++ b/crates/sophus/src/examples.rs @@ -0,0 +1 @@ +pub mod viewer_example; diff --git a/crates/sophus/src/examples/viewer_example.rs b/crates/sophus/src/examples/viewer_example.rs new file mode 100644 index 0000000..d99d42b --- /dev/null +++ b/crates/sophus/src/examples/viewer_example.rs @@ -0,0 +1,26 @@ +use sophus_image::arc_image::ArcImage4U8; +use sophus_image::mut_image::MutImage4U8; +use sophus_image::mut_image_view::IsMutImageView; +use sophus_image::ImageSize; + +/// Makes example image of image-size +pub fn make_example_image(image_size: ImageSize) -> ArcImage4U8 { + let mut img = MutImage4U8::from_image_size_and_val( + image_size, + nalgebra::SVector::::new(255, 0, 255, 255), + ); + + let w = image_size.width; + let h = image_size.height; + + for i in 0..10 { + for j in 0..10 { + img.mut_pixel(i, j).copy_from_slice(&[0, 0, 0, 255]); + img.mut_pixel(i, h - j - 1) + .copy_from_slice(&[255, 255, 255, 255]); + img.mut_pixel(w - i - 1, h - j - 1) + .copy_from_slice(&[0, 0, 255, 255]); + } + } + img.to_shared() +} diff --git a/crates/sophus/src/lib.rs b/crates/sophus/src/lib.rs index 419551a..4ae3763 100644 --- a/crates/sophus/src/lib.rs +++ b/crates/sophus/src/lib.rs @@ -1,11 +1,19 @@ #![feature(portable_simd)] #![allow(clippy::needless_range_loop)] +pub mod examples; + +#[doc(inline)] pub use sophus_core as core; +#[doc(inline)] pub use sophus_image as image; +#[doc(inline)] pub use sophus_lie as lie; +#[doc(inline)] pub use sophus_opt as opt; +#[doc(inline)] pub use sophus_sensor as sensor; +#[doc(inline)] pub use sophus_viewer as viewer; pub use hollywood; diff --git a/crates/sophus_core/src/lib.rs b/crates/sophus_core/src/lib.rs index 54ff6b5..05e99d0 100644 --- a/crates/sophus_core/src/lib.rs +++ b/crates/sophus_core/src/lib.rs @@ -1,7 +1,26 @@ #![feature(portable_simd)] #![deny(missing_docs)] #![allow(clippy::needless_range_loop)] -//! sophus core crate - part of the sophus-rs projectx +//! Core math functionality including +//! - linear algebra types +//! * such as [linalg::VecF64], and [linalg::MatF64] +//! * batch types such as [linalg::BatchScalarF64], [linalg::BatchVecF64], +//! [linalg::BatchMatF64] +//! - tensors +//! 
* design: dynamic tensor (ndarray) of static tensors (nalgebra) +//! - differentiation tools +//! * dual numbers: [calculus::dual::DualScalar], [calculus::dual::DualVector], +//! [calculus::dual::DualMatrix] +//! * [calculus::maps::curves] f: ℝ -> ℝ, f: ℝ -> ℝʳ, f: ℝ -> ℝʳ x ℝᶜ +//! * [calculus::maps::scalar_valued_maps]: f: ℝᵐ -> ℝ, f: ℝᵐ x ℝⁿ -> ℝ +//! * [calculus::maps::vector_valued_maps]: f: ℝᵐ -> ℝᵖ, f: ℝᵐ x ℝⁿ -> ℝᵖ +//! * [calculus::maps::matrix_valued_maps]: f: ℝᵐ -> ℝʳ x ℝᶜ, f: ℝᵐ x ℝⁿ -> ℝʳ x ℝᶜ +//! - splines +//! * [calculus::spline::CubicBSpline] +//! - intervals, regions +//! * closed interval: [calculus::region::Interval] +//! * closed region: [calculus::region::Interval] +//! - manifolds: [manifold::traits] /// calculus - differentiation, splines, and more pub mod calculus; diff --git a/crates/sophus_core/src/tensor/tensor_view.rs b/crates/sophus_core/src/tensor/tensor_view.rs index 661e5ea..de4d3db 100644 --- a/crates/sophus_core/src/tensor/tensor_view.rs +++ b/crates/sophus_core/src/tensor/tensor_view.rs @@ -28,8 +28,8 @@ use std::marker::PhantomData; /// 2. A scalar tensor of TOTAL_RANK = DRANK + SRANK. /// * ``self.scalar_dims()`` is used to access its dimensions of type /// ``[usize: TOTAL_RANK]`` at runtime. -/// * - an individual element (= static tensor) can be accessed with -/// ``self.scalar_get(idx)``, where idx is of type ``[usize: DRANK]``. +/// * an individual element (= static tensor) can be accessed with +/// ``self.scalar_get(idx)``, where idx is of type ``[usize: DRANK]``. #[derive(Debug, Copy, Clone, PartialEq, Eq)] pub struct TensorView< 'a, diff --git a/crates/sophus_image/src/lib.rs b/crates/sophus_image/src/lib.rs index 502fbf1..f237429 100644 --- a/crates/sophus_image/src/lib.rs +++ b/crates/sophus_image/src/lib.rs @@ -42,6 +42,14 @@ impl ImageSize { pub fn area(&self) -> usize { self.width * self.height } + + /// Get the aspect ratio of the image - width / height + pub fn aspect_ratio(&self) -> f32 { + if self.area() == 0 { + return 1.0; + } + self.width as f32 / self.height as f32 + } } impl From<[usize; 2]> for ImageSize { @@ -63,6 +71,12 @@ impl From for [usize; 2] { } } +impl PartialEq for ImageSize { + fn eq(&self, other: &Self) -> bool { + self.width == other.width && self.height == other.height + } +} + /// sophus_image prelude pub mod prelude { pub use crate::image_view::IsImageView; diff --git a/crates/sophus_opt/src/ldlt.rs b/crates/sophus_opt/src/ldlt.rs index 5763e3f..e8f2a2e 100644 --- a/crates/sophus_opt/src/ldlt.rs +++ b/crates/sophus_opt/src/ldlt.rs @@ -242,8 +242,8 @@ impl SparseLdlt { ); let mut x = b.clone(); - let mut x_mut_slice = x.as_mut_slice(); - let mut x_ref = faer::mat::from_column_major_slice_mut::(&mut x_mut_slice, dim, 1); + let x_mut_slice = x.as_mut_slice(); + let mut x_ref = faer::mat::from_column_major_slice_mut::(x_mut_slice, dim, 1); faer::perm::permute_rows_in_place( faer::reborrow::ReborrowMut::rb_mut(&mut x_ref), perm_ref, diff --git a/crates/sophus_sensor/src/dyn_camera.rs b/crates/sophus_sensor/src/dyn_camera.rs index 74ca738..d946c11 100644 --- a/crates/sophus_sensor/src/dyn_camera.rs +++ b/crates/sophus_sensor/src/dyn_camera.rs @@ -24,6 +24,26 @@ pub type DynCamera = impl, const BATCH: usize, CameraType: IsCameraEnum> DynCameraFacade { + /// Create default pnhole from Image Size + pub fn default_pinhole(image_size: ImageSize) -> Self { + let w = image_size.width as f64; + let h = image_size.height as f64; + + let focal_length = (w + h) * 0.5; + Self { + camera_type: CameraType::new_pinhole( + 
&S::Vector::<4>::from_f64_array([ + focal_length, + focal_length, + 0.5 * w - 0.5, + 0.5 * h - 0.5, + ]), + image_size, + ), + phantom: std::marker::PhantomData, + } + } + /// Create a new dynamic camera facade from a camera model pub fn from_model(camera_type: CameraType) -> Self { Self { diff --git a/crates/sophus_viewer/Cargo.toml b/crates/sophus_viewer/Cargo.toml index 3b347b2..26bbc60 100644 --- a/crates/sophus_viewer/Cargo.toml +++ b/crates/sophus_viewer/Cargo.toml @@ -23,6 +23,7 @@ eframe.workspace = true egui_extras.workspace = true env_logger.workspace = true hollywood.workspace = true +linked-hash-map.workspace = true nalgebra.workspace = true ndarray.workspace = true tokio.workspace = true diff --git a/crates/sophus_viewer/src/actor.rs b/crates/sophus_viewer/src/actor.rs index ac7182a..6f305c3 100644 --- a/crates/sophus_viewer/src/actor.rs +++ b/crates/sophus_viewer/src/actor.rs @@ -1,14 +1,19 @@ -use crate::scene_renderer::interaction::WgpuClippingPlanes; -use crate::Renderable; -use crate::ViewerRenderState; use eframe::egui; use hollywood::actors::egui::EguiAppFromBuilder; use hollywood::actors::egui::GenericEguiBuilder; use hollywood::RequestWithReplyChannel; +use sophus_core::linalg::VecF64; +use sophus_image::ImageSize; +use sophus_lie::traits::IsTranslationProductGroup; use sophus_lie::Isometry3; use sophus_sensor::dyn_camera::DynCamera; +use crate::interactions::WgpuClippingPlanes; +use crate::renderables::Packets; +use crate::ViewerRenderState; + /// Viewer camera configuration. +#[derive(Clone, Debug)] pub struct ViewerCamera { /// Camera intrinsics pub intrinsics: DynCamera, @@ -18,36 +23,50 @@ pub struct ViewerCamera { pub scene_from_camera: Isometry3, } -/// Viewer configuration. -pub struct ViewerConfig { - /// Camera configuration - pub camera: ViewerCamera, +impl Default for ViewerCamera { + fn default() -> Self { + ViewerCamera::default_from(ImageSize::new(640, 480)) + } +} + +impl ViewerCamera { + /// Create default viewer camera from image size + pub fn default_from(image_size: ImageSize) -> ViewerCamera { + ViewerCamera { + intrinsics: DynCamera::default_pinhole(image_size), + clipping_planes: WgpuClippingPlanes::default(), + scene_from_camera: Isometry3::from_t(&VecF64::<3>::new(0.0, 0.0, -5.0)), + } + } } +/// Viewer configuration. +pub struct ViewerConfig {} + /// Inbound message for the Viewer actor. #[derive(Clone, Debug)] pub enum ViewerMessage { /// Packets to render - Packets(Vec), + Packets(Packets), } /// Inbound message for the Viewer actor. #[derive(Debug)] pub enum ViewerInRequestMessage { /// Request the view pose - RequestViewPose(RequestWithReplyChannel<(), Isometry3>), + RequestViewPose(RequestWithReplyChannel>), } impl Default for ViewerMessage { fn default() -> Self { - ViewerMessage::Packets(Vec::default()) + ViewerMessage::Packets(Packets::default()) } } /// Builder for the Viewer actor. 
pub type ViewerBuilder = GenericEguiBuilder< - Vec, - RequestWithReplyChannel<(), Isometry3>, + Packets, + RequestWithReplyChannel>, (), (), ViewerConfig, diff --git a/crates/sophus_viewer/src/interactions.rs b/crates/sophus_viewer/src/interactions.rs new file mode 100644 index 0000000..8294323 --- /dev/null +++ b/crates/sophus_viewer/src/interactions.rs @@ -0,0 +1,98 @@ +/// in-plane interaction +pub mod inplane_interaction; +/// orbit interaction +pub mod orbit_interaction; + +use eframe::egui; +use sophus_core::linalg::VecF32; +use sophus_core::linalg::VecF64; +use sophus_image::arc_image::ArcImageF32; +use sophus_lie::Isometry3; +use sophus_sensor::dyn_camera::DynCamera; + +use crate::interactions::inplane_interaction::InplaneInteraction; +use crate::interactions::orbit_interaction::OrbitalInteraction; + +/// Clipping planes for the Wgpu renderer +#[derive(Clone, Copy, Debug)] +pub struct WgpuClippingPlanes { + /// Near clipping plane + pub near: f64, + /// Far clipping plane + pub far: f64, +} + +impl Default for WgpuClippingPlanes { + fn default() -> Self { + WgpuClippingPlanes { + near: 0.1, + far: 1000.0, + } + } +} + +impl WgpuClippingPlanes { + fn z_from_ndc(&self, ndc: f64) -> f64 { + -(self.far * self.near) / (-self.far + ndc * self.far - ndc * self.near) + } + + pub(crate) fn _ndc_from_z(&self, z: f64) -> f64 { + (self.far * (z - self.near)) / (z * (self.far - self.near)) + } +} + +/// Interaction state for pointer +#[derive(Clone, Copy)] +pub struct InteractionPointerState { + /// Start uv position + pub start_uv: VecF64<2>, +} + +/// Scene focus +#[derive(Clone, Copy)] +pub struct SceneFocus { + /// Depth + pub depth: f64, + /// UV position + pub uv: VecF64<2>, +} + +#[derive(Clone, Copy)] +/// Scroll state +pub struct ScrollState {} + +/// Interaction state +pub enum InteractionEnum { + /// orbit interaction state + OrbitalInteraction(OrbitalInteraction), + /// in-plane interaction state + InplaneInteraction(InplaneInteraction), +} + +impl InteractionEnum { + /// Get scene_from_camera isometry + pub fn scene_from_camera(&self) -> Isometry3 { + match self { + InteractionEnum::OrbitalInteraction(orbit) => orbit.scene_from_camera, + InteractionEnum::InplaneInteraction(inplane) => inplane.scene_from_camera, + } + } + + /// process event + pub fn process_event( + &mut self, + cam: &DynCamera, + response: &egui::Response, + scales: &VecF32<2>, + z_buffer: &ArcImageF32, + ) { + match self { + InteractionEnum::OrbitalInteraction(orbit) => { + orbit.process_event(cam, response, scales, z_buffer) + } + InteractionEnum::InplaneInteraction(inplane) => { + inplane.process_event(cam, response, scales, z_buffer) + } + } + } +} diff --git a/crates/sophus_viewer/src/interactions/inplane_interaction.rs b/crates/sophus_viewer/src/interactions/inplane_interaction.rs new file mode 100644 index 0000000..77e7f76 --- /dev/null +++ b/crates/sophus_viewer/src/interactions/inplane_interaction.rs @@ -0,0 +1,33 @@ +use eframe::egui; +use sophus_core::linalg::VecF32; +use sophus_image::arc_image::ArcImageF32; +use sophus_lie::Isometry3; +use sophus_sensor::DynCamera; + +use crate::interactions::InteractionPointerState; +use crate::interactions::SceneFocus; +use crate::interactions::ScrollState; +use crate::interactions::WgpuClippingPlanes; + +#[derive(Clone, Copy)] +/// Interaction state +pub struct InplaneInteraction { + pub(crate) maybe_pointer_state: Option, + pub(crate) maybe_scroll_state: Option, + pub(crate) maybe_scene_focus: Option, + pub(crate) _clipping_planes: WgpuClippingPlanes, + pub(crate) 
scene_from_camera: Isometry3, +} + +impl InplaneInteraction { + /// Process event + pub fn process_event( + &mut self, + _cam: &DynCamera, + _response: &egui::Response, + _scales: &VecF32<2>, + _z_buffer: &ArcImageF32, + ) { + todo!() + } +} diff --git a/crates/sophus_viewer/src/scene_renderer/interaction.rs b/crates/sophus_viewer/src/interactions/orbit_interaction.rs similarity index 84% rename from crates/sophus_viewer/src/scene_renderer/interaction.rs rename to crates/sophus_viewer/src/interactions/orbit_interaction.rs index 9f04c1f..11116d2 100644 --- a/crates/sophus_viewer/src/scene_renderer/interaction.rs +++ b/crates/sophus_viewer/src/interactions/orbit_interaction.rs @@ -1,53 +1,21 @@ use eframe::egui; +use sophus_core::linalg::VecF32; use sophus_core::linalg::VecF64; +use sophus_core::IsTensorLike; use sophus_image::arc_image::ArcImageF32; -use sophus_image::prelude::*; -use sophus_lie::prelude::*; +use sophus_image::image_view::IsImageView; +use sophus_lie::traits::IsTranslationProductGroup; use sophus_lie::Isometry3; -use sophus_sensor::dyn_camera::DynCamera; +use sophus_sensor::DynCamera; -/// Clipping planes for the Wgpu renderer -#[derive(Clone, Copy)] -pub struct WgpuClippingPlanes { - /// Near clipping plane - pub near: f64, - /// Far clipping plane - pub far: f64, -} - -impl WgpuClippingPlanes { - fn z_from_ndc(&self, ndc: f64) -> f64 { - -(self.far * self.near) / (-self.far + ndc * self.far - ndc * self.near) - } - - pub(crate) fn _ndc_from_z(&self, z: f64) -> f64 { - (self.far * (z - self.near)) / (z * (self.far - self.near)) - } -} - -/// Interaction state for pointer -#[derive(Clone, Copy)] -pub struct InteractionPointerState { - /// Start uv position - pub start_uv: VecF64<2>, -} - -/// Scene focus -#[derive(Clone, Copy)] -pub struct SceneFocus { - /// Depth - pub depth: f64, - /// UV position - pub uv: VecF64<2>, -} - -#[derive(Clone, Copy)] -/// Scroll state -pub struct ScrollState {} +use crate::interactions::InteractionPointerState; +use crate::interactions::SceneFocus; +use crate::interactions::ScrollState; +use crate::interactions::WgpuClippingPlanes; #[derive(Clone, Copy)] /// Interaction state -pub struct Interaction { +pub struct OrbitalInteraction { pub(crate) maybe_pointer_state: Option, pub(crate) maybe_scroll_state: Option, pub(crate) maybe_scene_focus: Option, @@ -55,7 +23,7 @@ pub struct Interaction { pub(crate) scene_from_camera: Isometry3, } -impl Interaction { +impl OrbitalInteraction { fn median_scene_depth(&self, z_buffer: &ArcImageF32) -> f64 { // to median ndc z let scalar_view = z_buffer.tensor.scalar_view(); @@ -86,6 +54,7 @@ impl Interaction { &mut self, cam: &DynCamera, response: &egui::Response, + scales: &VecF32<2>, z_buffer: &ArcImageF32, ) { let last_pointer_pos = response.ctx.input(|i| i.pointer.latest_pos()); @@ -102,7 +71,10 @@ impl Interaction { let scroll_stopped = self.maybe_scroll_state.is_some() && is_scroll_zero; if scroll_started { - let uv = last_pointer_pos - response.rect.min; + let uv = egui::Vec2::new( + (last_pointer_pos - response.rect.min)[0] * scales[0], + (last_pointer_pos - response.rect.min)[1] * scales[1], + ); if self.maybe_scene_focus.is_none() { // Typically, the scene focus shall only be set by the pointer interaction event. 
But @@ -158,6 +130,7 @@ impl Interaction { &mut self, cam: &DynCamera, response: &egui::Response, + scales: &VecF32<2>, z_buffer: &ArcImageF32, ) { let delta_x = response.drag_delta().x; @@ -168,7 +141,10 @@ impl Interaction { let pointer = response.interact_pointer_pos().unwrap(); - let uv = pointer - response.rect.min; + let uv = egui::Pos2::new( + (pointer - response.rect.min)[0] * scales[0], + (pointer - response.rect.min)[1] * scales[1], + ); let mut z = self .clipping_planes @@ -191,7 +167,8 @@ impl Interaction { if response.dragged_by(egui::PointerButton::Secondary) { // translate scene - let current_pixel = response.interact_pointer_pos().unwrap() - response.rect.min; + let c = response.interact_pointer_pos().unwrap() - response.rect.min; + let current_pixel = egui::Vec2::new(c[0] * scales[0], c[1] * scales[1]); let scene_focus = self.maybe_scene_focus.unwrap(); let start_pixel = self.maybe_pointer_state.unwrap().start_uv; let depth = scene_focus.depth; @@ -235,9 +212,10 @@ impl Interaction { &mut self, cam: &DynCamera, response: &egui::Response, + scales: &VecF32<2>, z_buffer: &ArcImageF32, ) { - self.process_pointer(cam, response, z_buffer); - self.process_scrolls(cam, response, z_buffer); + self.process_pointer(cam, response, scales, z_buffer); + self.process_scrolls(cam, response, scales, z_buffer); } } diff --git a/crates/sophus_viewer/src/lib.rs b/crates/sophus_viewer/src/lib.rs index 583ba12..7d779ec 100644 --- a/crates/sophus_viewer/src/lib.rs +++ b/crates/sophus_viewer/src/lib.rs @@ -6,43 +6,23 @@ /// The actor for the viewer. pub mod actor; +/// Interactions +pub mod interactions; /// The offscreen texture for rendering. pub mod offscreen; /// The pixel renderer for 2D rendering. pub mod pixel_renderer; /// The renderable structs. -pub mod renderable; +pub mod renderables; /// The scene renderer for 3D rendering. pub mod scene_renderer; +/// The simple viewer. +pub mod simple_viewer; +/// The view struct. +pub mod views; -use self::actor::ViewerBuilder; -use self::offscreen::OffscreenTexture; -use self::pixel_renderer::PixelRenderer; -use self::renderable::Renderable; -use self::scene_renderer::depth_renderer::DepthRenderer; -use self::scene_renderer::textured_mesh::TexturedMeshVertex3; -use self::scene_renderer::SceneRenderer; -use crate::pixel_renderer::LineVertex2; -use crate::pixel_renderer::PointVertex2; -use crate::scene_renderer::line::LineVertex3; -use crate::scene_renderer::mesh::MeshVertex3; -use crate::scene_renderer::point::PointVertex3; -use eframe::egui::load::SizedTexture; -use eframe::egui::Image; -use eframe::egui::Sense; -use eframe::egui::{self}; use eframe::egui_wgpu::Renderer; use eframe::epaint::mutex::RwLock; -use hollywood::actors::egui::EguiAppFromBuilder; -use hollywood::actors::egui::Stream; -use hollywood::compute::pipeline::CancelRequest; -use hollywood::RequestWithReplyChannel; -use sophus_core::tensor::tensor_view::IsTensorLike; -use sophus_image::arc_image::ArcImage4U8; -use sophus_image::image_view::IsImageView; -use sophus_image::ImageSize; -use sophus_lie::Isometry3; -use sophus_sensor::dyn_camera::DynCamera; use std::sync::Arc; /// The state of the viewer. 
@@ -53,395 +33,3 @@ pub struct ViewerRenderState { pub(crate) queue: Arc, pub(crate) _adapter: Arc, } - -pub(crate) struct BackgroundTexture { - pub(crate) background_image_texture: wgpu::Texture, - pub(crate) background_image_tex_id: egui::TextureId, -} - -impl BackgroundTexture { - fn new(image_size: ImageSize, render_state: ViewerRenderState) -> Self { - let background_image_target_target = - render_state - .device - .create_texture(&wgpu::TextureDescriptor { - label: None, - size: wgpu::Extent3d { - width: image_size.width as u32, - height: image_size.height as u32, - depth_or_array_layers: 1, - }, - mip_level_count: 1, - sample_count: 1, - dimension: wgpu::TextureDimension::D2, - format: wgpu::TextureFormat::Rgba8UnormSrgb, - usage: wgpu::TextureUsages::RENDER_ATTACHMENT - | wgpu::TextureUsages::COPY_DST - | wgpu::TextureUsages::TEXTURE_BINDING, - view_formats: &[wgpu::TextureFormat::Rgba8UnormSrgb], - }); - - let background_image_texture_view = - background_image_target_target.create_view(&wgpu::TextureViewDescriptor::default()); - let background_image_tex_id = render_state.wgpu_state.write().register_native_texture( - render_state.device.as_ref(), - &background_image_texture_view, - wgpu::FilterMode::Linear, - ); - - Self { - background_image_texture: background_image_target_target, - background_image_tex_id, - } - } -} - -/// The simple viewer top-level struct. -pub struct SimpleViewer { - state: ViewerRenderState, - offscreen: OffscreenTexture, - cam: DynCamera, - pixel: PixelRenderer, - scene: SceneRenderer, - background_image: Option, - background_texture: Option, - message_recv: tokio::sync::mpsc::UnboundedReceiver>>, - request_recv: - tokio::sync::mpsc::UnboundedReceiver>>, - cancel_request_sender: tokio::sync::mpsc::UnboundedSender, -} - -impl EguiAppFromBuilder for SimpleViewer { - fn new(builder: ViewerBuilder, render_state: ViewerRenderState) -> Box { - let depth_stencil = Some(wgpu::DepthStencilState { - format: wgpu::TextureFormat::Depth32Float, - depth_write_enabled: true, - depth_compare: wgpu::CompareFunction::Less, - stencil: wgpu::StencilState::default(), - bias: wgpu::DepthBiasState::default(), - }); - - Box::new(SimpleViewer { - state: render_state.clone(), - offscreen: OffscreenTexture::new(&render_state, &builder.config.camera.intrinsics), - cam: builder.config.camera.intrinsics.clone(), - pixel: PixelRenderer::new(&render_state, &builder, depth_stencil.clone()), - scene: SceneRenderer::new(&render_state, &builder, depth_stencil), - message_recv: builder.message_from_actor_recv, - request_recv: builder.in_request_from_actor_recv, - cancel_request_sender: builder.cancel_request_sender.unwrap(), - background_image: None, - background_texture: None, - }) - } - - type Out = SimpleViewer; - - type State = ViewerRenderState; -} - -impl SimpleViewer { - fn add_renderables_to_tables(&mut self) { - loop { - let maybe_stream = self.message_recv.try_recv(); - if maybe_stream.is_err() { - break; - } - let stream = maybe_stream.unwrap(); - for m in stream.msg { - match m { - Renderable::Lines2(lines) => { - self.pixel - .line_renderer - .lines_table - .insert(lines.name, lines.lines); - } - Renderable::Points2(points) => { - self.pixel - .point_renderer - .points_table - .insert(points.name, points.points); - } - Renderable::Lines3(lines3) => { - self.scene - .line_renderer - .line_table - .insert(lines3.name, lines3.lines); - } - Renderable::Points3(points3) => { - self.scene - .point_renderer - .point_table - .insert(points3.name, points3.points); - } - 
Renderable::Mesh3(mesh) => { - self.scene - .mesh_renderer - .mesh_table - .insert(mesh.name, mesh.mesh); - } - Renderable::TexturedMesh3(textured_mesh) => { - self.scene - .textured_mesh_renderer - .mesh_table - .insert(textured_mesh.name, textured_mesh.mesh); - } - Renderable::BackgroundImage(image) => { - self.background_image = Some(image); - } - } - } - } - - loop { - let maybe_request = self.request_recv.try_recv(); - if maybe_request.is_err() { - break; - } - let request = maybe_request.unwrap(); - request.reply(self.scene.interaction.scene_from_camera); - } - - for (_, points) in self.pixel.point_renderer.points_table.iter() { - for point in points.iter() { - let v = PointVertex2 { - _pos: [point.p[0], point.p[1]], - _color: [point.color.r, point.color.g, point.color.b, point.color.a], - _point_size: point.point_size, - }; - for _i in 0..6 { - self.pixel.point_renderer.vertex_data.push(v); - } - } - } - for (_, lines) in self.pixel.line_renderer.lines_table.iter() { - for line in lines.iter() { - let p0 = line.p0; - let p1 = line.p1; - let d = (p0 - p1).normalize(); - let normal = [d[1], -d[0]]; - - let v0 = LineVertex2 { - _pos: [p0[0], p0[1]], - _normal: normal, - _color: [line.color.r, line.color.g, line.color.b, line.color.a], - _line_width: line.line_width, - }; - let v1 = LineVertex2 { - _pos: [p1[0], p1[1]], - _normal: normal, - _color: [line.color.r, line.color.g, line.color.b, line.color.a], - _line_width: line.line_width, - }; - self.pixel.line_renderer.vertex_data.push(v0); - self.pixel.line_renderer.vertex_data.push(v0); - self.pixel.line_renderer.vertex_data.push(v1); - self.pixel.line_renderer.vertex_data.push(v0); - self.pixel.line_renderer.vertex_data.push(v1); - self.pixel.line_renderer.vertex_data.push(v1); - } - } - for (_, points) in self.scene.point_renderer.point_table.iter() { - for point in points.iter() { - let v = PointVertex3 { - _pos: [point.p[0], point.p[1], point.p[2]], - _color: [point.color.r, point.color.g, point.color.b, point.color.a], - _point_size: point.point_size, - }; - for _i in 0..6 { - self.scene.point_renderer.vertex_data.push(v); - } - } - } - for (_, lines) in self.scene.line_renderer.line_table.iter() { - for line in lines.iter() { - let p0 = line.p0; - let p1 = line.p1; - - let v0 = LineVertex3 { - _p0: [p0[0], p0[1], p0[2]], - _p1: [p1[0], p1[1], p1[2]], - _color: [line.color.r, line.color.g, line.color.b, line.color.a], - _line_width: line.line_width, - }; - let v1 = LineVertex3 { - _p0: [p0[0], p0[1], p0[2]], - _p1: [p1[0], p1[1], p1[2]], - _color: [line.color.r, line.color.g, line.color.b, line.color.a], - _line_width: line.line_width, - }; - self.scene.line_renderer.vertex_data.push(v0); - self.scene.line_renderer.vertex_data.push(v0); - self.scene.line_renderer.vertex_data.push(v1); - self.scene.line_renderer.vertex_data.push(v0); - self.scene.line_renderer.vertex_data.push(v1); - self.scene.line_renderer.vertex_data.push(v1); - } - } - for (_, mesh) in self.scene.mesh_renderer.mesh_table.iter() { - for trig in mesh.iter() { - let v0 = MeshVertex3 { - _pos: [trig.p0[0], trig.p0[1], trig.p0[2]], - _color: [trig.color.r, trig.color.g, trig.color.b, trig.color.a], - }; - let v1 = MeshVertex3 { - _pos: [trig.p1[0], trig.p1[1], trig.p1[2]], - _color: [trig.color.r, trig.color.g, trig.color.b, trig.color.a], - }; - let v2 = MeshVertex3 { - _pos: [trig.p2[0], trig.p2[1], trig.p2[2]], - _color: [trig.color.r, trig.color.g, trig.color.b, trig.color.a], - }; - self.scene.mesh_renderer.vertices.push(v0); - 
self.scene.mesh_renderer.vertices.push(v1); - self.scene.mesh_renderer.vertices.push(v2); - } - } - for (_, mesh) in self.scene.textured_mesh_renderer.mesh_table.iter() { - for trig in mesh.iter() { - let v0 = TexturedMeshVertex3 { - _pos: [trig.p0[0], trig.p0[1], trig.p0[2]], - _tex: [trig.tex0[0], trig.tex0[1]], - }; - let v1 = TexturedMeshVertex3 { - _pos: [trig.p1[0], trig.p1[1], trig.p1[2]], - _tex: [trig.tex1[0], trig.tex1[1]], - }; - let v2 = TexturedMeshVertex3 { - _pos: [trig.p2[0], trig.p2[1], trig.p2[2]], - _tex: [trig.tex2[0], trig.tex2[1]], - }; - self.scene.textured_mesh_renderer.vertices.push(v0); - self.scene.textured_mesh_renderer.vertices.push(v1); - self.scene.textured_mesh_renderer.vertices.push(v2); - } - } - } -} - -impl eframe::App for SimpleViewer { - fn on_exit(&mut self, _gl: Option<&eframe::glow::Context>) { - self.cancel_request_sender.send(CancelRequest).unwrap(); - } - - fn update(&mut self, ctx: &egui::Context, _frame: &mut eframe::Frame) { - egui_extras::install_image_loaders(ctx); - - self.scene.clear_vertex_data(); - self.pixel.clear_vertex_data(); - - self.add_renderables_to_tables(); - - self.pixel.prepare(&self.state); - self.scene.prepare(&self.state, &self.cam); - - if let Some(image) = self.background_image.clone() { - self.background_texture = Some(BackgroundTexture::new( - image.image_size(), - self.state.clone(), - )); - let tex_ref = self.background_texture.as_ref().unwrap(); - - self.state.queue.write_texture( - wgpu::ImageCopyTexture { - texture: &tex_ref.background_image_texture, - mip_level: 0, - origin: wgpu::Origin3d::ZERO, - aspect: wgpu::TextureAspect::All, - }, - bytemuck::cast_slice(image.tensor.scalar_view().as_slice().unwrap()), - wgpu::ImageDataLayout { - offset: 0, - bytes_per_row: Some(4 * image.image_size().width as u32), - rows_per_image: None, - }, - tex_ref.background_image_texture.size(), - ); - - self.background_image = None; - } - // self.state.queue.write_texture( - // wgpu::ImageCopyTexture { - // texture: &self.buffers.dist_texture, - // mip_level: 0, - // origin: wgpu::Origin3d::ZERO, - // aspect: wgpu::TextureAspect::All, - // }, - // bytemuck::cast_slice(distort_lut.table.tensor.scalar_view().as_slice().unwrap()), - // wgpu::ImageDataLayout { - // offset: 0, - // bytes_per_row: Some(8 * distort_lut.table.image_size().width as u32), - // rows_per_image: None, - // }, - // self.buffers.dist_texture.size(), - // ); - - let mut command_encoder = self - .state - .device - .create_command_encoder(&wgpu::CommandEncoderDescriptor::default()); - - self.scene.paint( - &mut command_encoder, - &self.offscreen.rgba_texture_view, - &self.scene.depth_renderer, - ); - self.pixel.paint( - &mut command_encoder, - &self.offscreen.rgba_texture_view, - &self.scene.depth_renderer, - ); - - self.state.queue.submit(Some(command_encoder.finish())); - - self.pixel - .show_interaction_marker(&self.state, &self.scene.interaction); - - let mut command_encoder = self - .state - .device - .create_command_encoder(&wgpu::CommandEncoderDescriptor::default()); - self.scene.depth_paint( - &mut command_encoder, - &self.offscreen.depth_texture_view, - &self.scene.depth_renderer, - ); - - let depth_image = - self.offscreen - .download_images(&self.state, command_encoder, &self.cam.image_size()); - - egui::CentralPanel::default().show(ctx, |ui| { - ui.scope(|ui| { - if let Some(image) = &self.background_texture { - ui.add( - Image::new(SizedTexture { - size: egui::Vec2::new( - image.background_image_texture.size().width as f32, - 
image.background_image_texture.size().height as f32, - ), - id: image.background_image_tex_id, - }) - .fit_to_original_size(1.0), - ); - } - - let response = ui.add( - Image::new(SizedTexture { - size: egui::Vec2::new( - self.cam.image_size().width as f32, - self.cam.image_size().height as f32, - ), - id: self.offscreen.rgba_tex_id, - }) - .fit_to_original_size(1.0) - .sense(Sense::click_and_drag()), - ); - - self.scene.process_event(&self.cam, &response, depth_image); - }); - }); - - ctx.request_repaint(); - } -} diff --git a/crates/sophus_viewer/src/offscreen.rs b/crates/sophus_viewer/src/offscreen.rs index 0e5ec01..5392435 100644 --- a/crates/sophus_viewer/src/offscreen.rs +++ b/crates/sophus_viewer/src/offscreen.rs @@ -1,9 +1,11 @@ +use std::num::NonZeroU32; + use crate::ViewerRenderState; use eframe::egui::{self}; use sophus_image::arc_image::ArcImageF32; use sophus_image::image_view::ImageViewF32; use sophus_image::ImageSize; -use sophus_sensor::DynCamera; +use wgpu::COPY_BYTES_PER_ROW_ALIGNMENT; #[derive(Debug)] pub(crate) struct OffscreenTexture { @@ -15,17 +17,17 @@ pub(crate) struct OffscreenTexture { } impl OffscreenTexture { - pub(crate) fn new(render_state: &ViewerRenderState, intrinsics: &DynCamera) -> Self { - let w = intrinsics.image_size().width as f32; - let h = intrinsics.image_size().height as f32; + pub(crate) fn new(render_state: &ViewerRenderState, image_size: &ImageSize) -> Self { + let w = image_size.width as u32; + let h = image_size.height as u32; let render_target = render_state .device .create_texture(&wgpu::TextureDescriptor { label: None, size: wgpu::Extent3d { - width: w as u32, - height: h as u32, + width: w, + height: h, depth_or_array_layers: 1, }, mip_level_count: 1, @@ -45,15 +47,13 @@ impl OffscreenTexture { wgpu::FilterMode::Linear, ); - let depth_texture_data = Vec::::with_capacity(w as usize * h as usize * 4); - let depth_render_target = render_state .device .create_texture(&wgpu::TextureDescriptor { label: None, size: wgpu::Extent3d { - width: w as u32, - height: h as u32, + width: w, + height: h, depth_or_array_layers: 1, }, mip_level_count: 1, @@ -65,10 +65,15 @@ impl OffscreenTexture { | wgpu::TextureUsages::TEXTURE_BINDING, view_formats: &[wgpu::TextureFormat::R32Float], }); + + let bytes_per_row = Self::bytes_per_row(w); + + let required_buffer_size = bytes_per_row * h; // Total bytes needed in the buffer + let depth_output_staging_buffer = render_state.device.create_buffer(&wgpu::BufferDescriptor { label: None, - size: depth_texture_data.capacity() as u64, + size: required_buffer_size as wgpu::BufferAddress, usage: wgpu::BufferUsages::COPY_DST | wgpu::BufferUsages::MAP_READ, mapped_at_creation: false, }); @@ -93,6 +98,13 @@ impl OffscreenTexture { } } + fn bytes_per_row(width: u32) -> u32 { + let bytes_per_pixel = 4; + let unaligned_bytes_per_row = width * bytes_per_pixel; + let align = COPY_BYTES_PER_ROW_ALIGNMENT; // GPU's row alignment requirement + (unaligned_bytes_per_row + align - 1) & !(align - 1) + } + // download the depth image from the GPU to ArcImageF32 and copy the rgba texture to the egui texture pub fn download_images( &self, @@ -100,8 +112,9 @@ impl OffscreenTexture { mut command_encoder: wgpu::CommandEncoder, image_size: &ImageSize, ) -> ArcImageF32 { - let w = image_size.width as f32; - let h = image_size.height as f32; + let w = image_size.width as u32; + let h = image_size.height as u32; + command_encoder.copy_texture_to_buffer( wgpu::ImageCopyTexture { texture: &self.depth_render_target, @@ -113,16 +126,17 @@ impl 
OffscreenTexture { buffer: &self.depth_output_staging_buffer, layout: wgpu::ImageDataLayout { offset: 0, - bytes_per_row: Some((w as u32) * 4), - rows_per_image: Some(h as u32), + bytes_per_row: Some(NonZeroU32::new(Self::bytes_per_row(w)).unwrap().get()), + rows_per_image: Some(h), }, }, wgpu::Extent3d { - width: w as u32, - height: h as u32, + width: w, + height: h, depth_or_array_layers: 1, }, ); + state.queue.submit(Some(command_encoder.finish())); #[allow(unused_assignments)] diff --git a/crates/sophus_viewer/src/pixel_renderer.rs b/crates/sophus_viewer/src/pixel_renderer.rs index 616f49f..386a735 100644 --- a/crates/sophus_viewer/src/pixel_renderer.rs +++ b/crates/sophus_viewer/src/pixel_renderer.rs @@ -3,18 +3,19 @@ pub mod line; /// Pixel point renderer pub mod pixel_point; -use self::line::PixelLineRenderer; -use self::pixel_point::PixelPointRenderer; -use crate::actor::ViewerBuilder; -use crate::scene_renderer::interaction::Interaction; -use crate::DepthRenderer; -use crate::ViewerRenderState; use bytemuck::Pod; use bytemuck::Zeroable; +use sophus_image::ImageSize; use std::num::NonZeroU64; use wgpu::util::DeviceExt; use wgpu::DepthStencilState; +use crate::interactions::InteractionEnum; +use crate::pixel_renderer::line::PixelLineRenderer; +use crate::pixel_renderer::pixel_point::PixelPointRenderer; +use crate::scene_renderer::depth_renderer::DepthRenderer; +use crate::ViewerRenderState; + /// Renderer for pixel data pub struct PixelRenderer { pub(crate) uniform_bind_group: wgpu::BindGroup, @@ -64,7 +65,7 @@ impl PixelRenderer { /// Create a new pixel renderer pub fn new( wgpu_render_state: &ViewerRenderState, - builder: &ViewerBuilder, + image_size: &ImageSize, depth_stencil: Option, ) -> Self { let device = &wgpu_render_state.device; @@ -90,12 +91,14 @@ impl PixelRenderer { }); let camera_uniform = OrthoCam { - width: builder.config.camera.intrinsics.image_size().width as f32, - height: builder.config.camera.intrinsics.image_size().height as f32, + width: image_size.width as f32, + height: image_size.height as f32, dummy0: 0.0, dummy1: 0.0, }; + println!("image_size: {:?}", image_size); + let uniform_buffer = device.create_buffer_init(&wgpu::util::BufferInitDescriptor { label: Some("pixel uniform buffer"), contents: bytemuck::cast_slice(&[camera_uniform]), @@ -130,32 +133,63 @@ impl PixelRenderer { pub(crate) fn show_interaction_marker( &self, state: &ViewerRenderState, - interaction_state: &Interaction, + interaction_state: &InteractionEnum, ) { - if let Some(scene_focus) = interaction_state.maybe_scene_focus { - *self.point_renderer.show_interaction_marker.lock().unwrap() = - if interaction_state.maybe_pointer_state.is_some() - || interaction_state.maybe_scroll_state.is_some() - { - let mut vertex_data = vec![]; - - for _i in 0..6 { - vertex_data.push(PointVertex2 { - _pos: [scene_focus.uv[0] as f32, scene_focus.uv[1] as f32], - _color: [0.5, 0.5, 0.5, 1.0], - _point_size: 5.0, - }); - } - state.queue.write_buffer( - &self.point_renderer.interaction_vertex_buffer, - 0, - bytemuck::cast_slice(&vertex_data), - ); - - true - } else { - false - }; + match interaction_state { + InteractionEnum::OrbitalInteraction(orbital_interaction) => { + if let Some(scene_focus) = orbital_interaction.maybe_scene_focus { + *self.point_renderer.show_interaction_marker.lock().unwrap() = + if orbital_interaction.maybe_pointer_state.is_some() + || orbital_interaction.maybe_scroll_state.is_some() + { + let mut vertex_data = vec![]; + + for _i in 0..6 { + vertex_data.push(PointVertex2 { + _pos: 
[scene_focus.uv[0] as f32, scene_focus.uv[1] as f32], + _color: [0.5, 0.5, 0.5, 1.0], + _point_size: 5.0, + }); + } + state.queue.write_buffer( + &self.point_renderer.interaction_vertex_buffer, + 0, + bytemuck::cast_slice(&vertex_data), + ); + + true + } else { + false + }; + } + } + InteractionEnum::InplaneInteraction(inplane_interaction) => { + if let Some(scene_focus) = inplane_interaction.maybe_scene_focus { + *self.point_renderer.show_interaction_marker.lock().unwrap() = + if inplane_interaction.maybe_pointer_state.is_some() + || inplane_interaction.maybe_scroll_state.is_some() + { + let mut vertex_data = vec![]; + + for _i in 0..6 { + vertex_data.push(PointVertex2 { + _pos: [scene_focus.uv[0] as f32, scene_focus.uv[1] as f32], + _color: [0.5, 0.5, 0.5, 1.0], + _point_size: 5.0, + }); + } + state.queue.write_buffer( + &self.point_renderer.interaction_vertex_buffer, + 0, + bytemuck::cast_slice(&vertex_data), + ); + + true + } else { + false + }; + } + } } } diff --git a/crates/sophus_viewer/src/pixel_renderer/line.rs b/crates/sophus_viewer/src/pixel_renderer/line.rs index 88f0168..4e18e29 100644 --- a/crates/sophus_viewer/src/pixel_renderer/line.rs +++ b/crates/sophus_viewer/src/pixel_renderer/line.rs @@ -1,11 +1,13 @@ -use crate::renderable::Line2; -use crate::LineVertex2; -use crate::ViewerRenderState; +use std::collections::BTreeMap; + use eframe::egui_wgpu::wgpu::util::DeviceExt; use nalgebra::SVector; -use std::collections::BTreeMap; use wgpu::DepthStencilState; +use crate::pixel_renderer::LineVertex2; +use crate::renderables::renderable2d::Line2; +use crate::ViewerRenderState; + /// Pixel line renderer pub struct PixelLineRenderer { pub(crate) pipeline: wgpu::RenderPipeline, diff --git a/crates/sophus_viewer/src/pixel_renderer/pixel_point.rs b/crates/sophus_viewer/src/pixel_renderer/pixel_point.rs index 5f66071..fcb4b58 100644 --- a/crates/sophus_viewer/src/pixel_renderer/pixel_point.rs +++ b/crates/sophus_viewer/src/pixel_renderer/pixel_point.rs @@ -1,11 +1,13 @@ -use crate::renderable::Point2; -use crate::PointVertex2; -use crate::ViewerRenderState; -use eframe::egui_wgpu::wgpu::util::DeviceExt; use std::collections::BTreeMap; use std::sync::Mutex; + +use eframe::egui_wgpu::wgpu::util::DeviceExt; use wgpu::DepthStencilState; +use crate::pixel_renderer::PointVertex2; +use crate::renderables::renderable2d::Point2; +use crate::ViewerRenderState; + /// Pixel point renderer pub struct PixelPointRenderer { pub(crate) pipeline: wgpu::RenderPipeline, diff --git a/crates/sophus_viewer/src/renderable.rs b/crates/sophus_viewer/src/renderable.rs deleted file mode 100644 index 8a01c1b..0000000 --- a/crates/sophus_viewer/src/renderable.rs +++ /dev/null @@ -1,170 +0,0 @@ -use sophus_image::arc_image::ArcImage4F32; -use sophus_image::arc_image::ArcImage4U8; - -use nalgebra::SVector; - -/// Color with alpha channel -#[derive(Copy, Clone, Debug, Default)] -pub struct Color { - /// Red channel - pub r: f32, - /// Green channel - pub g: f32, - /// Blue channel - pub b: f32, - /// Alpha channel - pub a: f32, -} - -/// Renderable entity -#[derive(Clone, Debug)] -pub enum Renderable { - /// 2D lines - Lines2(Lines2), - /// 2D points - Points2(Points2), - /// 3D lines - Lines3(Lines3), - /// 3D points - Points3(Points3), - /// 3D mesh - Mesh3(Mesh3), - /// 3D textured mesh - TexturedMesh3(TexturedMesh3), - /// Background image - BackgroundImage(ArcImage4U8), -} - -/// 2D lines -#[derive(Clone, Debug)] -pub struct Lines2 { - /// Name of the entity - pub name: String, - /// List of lines - pub lines: 
Vec, -} - -/// 2D points -#[derive(Clone, Debug)] -pub struct Points2 { - /// Name of the entity - pub name: String, - /// List of points - pub points: Vec, -} - -/// 3D lines -#[derive(Clone, Debug)] -pub struct Lines3 { - /// Name of the entity - pub name: String, - /// List of lines - pub lines: Vec, -} - -/// 3D points -#[derive(Clone, Debug)] -pub struct Points3 { - /// Name of the entity - pub name: String, - /// List of points - pub points: Vec, -} - -/// 3D mesh -#[derive(Clone, Debug)] -pub struct Mesh3 { - /// Name of the entity - pub name: String, - /// List of triangles - pub mesh: Vec, -} - -/// 3D textured mesh -#[derive(Clone, Debug)] -pub struct TexturedMesh3 { - /// Name of the entity - pub name: String, - /// List of textured triangles - pub mesh: Vec, - /// Texture image - pub texture: ArcImage4F32, -} - -/// 2D line -#[derive(Clone, Debug)] -pub struct Line2 { - /// Start point - pub p0: SVector, - /// End point - pub p1: SVector, - /// Color - pub color: Color, - /// Line width - pub line_width: f32, -} - -/// 3D line -#[derive(Clone, Debug)] -pub struct Line3 { - /// Start point - pub p0: SVector, - /// End point - pub p1: SVector, - /// Color - pub color: Color, - /// Line width - pub line_width: f32, -} - -/// 2D point -#[derive(Clone, Debug)] -pub struct Point2 { - /// Point - pub p: SVector, - /// Color - pub color: Color, - /// Point size in pixels - pub point_size: f32, -} - -/// 3D point -#[derive(Clone, Debug)] -pub struct Point3 { - /// Point - pub p: SVector, - /// Color - pub color: Color, - /// Point size in pixels - pub point_size: f32, -} - -/// 3D triangle -#[derive(Clone, Debug)] -pub struct Triangle3 { - /// Vertex 0 - pub p0: SVector, - /// Vertex 1 - pub p1: SVector, - /// Vertex 2 - pub p2: SVector, - /// Triangle color - pub color: Color, -} - -/// 3D textured triangle -#[derive(Clone, Debug)] -pub struct TexturedTriangle3 { - /// Vertex 0 - pub p0: SVector, - /// Vertex 1 - pub p1: SVector, - /// Vertex 2 - pub p2: SVector, - /// Texture coordinates for vertex 0 - pub tex0: SVector, - /// Texture coordinates for vertex 1 - pub tex1: SVector, - /// Texture coordinates for vertex 2 - pub tex2: SVector, -} diff --git a/crates/sophus_viewer/src/renderables.rs b/crates/sophus_viewer/src/renderables.rs new file mode 100644 index 0000000..4b4637a --- /dev/null +++ b/crates/sophus_viewer/src/renderables.rs @@ -0,0 +1,36 @@ +/// color +pub mod color; +/// frame +pub mod frame; +/// 2d renderable +pub mod renderable2d; +/// 3d rendeable +pub mod renderable3d; + +use sophus_image::arc_image::ArcImage4U8; + +use crate::renderables::renderable2d::View2dPacket; +use crate::renderables::renderable3d::View3dPacket; + +/// Image view renderable +#[derive(Clone, Debug)] +pub enum ImageViewRenderable { + /// Background image + BackgroundImage(ArcImage4U8), +} + +/// Packet of renderables +#[derive(Clone, Debug)] +pub enum Packet { + /// View3d packet + View3d(View3dPacket), + /// View2d packet + View2d(View2dPacket), +} + +/// Packet of renderables +#[derive(Clone, Debug, Default)] +pub struct Packets { + /// List of packets + pub packets: Vec, +} diff --git a/crates/sophus_viewer/src/renderables/color.rs b/crates/sophus_viewer/src/renderables/color.rs new file mode 100644 index 0000000..1df2ff7 --- /dev/null +++ b/crates/sophus_viewer/src/renderables/color.rs @@ -0,0 +1,64 @@ +/// Color with alpha channel +#[derive(Copy, Clone, Debug, Default)] +pub struct Color { + /// Red channel + pub r: f32, + /// Green channel + pub g: f32, + /// Blue channel + pub b: f32, + 
/// Alpha channel + pub a: f32, +} + +impl Color { + /// red + pub fn red() -> Color { + Color { + r: 1.0, + g: 0.0, + b: 0.0, + a: 1.0, + } + } + + /// green + pub fn green() -> Color { + Color { + r: 0.0, + g: 1.0, + b: 0.0, + a: 1.0, + } + } + + /// blue + pub fn blue() -> Color { + Color { + r: 0.0, + g: 0.0, + b: 1.0, + a: 1.0, + } + } + + /// black + pub fn black() -> Color { + Color { + r: 0.0, + g: 0.0, + b: 0.0, + a: 1.0, + } + } + + /// white + pub fn white() -> Color { + Color { + r: 1.0, + g: 1.0, + b: 1.0, + a: 1.0, + } + } +}
diff --git a/crates/sophus_viewer/src/renderables/frame.rs b/crates/sophus_viewer/src/renderables/frame.rs new file mode 100644 index 0000000..0da43a1 --- /dev/null +++ b/crates/sophus_viewer/src/renderables/frame.rs @@ -0,0 +1,100 @@ +use sophus_core::linalg::VecF64; +use sophus_image::arc_image::ArcImage4U8; +use sophus_image::image_view::IsImageView; +use sophus_image::ImageSize; +use sophus_sensor::DynCamera; + +/// Frame to hold content +/// +/// Invariants: +/// - The image, if available, must have the same size as the intrinsics. +/// - The image size must be non-zero. +#[derive(Clone, Debug)] +pub struct Frame { + // Image, if available must have the same size as the intrinsics + image: Option, + /// Intrinsics + intrinsics: DynCamera, +} + +impl Frame { + /// Try to create a new frame from an image and intrinsics + /// + /// Returns None if the image size is zero or the image size does not match the intrinsics. + pub fn try_from(image: &ArcImage4U8, intrinsics: &DynCamera) -> Option { + if intrinsics.image_size().area() == 0 { + return None; + } + if image.image_size() != intrinsics.image_size() { + return None; + } + Some(Frame { + image: Some(image.clone()), + intrinsics: intrinsics.clone(), + }) + } + + /// Create a new frame from an image + /// + /// Precondition: The image size must be non-zero. + pub fn from_image(image: &ArcImage4U8) -> Frame { + assert!(image.image_size().area() > 0); + + let intrinsics = DynCamera::new_pinhole( + &VecF64::<4>::new( + 600.0, + 600.0, + image.image_size().width as f64 * 0.5 - 0.5, + image.image_size().height as f64 * 0.5 - 0.5, + ), + image.image_size(), + ); + + println!("!!!!!!!!intrinsics: {:?}", intrinsics); + + Frame { + image: Some(image.clone()), + intrinsics, + } + } + + /// Create a new frame from intrinsics + /// + /// Precondition: The image size must be non-zero. + pub fn from_intrinsics(intrinsics: &DynCamera) -> Frame { + assert!(intrinsics.image_size().area() > 0); + Frame { + image: None, + intrinsics: intrinsics.clone(), + } + } + + /// Create a new frame from image size + /// + /// Precondition: The image size must be non-zero.
+ pub fn from_size(view_size: &ImageSize) -> Frame { + assert!(view_size.area() > 0); + Frame { + image: None, + intrinsics: DynCamera::new_pinhole( + &VecF64::<4>::new( + 600.0, + 600.0, + view_size.width as f64 * 0.5 - 0.5, + view_size.height as f64 * 0.5 - 0.5, + ), + *view_size, + ), + } + } + + /// Intrinsics + pub fn intrinsics(&self) -> &DynCamera { + &self.intrinsics + } + + /// Image, if available + pub fn maybe_image(&self) -> Option<&ArcImage4U8> { + self.image.as_ref() + } +} diff --git a/crates/sophus_viewer/src/renderables/renderable2d.rs b/crates/sophus_viewer/src/renderables/renderable2d.rs new file mode 100644 index 0000000..4f6f69f --- /dev/null +++ b/crates/sophus_viewer/src/renderables/renderable2d.rs @@ -0,0 +1,149 @@ +use nalgebra::SVector; + +use crate::renderables::color::Color; +use crate::renderables::frame::Frame; +use crate::renderables::renderable3d::Renderable3d; + +/// View3d renderable +#[derive(Clone, Debug)] +pub enum Renderable2d { + /// 2D lines + Lines2(Lines2), + /// 2D points + Points2(Points2), +} + +/// Packet of image renderables +#[derive(Clone, Debug)] +pub struct View2dPacket { + /// Frame to hold content + /// + /// 1. For each `view_label`, content (i.e. renderables2d, renderables3d) will be added to + /// the existing frame. If no frame exists yet, e.g. frame was always None for `view_label`, + /// the content is ignored. + /// 2. If we have a new frame, that is `frame == Some(...)`, all previous content is deleted, but + /// content from this packet will be added. + pub frame: Option, + /// List of 2d renderables + pub renderables2d: Vec, + /// List of 3d renderables + pub renderables3d: Vec, + /// Name of the view + pub view_label: String, +} + +/// Can be converted to Vec2F32 +pub trait HasToVec2F32 { + /// returns Vec2F32 + fn to_vec2(&self) -> SVector; +} + +impl HasToVec2F32 for [f32; 2] { + fn to_vec2(&self) -> SVector { + SVector::::new(self[0], self[1]) + } +} + +impl HasToVec2F32 for &[f32; 2] { + fn to_vec2(&self) -> SVector { + SVector::::new(self[0], self[1]) + } +} + +impl HasToVec2F32 for SVector { + fn to_vec2(&self) -> SVector { + *self + } +} + +/// 2D line +#[derive(Clone, Debug)] +pub struct Line2 { + /// Start point + pub p0: SVector, + /// End point + pub p1: SVector, + /// Color + pub color: Color, + /// Line width + pub line_width: f32, +} + +/// 2D point +#[derive(Clone, Debug)] +pub struct Point2 { + /// Point + pub p: SVector, + /// Color + pub color: Color, + /// Point size in pixels + pub point_size: f32, +} + +/// 2D lines +#[derive(Clone, Debug)] +pub struct Lines2 { + /// Name of the entity + pub name: String, + /// List of lines + pub lines: Vec, +} + +impl Lines2 { + /// make lines 2d + pub fn make( + name: impl ToString, + arr: &[[impl HasToVec2F32; 2]], + color: &Color, + line_width: f32, + ) -> Self { + let mut lines = Lines2 { + name: name.to_string(), + lines: vec![], + }; + + for tuple in arr { + lines.lines.push(Line2 { + p0: tuple[0].to_vec2(), + p1: tuple[1].to_vec2(), + color: *color, + line_width, + }); + } + + lines + } +} + +/// 2D points +#[derive(Clone, Debug)] +pub struct Points2 { + /// Name of the entity + pub name: String, + /// List of points + pub points: Vec, +} + +impl Points2 { + /// make points 2d + pub fn make( + name: impl ToString, + arr: &[impl HasToVec2F32], + color: &Color, + point_size: f32, + ) -> Self { + let mut points = Points2 { + name: name.to_string(), + points: vec![], + }; + + for p in arr { + points.points.push(Point2 { + p: p.to_vec2(), + color: *color, + point_size, 
+ }); + } + points + } +} diff --git a/crates/sophus_viewer/src/renderables/renderable3d.rs b/crates/sophus_viewer/src/renderables/renderable3d.rs new file mode 100644 index 0000000..7412f92 --- /dev/null +++ b/crates/sophus_viewer/src/renderables/renderable3d.rs @@ -0,0 +1,230 @@ +use nalgebra::SVector; + +use crate::actor::ViewerCamera; +use crate::renderables::color::Color; +use crate::renderables::renderable2d::HasToVec2F32; + +/// View3d renderable +#[derive(Clone, Debug)] +pub enum Renderable3d { + /// 3D lines + Lines3(Lines3), + /// 3D points + Points3(Points3), + /// 3D mesh + Mesh3(Mesh3), +} + +/// Packet of view3d renderables +#[derive(Clone, Debug)] +pub struct View3dPacket { + /// List of 3d renderables + pub renderables3d: Vec, + /// Name of the view + pub view_label: String, + /// Initial camera, ignored if not the first packet of "view_name" + pub initial_camera: ViewerCamera, +} + +/// 3D line +#[derive(Clone, Debug)] +pub struct Line3 { + /// Start point + pub p0: SVector, + /// End point + pub p1: SVector, + /// Color + pub color: Color, + /// Line width + pub line_width: f32, +} + +/// 3D point +#[derive(Clone, Debug)] +pub struct Point3 { + /// Point + pub p: SVector, + /// Color + pub color: Color, + /// Point size in pixels + pub point_size: f32, +} + +/// 3D triangle +#[derive(Clone, Debug)] +pub struct Triangle3 { + /// Vertex 0 + pub p0: SVector, + /// Vertex 1 + pub p1: SVector, + /// Vertex 2 + pub p2: SVector, + /// Triangle color + pub color: Color, +} + +/// 3D textured triangle +#[derive(Clone, Debug)] +pub struct TexturedTriangle3 { + /// Vertex 0 + pub p0: SVector, + /// Vertex 1 + pub p1: SVector, + /// Vertex 2 + pub p2: SVector, + /// Texture coordinates for vertex 0 + pub tex0: SVector, + /// Texture coordinates for vertex 1 + pub tex1: SVector, + /// Texture coordinates for vertex 2 + pub tex2: SVector, +} + +/// Can be converted to Vec3F32 +pub trait HasToVec3F32 { + /// returns Vec3F32 + fn to_vec3(&self) -> SVector; +} + +impl HasToVec3F32 for [f32; 3] { + fn to_vec3(&self) -> SVector { + SVector::::new(self[0], self[1], self[2]) + } +} + +impl HasToVec3F32 for &[f32; 3] { + fn to_vec3(&self) -> SVector { + SVector::::new(self[0], self[1], self[2]) + } +} + +impl HasToVec3F32 for SVector { + fn to_vec3(&self) -> SVector { + *self + } +} + +/// 3D points +#[derive(Clone, Debug)] +pub struct Points3 { + /// Name of the entity + pub name: String, + /// List of points + pub points: Vec, +} + +impl Points3 { + /// make points 3d + pub fn make( + name: impl ToString, + arr: &[impl HasToVec3F32], + color: &Color, + point_size: f32, + ) -> Self { + let mut points = Points3 { + name: name.to_string(), + points: vec![], + }; + + for p in arr { + points.points.push(Point3 { + p: p.to_vec3(), + color: *color, + point_size, + }); + } + points + } +} + +/// 3D lines +#[derive(Clone, Debug)] +pub struct Lines3 { + /// Name of the entity + pub name: String, + /// List of lines + pub lines: Vec, +} + +impl Lines3 { + /// make lines 3d + pub fn make( + name: impl ToString, + arr: &[[impl HasToVec3F32; 2]], + color: &Color, + line_width: f32, + ) -> Self { + let mut lines = Lines3 { + name: name.to_string(), + lines: vec![], + }; + + for tuple in arr { + lines.lines.push(Line3 { + p0: tuple[0].to_vec3(), + p1: tuple[1].to_vec3(), + color: *color, + line_width, + }); + } + + lines + } +} + +/// 3D mesh +#[derive(Clone, Debug)] +pub struct Mesh3 { + /// Name of the entity + pub name: String, + /// List of triangles + pub mesh: Vec, +} + +impl Mesh3 { + /// make mesh + 
pub fn make(name: impl ToString, arr: &[([impl HasToVec3F32; 3], Color)]) -> Self { + let mut mesh = Mesh3 { + name: name.to_string(), + mesh: vec![], + }; + + for (trig, color) in arr { + mesh.mesh.push(Triangle3 { + p0: trig[0].to_vec3(), + p1: trig[1].to_vec3(), + p2: trig[2].to_vec3(), + color: *color, + }); + } + + mesh + } +} + +/// 3D textured mesh +#[derive(Clone, Debug)] +pub struct TexturedMesh3 { + /// List of textured triangles + pub mesh: Vec, +} + +impl TexturedMesh3 { + /// make textured mesh + pub fn make(arr: &[[(impl HasToVec3F32, impl HasToVec2F32); 3]]) -> Self { + let mut mesh = TexturedMesh3 { mesh: vec![] }; + + for trig in arr { + mesh.mesh.push(TexturedTriangle3 { + p0: trig[0].0.to_vec3(), + p1: trig[1].0.to_vec3(), + p2: trig[2].0.to_vec3(), + tex0: trig[0].1.to_vec2(), + tex1: trig[1].1.to_vec2(), + tex2: trig[2].1.to_vec2(), + }); + } + + mesh + } +} diff --git a/crates/sophus_viewer/src/scene_renderer.rs b/crates/sophus_viewer/src/scene_renderer.rs index 167e4a8..571469f 100644 --- a/crates/sophus_viewer/src/scene_renderer.rs +++ b/crates/sophus_viewer/src/scene_renderer.rs @@ -4,9 +4,6 @@ pub mod buffers; /// depth renderer pub mod depth_renderer; -/// interaction state -pub mod interaction; - /// line renderer pub mod line; @@ -18,22 +15,25 @@ pub mod point; /// textured mesh renderer pub mod textured_mesh; -use self::buffers::SceneRenderBuffers; -use self::interaction::Interaction; -use self::mesh::MeshRenderer; -use self::point::ScenePointRenderer; -use crate::actor::ViewerBuilder; -use crate::DepthRenderer; -use crate::ViewerRenderState; + use eframe::egui; use sophus_core::calculus::region::IsRegion; +use sophus_core::linalg::VecF32; use sophus_core::tensor::tensor_view::IsTensorLike; +use sophus_image::arc_image::ArcImage4U8; use sophus_image::arc_image::ArcImageF32; use sophus_image::image_view::IsImageView; use sophus_sensor::distortion_table::distort_table; use sophus_sensor::dyn_camera::DynCamera; use wgpu::DepthStencilState; +use crate::interactions::InteractionEnum; +use crate::scene_renderer::buffers::SceneRenderBuffers; +use crate::scene_renderer::depth_renderer::DepthRenderer; +use crate::scene_renderer::mesh::MeshRenderer; +use crate::scene_renderer::point::ScenePointRenderer; +use crate::ViewerRenderState; + /// Scene renderer pub struct SceneRenderer { /// Buffers of the scene @@ -49,61 +49,66 @@ pub struct SceneRenderer { /// Depth renderer pub depth_renderer: DepthRenderer, /// Interaction state - pub interaction: Interaction, + pub interaction: InteractionEnum, } impl SceneRenderer { /// Create a new scene renderer pub fn new( wgpu_render_state: &ViewerRenderState, - builder: &ViewerBuilder, + intrinsics: &DynCamera, depth_stencil: Option, + interaction: InteractionEnum, ) -> Self { let device = &wgpu_render_state.device; let depth_renderer = DepthRenderer::new( wgpu_render_state, - &builder.config.camera.intrinsics, + &intrinsics.image_size(), depth_stencil.clone(), ); - let bind_group_layout = device.create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor { - label: Some("scene group layout"), - entries: &[ - wgpu::BindGroupLayoutEntry { - binding: 0, - visibility: wgpu::ShaderStages::VERTEX, - ty: wgpu::BindingType::Buffer { - ty: wgpu::BufferBindingType::Uniform, - has_dynamic_offset: false, - min_binding_size: None, + let uniform_bind_group_layout = + device.create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor { + label: Some("scene group layout"), + entries: &[ + // frustum uniform + wgpu::BindGroupLayoutEntry { + binding: 0, 
+ visibility: wgpu::ShaderStages::VERTEX, + ty: wgpu::BindingType::Buffer { + ty: wgpu::BufferBindingType::Uniform, + has_dynamic_offset: false, + min_binding_size: None, + }, + count: None, }, - count: None, - }, - wgpu::BindGroupLayoutEntry { - binding: 1, - visibility: wgpu::ShaderStages::VERTEX, - ty: wgpu::BindingType::Buffer { - ty: wgpu::BufferBindingType::Uniform, - has_dynamic_offset: false, - min_binding_size: None, + // view-transform uniform + wgpu::BindGroupLayoutEntry { + binding: 1, + visibility: wgpu::ShaderStages::VERTEX, + ty: wgpu::BindingType::Buffer { + ty: wgpu::BufferBindingType::Uniform, + has_dynamic_offset: false, + min_binding_size: None, + }, + count: None, }, - count: None, - }, - wgpu::BindGroupLayoutEntry { - binding: 2, - visibility: wgpu::ShaderStages::VERTEX, - ty: wgpu::BindingType::Buffer { - ty: wgpu::BufferBindingType::Uniform, - has_dynamic_offset: false, - min_binding_size: None, + // distortion table uniform + wgpu::BindGroupLayoutEntry { + binding: 2, + visibility: wgpu::ShaderStages::VERTEX, + ty: wgpu::BindingType::Buffer { + ty: wgpu::BufferBindingType::Uniform, + has_dynamic_offset: false, + min_binding_size: None, + }, + count: None, }, - count: None, - }, - ], - }); + ], + }); - let texture_bind_group_layout = + let distortion_texture_bind_group_layout = device.create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor { entries: &[wgpu::BindGroupLayoutEntry { binding: 0, @@ -115,15 +120,42 @@ impl SceneRenderer { }, count: None, }], - label: Some("texture_bind_group_layout"), + label: Some("dist_texture_bind_group_layout"), + }); + + let background_texture_bind_group_layout = + device.create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor { + entries: &[ + wgpu::BindGroupLayoutEntry { + binding: 0, + visibility: wgpu::ShaderStages::FRAGMENT, + ty: wgpu::BindingType::Texture { + multisampled: false, + view_dimension: wgpu::TextureViewDimension::D2, + sample_type: wgpu::TextureSampleType::Float { filterable: true }, + }, + count: None, + }, + wgpu::BindGroupLayoutEntry { + binding: 1, + visibility: wgpu::ShaderStages::FRAGMENT, + ty: wgpu::BindingType::Sampler(wgpu::SamplerBindingType::Filtering), + count: None, + }, + ], + label: Some("background_texture_bind_group_layout"), }); let pipeline_layout = device.create_pipeline_layout(&wgpu::PipelineLayoutDescriptor { label: Some("scene pipeline"), - bind_group_layouts: &[&bind_group_layout, &texture_bind_group_layout], + bind_group_layouts: &[ + &uniform_bind_group_layout, + &distortion_texture_bind_group_layout, + &background_texture_bind_group_layout, + ], push_constant_ranges: &[], }); - let buffers = SceneRenderBuffers::new(wgpu_render_state, builder); + let buffers = SceneRenderBuffers::new(wgpu_render_state, intrinsics); Self { buffers, @@ -148,13 +180,7 @@ impl SceneRenderer { &pipeline_layout, depth_stencil, ), - interaction: Interaction { - maybe_pointer_state: None, - maybe_scroll_state: None, - maybe_scene_focus: None, - scene_from_camera: builder.config.camera.scene_from_camera, - clipping_planes: builder.config.camera.clipping_planes, - }, + interaction, } } @@ -162,9 +188,11 @@ impl SceneRenderer { &mut self, cam: &DynCamera, response: &egui::Response, + scales: &VecF32<2>, z_buffer: ArcImageF32, ) { - self.interaction.process_event(cam, response, &z_buffer); + self.interaction + .process_event(cam, response, scales, &z_buffer); } pub(crate) fn paint<'rp>( @@ -198,11 +226,13 @@ impl SceneRenderer { &mut render_pass, &self.buffers.bind_group, &self.buffers.dist_bind_group, + 
&self.buffers.background_bind_group, ); self.textured_mesh_renderer.paint( &mut render_pass, &self.buffers.bind_group, &self.buffers.dist_bind_group, + &self.buffers.background_bind_group, ); self.point_renderer.paint( &mut render_pass, @@ -247,11 +277,13 @@ impl SceneRenderer { &mut render_pass, &self.buffers.bind_group, &self.buffers.dist_bind_group, + &self.buffers.background_bind_group, ); self.textured_mesh_renderer.depth_paint( &mut render_pass, &self.buffers.bind_group, &self.buffers.dist_bind_group, + &self.buffers.background_bind_group, ); self.line_renderer.depth_paint( &mut render_pass, @@ -268,7 +300,12 @@ impl SceneRenderer { self.textured_mesh_renderer.vertices.clear(); } - pub(crate) fn prepare(&self, state: &ViewerRenderState, intrinsics: &DynCamera) { + pub(crate) fn prepare( + &self, + state: &ViewerRenderState, + intrinsics: &DynCamera, + background_image: &Option, + ) { state.queue.write_buffer( &self.point_renderer.vertex_buffer, 0, @@ -294,7 +331,7 @@ impl SceneRenderer { for i in 0..4 { for j in 0..4 { scene_from_camera_uniform[j][i] = - self.interaction.scene_from_camera.inverse().matrix()[(i, j)] as f32; + self.interaction.scene_from_camera().inverse().matrix()[(i, j)] as f32; } } state.queue.write_buffer( @@ -320,7 +357,7 @@ impl SceneRenderer { wgpu::ImageDataLayout { offset: 0, bytes_per_row: Some(8 * distort_lut.table.image_size().width as u32), - rows_per_image: None, + rows_per_image: Some(distort_lut.table.image_size().height as u32), }, self.buffers.dist_texture.size(), ); @@ -335,5 +372,23 @@ impl SceneRenderer { ]), ); } + + if let Some(background_image) = background_image { + state.queue.write_texture( + wgpu::ImageCopyTexture { + texture: &self.buffers.background_texture, + mip_level: 0, + origin: wgpu::Origin3d::ZERO, + aspect: wgpu::TextureAspect::All, + }, + bytemuck::cast_slice(background_image.tensor.scalar_view().as_slice().unwrap()), + wgpu::ImageDataLayout { + offset: 0, + bytes_per_row: Some(4 * background_image.image_size().width as u32), + rows_per_image: Some(background_image.image_size().height as u32), + }, + self.buffers.background_texture.size(), + ); + } } } diff --git a/crates/sophus_viewer/src/scene_renderer/buffers.rs b/crates/sophus_viewer/src/scene_renderer/buffers.rs index 8882312..3626e0d 100644 --- a/crates/sophus_viewer/src/scene_renderer/buffers.rs +++ b/crates/sophus_viewer/src/scene_renderer/buffers.rs @@ -1,9 +1,11 @@ -use crate::ViewerBuilder; -use crate::ViewerRenderState; -use sophus_sensor::distortion_table::DistortTable; use std::sync::Mutex; + +use sophus_sensor::distortion_table::DistortTable; +use sophus_sensor::DynCamera; use wgpu::util::DeviceExt; +use crate::ViewerRenderState; + #[repr(C)] // This is so we can store this in a buffer #[derive(Debug, Copy, Clone, bytemuck::Pod, bytemuck::Zeroable)] @@ -43,10 +45,15 @@ pub struct SceneRenderBuffers { pub(crate) dist_texture: wgpu::Texture, pub(crate) dist_bind_group: wgpu::BindGroup, pub(crate) distortion_lut: Mutex>, + pub(crate) background_texture: wgpu::Texture, + pub(crate) background_bind_group: wgpu::BindGroup, } impl SceneRenderBuffers { - pub(crate) fn new(wgpu_render_state: &ViewerRenderState, builder: &ViewerBuilder) -> Self { + pub(crate) fn new( + wgpu_render_state: &ViewerRenderState, + intrinsics: &DynCamera, + ) -> Self { let device = &wgpu_render_state.device; let bind_group_layout = device.create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor { @@ -93,14 +100,14 @@ impl SceneRenderBuffers { ]; let transform_uniforms = Transforms { - width: 
builder.config.camera.intrinsics.image_size().width as f32, - height: builder.config.camera.intrinsics.image_size().height as f32, + width: intrinsics.image_size().width as f32, + height: intrinsics.image_size().height as f32, near: 0.1, far: 1000.0, - fx: builder.config.camera.intrinsics.pinhole_params()[0] as f32, - fy: builder.config.camera.intrinsics.pinhole_params()[1] as f32, - px: builder.config.camera.intrinsics.pinhole_params()[2] as f32, - py: builder.config.camera.intrinsics.pinhole_params()[3] as f32, + fx: intrinsics.pinhole_params()[0] as f32, + fy: intrinsics.pinhole_params()[1] as f32, + px: intrinsics.pinhole_params()[2] as f32, + py: intrinsics.pinhole_params()[3] as f32, }; let transform_buffer = device.create_buffer_init(&wgpu::util::BufferInitDescriptor { @@ -152,10 +159,78 @@ impl SceneRenderBuffers { }); let texture_size = wgpu::Extent3d { - width: builder.config.camera.intrinsics.image_size().width as u32, - height: builder.config.camera.intrinsics.image_size().height as u32, + width: intrinsics.image_size().width as u32, + height: intrinsics.image_size().height as u32, + depth_or_array_layers: 1, + }; + + let background_bind_group_layout = + device.create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor { + entries: &[ + wgpu::BindGroupLayoutEntry { + binding: 0, + visibility: wgpu::ShaderStages::FRAGMENT, + ty: wgpu::BindingType::Texture { + multisampled: false, + view_dimension: wgpu::TextureViewDimension::D2, + sample_type: wgpu::TextureSampleType::Float { filterable: true }, + }, + count: None, + }, + wgpu::BindGroupLayoutEntry { + binding: 1, + visibility: wgpu::ShaderStages::FRAGMENT, + ty: wgpu::BindingType::Sampler(wgpu::SamplerBindingType::Filtering), + count: None, + }, + ], + label: Some("background_texture_bind_group_layout"), + }); + + let background_texture_sampler = device.create_sampler(&wgpu::SamplerDescriptor { + address_mode_u: wgpu::AddressMode::ClampToEdge, + address_mode_v: wgpu::AddressMode::ClampToEdge, + address_mode_w: wgpu::AddressMode::ClampToEdge, + mag_filter: wgpu::FilterMode::Linear, + min_filter: wgpu::FilterMode::Linear, + mipmap_filter: wgpu::FilterMode::Linear, + ..Default::default() + }); + + let background_texture_size = wgpu::Extent3d { + width: intrinsics.image_size().width as u32, + height: intrinsics.image_size().height as u32, depth_or_array_layers: 1, }; + let background_texture = device.create_texture(&wgpu::TextureDescriptor { + size: background_texture_size, + mip_level_count: 1, + sample_count: 1, + dimension: wgpu::TextureDimension::D2, + format: wgpu::TextureFormat::Rgba8Unorm, + usage: wgpu::TextureUsages::TEXTURE_BINDING | wgpu::TextureUsages::COPY_DST, + label: Some("dist_texture"), + view_formats: &[], + }); + + let background_texture_view = + background_texture.create_view(&wgpu::TextureViewDescriptor::default()); + + let background_bind_group = device.create_bind_group(&wgpu::BindGroupDescriptor { + layout: &background_bind_group_layout, + entries: &[ + wgpu::BindGroupEntry { + binding: 0, + resource: wgpu::BindingResource::TextureView(&background_texture_view), + }, + wgpu::BindGroupEntry { + binding: 1, + resource: wgpu::BindingResource::Sampler(&background_texture_sampler), + }, + ], + label: Some("background_bind_group"), + }); + let dist_texture = device.create_texture(&wgpu::TextureDescriptor { size: texture_size, mip_level_count: 1, @@ -169,7 +244,7 @@ impl SceneRenderBuffers { let dist_texture_view = dist_texture.create_view(&wgpu::TextureViewDescriptor::default()); - let texture_bind_group_layout = 
+ let dist_texture_bind_group_layout = device.create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor { entries: &[wgpu::BindGroupLayoutEntry { binding: 0, @@ -181,11 +256,11 @@ impl SceneRenderBuffers { }, count: None, }], - label: Some("texture_bind_group_layout"), + label: Some("dist_texture_bind_group_layout"), }); let dist_bind_group = device.create_bind_group(&wgpu::BindGroupDescriptor { - layout: &texture_bind_group_layout, + layout: &dist_texture_bind_group_layout, entries: &[wgpu::BindGroupEntry { binding: 0, resource: wgpu::BindingResource::TextureView(&dist_texture_view), @@ -201,6 +276,8 @@ impl SceneRenderBuffers { dist_texture, dist_bind_group, distortion_lut: Mutex::new(None), + background_bind_group, + background_texture, } } } diff --git a/crates/sophus_viewer/src/scene_renderer/depth_renderer.rs b/crates/sophus_viewer/src/scene_renderer/depth_renderer.rs index b308ad8..7bc9e09 100644 --- a/crates/sophus_viewer/src/scene_renderer/depth_renderer.rs +++ b/crates/sophus_viewer/src/scene_renderer/depth_renderer.rs @@ -1,7 +1,8 @@ -use crate::ViewerRenderState; -use sophus_sensor::dyn_camera::DynCamera; +use sophus_image::ImageSize; use wgpu::DepthStencilState; +use crate::ViewerRenderState; + /// Depth renderer. pub struct DepthRenderer { pub(crate) _depth_stencil: Option, @@ -14,14 +15,14 @@ impl DepthRenderer { /// Create a new depth renderer. pub fn new( state: &ViewerRenderState, - cam: &DynCamera, + image_size: &ImageSize, depth_stencil: Option, ) -> Self { pub const DEPTH_FORMAT: wgpu::TextureFormat = wgpu::TextureFormat::Depth32Float; let size = wgpu::Extent3d { - width: cam.image_size().width as u32, - height: cam.image_size().height as u32, + width: image_size.width as u32, + height: image_size.height as u32, depth_or_array_layers: 1, }; let desc = wgpu::TextureDescriptor { diff --git a/crates/sophus_viewer/src/scene_renderer/line.rs b/crates/sophus_viewer/src/scene_renderer/line.rs index b939690..2a2eaa6 100644 --- a/crates/sophus_viewer/src/scene_renderer/line.rs +++ b/crates/sophus_viewer/src/scene_renderer/line.rs @@ -1,11 +1,13 @@ -use crate::renderable::Line3; -use crate::ViewerRenderState; +use std::collections::BTreeMap; + use bytemuck::Pod; use bytemuck::Zeroable; use eframe::egui_wgpu::wgpu::util::DeviceExt; -use std::collections::BTreeMap; use wgpu::DepthStencilState; +use crate::renderables::renderable3d::Line3; +use crate::ViewerRenderState; + #[repr(C)] #[derive(Clone, Copy, Pod, Zeroable)] /// 3D line vertex diff --git a/crates/sophus_viewer/src/scene_renderer/mesh.rs b/crates/sophus_viewer/src/scene_renderer/mesh.rs index fbba60e..9784ac0 100644 --- a/crates/sophus_viewer/src/scene_renderer/mesh.rs +++ b/crates/sophus_viewer/src/scene_renderer/mesh.rs @@ -1,11 +1,13 @@ -use crate::renderable::Triangle3; -use crate::ViewerRenderState; +use std::collections::BTreeMap; + use bytemuck::Pod; use bytemuck::Zeroable; use eframe::egui_wgpu::wgpu::util::DeviceExt; -use std::collections::BTreeMap; use wgpu::DepthStencilState; +use crate::renderables::renderable3d::Triangle3; +use crate::ViewerRenderState; + #[repr(C)] #[derive(Clone, Copy, Pod, Zeroable)] /// 3D mesh vertex @@ -128,10 +130,12 @@ impl MeshRenderer { render_pass: &mut wgpu::RenderPass<'rp>, bind_group: &'rp wgpu::BindGroup, dist_bind_group: &'rp wgpu::BindGroup, + background_bind_group: &'rp wgpu::BindGroup, ) { render_pass.set_pipeline(&self.pipeline); render_pass.set_bind_group(0, bind_group, &[]); render_pass.set_bind_group(1, dist_bind_group, &[]); + render_pass.set_bind_group(2, 
background_bind_group, &[]); render_pass.set_vertex_buffer(0, self.vertex_buffer.slice(..)); render_pass.draw(0..self.vertices.len() as u32, 0..1); } @@ -141,10 +145,12 @@ impl MeshRenderer { depth_render_pass: &mut wgpu::RenderPass<'rp>, bind_group: &'rp wgpu::BindGroup, dist_bind_group: &'rp wgpu::BindGroup, + background_bind_group: &'rp wgpu::BindGroup, ) { depth_render_pass.set_pipeline(&self.depth_pipeline); depth_render_pass.set_bind_group(0, bind_group, &[]); depth_render_pass.set_bind_group(1, dist_bind_group, &[]); + depth_render_pass.set_bind_group(2, background_bind_group, &[]); depth_render_pass.set_vertex_buffer(0, self.vertex_buffer.slice(..)); depth_render_pass.draw(0..self.vertices.len() as u32, 0..1); } diff --git a/crates/sophus_viewer/src/scene_renderer/point.rs b/crates/sophus_viewer/src/scene_renderer/point.rs index 0d384e7..2108135 100644 --- a/crates/sophus_viewer/src/scene_renderer/point.rs +++ b/crates/sophus_viewer/src/scene_renderer/point.rs @@ -1,11 +1,13 @@ -use crate::renderable::Point3; -use crate::ViewerRenderState; +use std::collections::BTreeMap; + use bytemuck::Pod; use bytemuck::Zeroable; use eframe::egui_wgpu::wgpu::util::DeviceExt; -use std::collections::BTreeMap; use wgpu::DepthStencilState; +use crate::renderables::renderable3d::Point3; +use crate::ViewerRenderState; + #[repr(C)] #[derive(Clone, Copy, Pod, Zeroable)] /// 3D point vertex @@ -62,7 +64,7 @@ impl ScenePointRenderer { }); let pipeline = device.create_render_pipeline(&wgpu::RenderPipelineDescriptor { - label: Some("mesh scene pipeline"), + label: Some("point scene pipeline"), layout: Some(pipeline_layout), vertex: wgpu::VertexState { module: &shader, diff --git a/crates/sophus_viewer/src/scene_renderer/textured_mesh.rs b/crates/sophus_viewer/src/scene_renderer/textured_mesh.rs index 769a1b4..be335cf 100644 --- a/crates/sophus_viewer/src/scene_renderer/textured_mesh.rs +++ b/crates/sophus_viewer/src/scene_renderer/textured_mesh.rs @@ -1,11 +1,13 @@ -use crate::renderable::TexturedTriangle3; -use crate::ViewerRenderState; +use std::collections::BTreeMap; + use bytemuck::Pod; use bytemuck::Zeroable; use eframe::egui_wgpu::wgpu::util::DeviceExt; -use std::collections::BTreeMap; use wgpu::DepthStencilState; +use crate::renderables::renderable3d::TexturedTriangle3; +use crate::ViewerRenderState; + #[repr(C)] #[derive(Clone, Copy, Pod, Zeroable)] /// 3D textured mesh vertex @@ -14,14 +16,6 @@ pub struct TexturedMeshVertex3 { pub(crate) _tex: [f32; 2], } -// pub(crate) struct TexturedMeshes { -// pub(crate) start_idx: u32, -// pub(crate) end_idx: u32, - -// pub texture: wgpu::Texture, -// pub bind_group: wgpu::BindGroup, -// } - /// Scene textured mesh renderer pub struct TexturedMeshRenderer { pub(crate) pipeline: wgpu::RenderPipeline, @@ -29,7 +23,6 @@ pub struct TexturedMeshRenderer { pub(crate) vertex_buffer: wgpu::Buffer, pub(crate) mesh_table: BTreeMap>, pub(crate) vertices: Vec, - //pub meshes: BTreeMap, } impl TexturedMeshRenderer { @@ -42,12 +35,12 @@ impl TexturedMeshRenderer { let device = &wgpu_render_state.device; let shader = device.create_shader_module(wgpu::ShaderModuleDescriptor { - label: Some("scene mesh shader"), + label: Some("texture scene mesh shader"), source: wgpu::ShaderSource::Wgsl( format!( "{} {}", include_str!("./utils.wgsl"), - include_str!("./mesh_scene_shader.wgsl") + include_str!("./textured_mesh_scene_shader.wgsl") ) .into(), ), @@ -77,7 +70,7 @@ impl TexturedMeshRenderer { }); let pipeline = device.create_render_pipeline(&wgpu::RenderPipelineDescriptor { - 
label: Some("mesh scene pipeline"), + label: Some("textured mesh scene pipeline"), layout: Some(pipeline_layout), vertex: wgpu::VertexState { module: &shader, @@ -100,7 +93,7 @@ impl TexturedMeshRenderer { }); let depth_pipeline = device.create_render_pipeline(&wgpu::RenderPipelineDescriptor { - label: Some("depth_mesh scene pipeline"), + label: Some("depth textured mesh scene pipeline"), layout: Some(pipeline_layout), vertex: wgpu::VertexState { module: &shader, @@ -108,7 +101,7 @@ impl TexturedMeshRenderer { buffers: &[wgpu::VertexBufferLayout { array_stride: std::mem::size_of::() as wgpu::BufferAddress, step_mode: wgpu::VertexStepMode::Vertex, - attributes: &wgpu::vertex_attr_array![0 => Float32x3, 1 => Float32x4], + attributes: &wgpu::vertex_attr_array![0 => Float32x3, 1 => Float32x2], }], }, fragment: Some(wgpu::FragmentState { @@ -117,8 +110,7 @@ impl TexturedMeshRenderer { targets: &[Some(wgpu::TextureFormat::R32Float.into())], }), primitive: wgpu::PrimitiveState::default(), - - depth_stencil, + depth_stencil: depth_stencil.clone(), multisample: wgpu::MultisampleState::default(), multiview: None, }); @@ -129,19 +121,19 @@ impl TexturedMeshRenderer { vertex_buffer, vertices: vec![], mesh_table: BTreeMap::new(), - //meshes: BTreeMap::new(), } } - pub(crate) fn paint<'rp>( &'rp self, render_pass: &mut wgpu::RenderPass<'rp>, - bind_group: &'rp wgpu::BindGroup, - dist_bind_group: &'rp wgpu::BindGroup, + uniform_bind_group: &'rp wgpu::BindGroup, + distortion_bind_group: &'rp wgpu::BindGroup, + texture_bind_group: &'rp wgpu::BindGroup, ) { render_pass.set_pipeline(&self.pipeline); - render_pass.set_bind_group(0, bind_group, &[]); - render_pass.set_bind_group(1, dist_bind_group, &[]); + render_pass.set_bind_group(0, uniform_bind_group, &[]); + render_pass.set_bind_group(1, distortion_bind_group, &[]); + render_pass.set_bind_group(2, texture_bind_group, &[]); render_pass.set_vertex_buffer(0, self.vertex_buffer.slice(..)); render_pass.draw(0..self.vertices.len() as u32, 0..1); } @@ -151,10 +143,13 @@ impl TexturedMeshRenderer { depth_render_pass: &mut wgpu::RenderPass<'rp>, bind_group: &'rp wgpu::BindGroup, dist_bind_group: &'rp wgpu::BindGroup, + texture_bind_group: &'rp wgpu::BindGroup, ) { depth_render_pass.set_pipeline(&self.depth_pipeline); depth_render_pass.set_bind_group(0, bind_group, &[]); depth_render_pass.set_bind_group(1, dist_bind_group, &[]); + depth_render_pass.set_bind_group(2, texture_bind_group, &[]); + depth_render_pass.set_vertex_buffer(0, self.vertex_buffer.slice(..)); depth_render_pass.draw(0..self.vertices.len() as u32, 0..1); } diff --git a/crates/sophus_viewer/src/scene_renderer/textured_mesh_scene_shader.wgsl b/crates/sophus_viewer/src/scene_renderer/textured_mesh_scene_shader.wgsl index 77e3d0f..fdb8022 100644 --- a/crates/sophus_viewer/src/scene_renderer/textured_mesh_scene_shader.wgsl +++ b/crates/sophus_viewer/src/scene_renderer/textured_mesh_scene_shader.wgsl @@ -1,6 +1,6 @@ struct VertexOut { + @location(0) texCoords: vec2, @builtin(position) position: vec4, - @location(1) tex_coords: vec2, }; @group(0) @binding(0) @@ -15,27 +15,30 @@ var lut_uniform: DistortionLut; @group(1) @binding(0) var distortion_texture: texture_2d; +@group(2) @binding(0) +var mesh_texture: texture_2d; + +@group(2) @binding(1) +var mesh_texture_sampler: sampler; + + @vertex fn vs_main( - @location(0) position: vec3, - @location(1) tex_coords: vec2)-> VertexOut -{ + @location(0) position: vec3, + @location(1) texCoords: vec2 +) -> VertexOut { var out: VertexOut; var uv_z = 
scene_point_to_distorted(position, view_uniform, frustum_uniforms, lut_uniform); out.position = pixel_and_z_to_clip(uv_z.xy, uv_z.z, frustum_uniforms); - out.tex_coords = tex_coords; + out.texCoords = texCoords; // Pass texture coordinates to the fragment shader return out; } -@group(0) @binding(0) -var t_diffuse: texture_2d; -@group(0) @binding(1) -var s_diffuse: sampler; - @fragment fn fs_main(in: VertexOut) -> @location(0) vec4 { - return textureSample(t_diffuse, s_diffuse, in.tex_coords); + return textureSample(mesh_texture, mesh_texture_sampler, in.texCoords); + // return vec4(1.0, 0.0, 0.0, 1.0); } @fragment diff --git a/crates/sophus_viewer/src/simple_viewer.rs b/crates/sophus_viewer/src/simple_viewer.rs new file mode 100644 index 0000000..c3ee013 --- /dev/null +++ b/crates/sophus_viewer/src/simple_viewer.rs @@ -0,0 +1,509 @@ +use std::collections::HashMap; + +use eframe::egui; +use eframe::egui::load::SizedTexture; +use hollywood::actors::egui::EguiAppFromBuilder; +use hollywood::actors::egui::Stream; +use hollywood::compute::pipeline::CancelRequest; +use hollywood::RequestWithReplyChannel; +use linked_hash_map::LinkedHashMap; +use sophus_core::linalg::VecF32; +use sophus_image::arc_image::ArcImageF32; +use sophus_lie::Isometry3; +use sophus_sensor::dyn_camera::DynCamera; + +use crate::actor::ViewerBuilder; +use crate::pixel_renderer::LineVertex2; +use crate::pixel_renderer::PointVertex2; +use crate::renderables::Packet; +use crate::renderables::Packets; +use crate::scene_renderer::line::LineVertex3; +use crate::scene_renderer::mesh::MeshVertex3; +use crate::scene_renderer::point::PointVertex3; +use crate::scene_renderer::textured_mesh::TexturedMeshVertex3; +use crate::views::aspect_ratio::get_adjusted_view_size; +use crate::views::aspect_ratio::get_max_size; +use crate::views::view2d::View2d; +use crate::views::view3d::View3d; +use crate::views::View; +use crate::ViewerRenderState; + +/// The simple viewer top-level struct. 
+pub struct SimpleViewer { + state: ViewerRenderState, + views: LinkedHashMap, + message_recv: tokio::sync::mpsc::UnboundedReceiver>, + request_recv: + tokio::sync::mpsc::UnboundedReceiver>>, + cancel_request_sender: tokio::sync::mpsc::UnboundedSender, +} + +impl EguiAppFromBuilder for SimpleViewer { + fn new(builder: ViewerBuilder, render_state: ViewerRenderState) -> Box { + Box::new(SimpleViewer { + state: render_state.clone(), + views: LinkedHashMap::new(), + message_recv: builder.message_from_actor_recv, + request_recv: builder.in_request_from_actor_recv, + cancel_request_sender: builder.cancel_request_sender.unwrap(), + }) + } + + type Out = SimpleViewer; + + type State = ViewerRenderState; +} + +impl SimpleViewer { + fn add_renderables_to_tables(&mut self) { + loop { + let maybe_stream = self.message_recv.try_recv(); + if maybe_stream.is_err() { + break; + } + let stream = maybe_stream.unwrap(); + for packet in stream.msg.packets { + match packet { + Packet::View3d(packet) => View3d::update(&mut self.views, packet, &self.state), + Packet::View2d(packet) => View2d::update(&mut self.views, packet, &self.state), + } + } + } + + loop { + let maybe_request = self.request_recv.try_recv(); + if maybe_request.is_err() { + break; + } + let request = maybe_request.unwrap(); + let view_label = request.request.clone(); + if self.views.contains_key(&view_label) { + let view = self.views.get(&view_label).unwrap(); + if let View::View3d(view) = view { + request.reply(view.scene.interaction.scene_from_camera()); + } + } + } + + for (_, view) in self.views.iter_mut() { + match view { + View::View3d(view) => { + for (_, points) in view.scene.point_renderer.point_table.iter() { + for point in points.iter() { + let v = PointVertex3 { + _pos: [point.p[0], point.p[1], point.p[2]], + _color: [ + point.color.r, + point.color.g, + point.color.b, + point.color.a, + ], + _point_size: point.point_size, + }; + for _i in 0..6 { + view.scene.point_renderer.vertex_data.push(v); + } + } + } + for (_, lines) in view.scene.line_renderer.line_table.iter() { + for line in lines.iter() { + let p0 = line.p0; + let p1 = line.p1; + + let v0 = LineVertex3 { + _p0: [p0[0], p0[1], p0[2]], + _p1: [p1[0], p1[1], p1[2]], + _color: [line.color.r, line.color.g, line.color.b, line.color.a], + _line_width: line.line_width, + }; + let v1 = LineVertex3 { + _p0: [p0[0], p0[1], p0[2]], + _p1: [p1[0], p1[1], p1[2]], + _color: [line.color.r, line.color.g, line.color.b, line.color.a], + _line_width: line.line_width, + }; + view.scene.line_renderer.vertex_data.push(v0); + view.scene.line_renderer.vertex_data.push(v0); + view.scene.line_renderer.vertex_data.push(v1); + view.scene.line_renderer.vertex_data.push(v0); + view.scene.line_renderer.vertex_data.push(v1); + view.scene.line_renderer.vertex_data.push(v1); + } + } + for (_, mesh) in view.scene.mesh_renderer.mesh_table.iter() { + for trig in mesh.iter() { + let v0 = MeshVertex3 { + _pos: [trig.p0[0], trig.p0[1], trig.p0[2]], + _color: [trig.color.r, trig.color.g, trig.color.b, trig.color.a], + }; + let v1 = MeshVertex3 { + _pos: [trig.p1[0], trig.p1[1], trig.p1[2]], + _color: [trig.color.r, trig.color.g, trig.color.b, trig.color.a], + }; + let v2 = MeshVertex3 { + _pos: [trig.p2[0], trig.p2[1], trig.p2[2]], + _color: [trig.color.r, trig.color.g, trig.color.b, trig.color.a], + }; + view.scene.mesh_renderer.vertices.push(v0); + view.scene.mesh_renderer.vertices.push(v1); + view.scene.mesh_renderer.vertices.push(v2); + } + } + for (_, mesh) in 
view.scene.textured_mesh_renderer.mesh_table.iter() { + for trig in mesh.iter() { + let v0 = TexturedMeshVertex3 { + _pos: [trig.p0[0], trig.p0[1], trig.p0[2]], + _tex: [trig.tex0[0], trig.tex0[1]], + }; + let v1 = TexturedMeshVertex3 { + _pos: [trig.p1[0], trig.p1[1], trig.p1[2]], + _tex: [trig.tex1[0], trig.tex1[1]], + }; + let v2 = TexturedMeshVertex3 { + _pos: [trig.p2[0], trig.p2[1], trig.p2[2]], + _tex: [trig.tex2[0], trig.tex2[1]], + }; + view.scene.textured_mesh_renderer.vertices.push(v0); + view.scene.textured_mesh_renderer.vertices.push(v1); + view.scene.textured_mesh_renderer.vertices.push(v2); + } + } + } + View::View2d(view) => { + for (_, points) in view.pixel.point_renderer.points_table.iter() { + for point in points.iter() { + let v = PointVertex2 { + _pos: [point.p[0], point.p[1]], + _color: [ + point.color.r, + point.color.g, + point.color.b, + point.color.a, + ], + _point_size: point.point_size, + }; + for _i in 0..6 { + view.pixel.point_renderer.vertex_data.push(v); + } + } + } + for (_, lines) in view.pixel.line_renderer.lines_table.iter() { + println!("lines: {}", lines.len()); + + for line in lines.iter() { + let p0 = line.p0; + let p1 = line.p1; + let d = (p0 - p1).normalize(); + let normal = [d[1], -d[0]]; + + println!("p0: {:?}, p1: {:?}", p0, p1); + println!("d: {:?}, normal: {:?}", d, normal); + println!("color: {:?}", line.color); + + let v0 = LineVertex2 { + _pos: [p0[0], p0[1]], + _normal: normal, + _color: [line.color.r, line.color.g, line.color.b, line.color.a], + _line_width: line.line_width, + }; + let v1 = LineVertex2 { + _pos: [p1[0], p1[1]], + _normal: normal, + _color: [line.color.r, line.color.g, line.color.b, line.color.a], + _line_width: line.line_width, + }; + view.pixel.line_renderer.vertex_data.push(v0); + view.pixel.line_renderer.vertex_data.push(v0); + view.pixel.line_renderer.vertex_data.push(v1); + view.pixel.line_renderer.vertex_data.push(v0); + view.pixel.line_renderer.vertex_data.push(v1); + view.pixel.line_renderer.vertex_data.push(v1); + } + } + for (_, points) in view.scene.point_renderer.point_table.iter() { + for point in points.iter() { + let v = PointVertex3 { + _pos: [point.p[0], point.p[1], point.p[2]], + _color: [ + point.color.r, + point.color.g, + point.color.b, + point.color.a, + ], + _point_size: point.point_size, + }; + for _i in 0..6 { + view.scene.point_renderer.vertex_data.push(v); + } + } + } + for (_, lines) in view.scene.line_renderer.line_table.iter() { + for line in lines.iter() { + let p0 = line.p0; + let p1 = line.p1; + + let v0 = LineVertex3 { + _p0: [p0[0], p0[1], p0[2]], + _p1: [p1[0], p1[1], p1[2]], + _color: [line.color.r, line.color.g, line.color.b, line.color.a], + _line_width: line.line_width, + }; + let v1 = LineVertex3 { + _p0: [p0[0], p0[1], p0[2]], + _p1: [p1[0], p1[1], p1[2]], + _color: [line.color.r, line.color.g, line.color.b, line.color.a], + _line_width: line.line_width, + }; + view.scene.line_renderer.vertex_data.push(v0); + view.scene.line_renderer.vertex_data.push(v0); + view.scene.line_renderer.vertex_data.push(v1); + view.scene.line_renderer.vertex_data.push(v0); + view.scene.line_renderer.vertex_data.push(v1); + view.scene.line_renderer.vertex_data.push(v1); + } + } + for (_, mesh) in view.scene.mesh_renderer.mesh_table.iter() { + for trig in mesh.iter() { + let v0 = MeshVertex3 { + _pos: [trig.p0[0], trig.p0[1], trig.p0[2]], + _color: [trig.color.r, trig.color.g, trig.color.b, trig.color.a], + }; + let v1 = MeshVertex3 { + _pos: [trig.p1[0], trig.p1[1], trig.p1[2]], + _color: [trig.color.r, 
trig.color.g, trig.color.b, trig.color.a], + }; + let v2 = MeshVertex3 { + _pos: [trig.p2[0], trig.p2[1], trig.p2[2]], + _color: [trig.color.r, trig.color.g, trig.color.b, trig.color.a], + }; + view.scene.mesh_renderer.vertices.push(v0); + view.scene.mesh_renderer.vertices.push(v1); + view.scene.mesh_renderer.vertices.push(v2); + } + } + for (_, mesh) in view.scene.textured_mesh_renderer.mesh_table.iter() { + for trig in mesh.iter() { + let v0 = TexturedMeshVertex3 { + _pos: [trig.p0[0], trig.p0[1], trig.p0[2]], + _tex: [trig.tex0[0], trig.tex0[1]], + }; + let v1 = TexturedMeshVertex3 { + _pos: [trig.p1[0], trig.p1[1], trig.p1[2]], + _tex: [trig.tex1[0], trig.tex1[1]], + }; + let v2 = TexturedMeshVertex3 { + _pos: [trig.p2[0], trig.p2[1], trig.p2[2]], + _tex: [trig.tex2[0], trig.tex2[1]], + }; + view.scene.textured_mesh_renderer.vertices.push(v0); + view.scene.textured_mesh_renderer.vertices.push(v1); + view.scene.textured_mesh_renderer.vertices.push(v2); + } + } + } + } + } + } +} + +struct Data { + rgba_tex_id: egui::TextureId, + depth_image: ArcImageF32, + intrinsics: DynCamera, +} + +impl eframe::App for SimpleViewer { + fn on_exit(&mut self, _gl: Option<&eframe::glow::Context>) { + self.cancel_request_sender.send(CancelRequest).unwrap(); + } + + fn update(&mut self, ctx: &egui::Context, _frame: &mut eframe::Frame) { + egui_extras::install_image_loaders(ctx); + + // Clear vertex data + for (_, view) in self.views.iter_mut() { + if let View::View3d(view) = view { + view.scene.clear_vertex_data(); + //view.pixel.clear_vertex_data(); + } + if let View::View2d(view) = view { + view.scene.clear_vertex_data(); + view.pixel.clear_vertex_data(); + } + } + + // Add renderables to tables + self.add_renderables_to_tables(); + + let mut view_data_map = LinkedHashMap::new(); + + // Prepare views and update background textures + for (view_label, view) in self.views.iter_mut() { + match view { + View::View3d(view) => { + view.scene.prepare(&self.state, &view.intrinsics, &None); + + let mut command_encoder = self + .state + .device + .create_command_encoder(&wgpu::CommandEncoderDescriptor::default()); + + view.scene.paint( + &mut command_encoder, + &view.offscreen.rgba_texture_view, + &view.scene.depth_renderer, + ); + + self.state.queue.submit(Some(command_encoder.finish())); + + let mut command_encoder = self + .state + .device + .create_command_encoder(&wgpu::CommandEncoderDescriptor::default()); + view.scene.depth_paint( + &mut command_encoder, + &view.offscreen.depth_texture_view, + &view.scene.depth_renderer, + ); + + let depth_image = view.offscreen.download_images( + &self.state, + command_encoder, + &view.intrinsics.image_size(), + ); + + view_data_map.insert( + view_label.clone(), + Data { + rgba_tex_id: view.offscreen.rgba_tex_id, + depth_image, + intrinsics: view.intrinsics.clone(), + }, + ); + } + View::View2d(view) => { + view.scene + .prepare(&self.state, &view.intrinsics, &view.background_image); + view.pixel.prepare(&self.state); + + view.background_image = None; + + let mut command_encoder = self + .state + .device + .create_command_encoder(&wgpu::CommandEncoderDescriptor::default()); + + view.scene.paint( + &mut command_encoder, + &view.offscreen.rgba_texture_view, + &view.scene.depth_renderer, + ); + view.pixel.paint( + &mut command_encoder, + &view.offscreen.rgba_texture_view, + &view.scene.depth_renderer, + ); + + self.state.queue.submit(Some(command_encoder.finish())); + + view.pixel + .show_interaction_marker(&self.state, &view.scene.interaction); + + let mut command_encoder = 
self + .state + .device + .create_command_encoder(&wgpu::CommandEncoderDescriptor::default()); + view.scene.depth_paint( + &mut command_encoder, + &view.offscreen.depth_texture_view, + &view.scene.depth_renderer, + ); + + let depth_image = view.offscreen.download_images( + &self.state, + command_encoder, + &view.intrinsics.image_size(), + ); + + view_data_map.insert( + view_label.clone(), + Data { + rgba_tex_id: view.offscreen.rgba_tex_id, + depth_image, + intrinsics: view.intrinsics.clone(), + }, + ); + } + } + } + + let mut responses = HashMap::new(); + + egui::CentralPanel::default().show(ctx, |ui| { + ui.scope(|ui0| { + if self.views.is_empty() { + return; + } + let (max_width, max_height) = get_max_size( + &self.views, + 0.95 * ui0.available_width(), + 0.95 * ui0.available_height(), + ); + + ui0.horizontal_wrapped(|ui| { + for (view_label, view_data) in view_data_map.iter() { + let adjusted_size = + get_adjusted_view_size(&self.views[view_label], max_width, max_height); + + let response = ui.add( + egui::Image::new(SizedTexture { + size: egui::Vec2::new( + view_data.intrinsics.image_size().width as f32, + view_data.intrinsics.image_size().height as f32, + ), + id: view_data.rgba_tex_id, + }) + .fit_to_exact_size(egui::Vec2 { + x: adjusted_size.0, + y: adjusted_size.1, + }) + .sense(egui::Sense::click_and_drag()), + ); + + responses.insert( + view_label.clone(), + ( + response, + VecF32::<2>::new( + view_data.intrinsics.image_size().width as f32 + / adjusted_size.0, + view_data.intrinsics.image_size().height as f32 + / adjusted_size.1, + ), + ), + ); + } + }); + }); + }); + + for (view_label, view) in self.views.iter_mut() { + match view { + View::View3d(view) => { + let response = responses.get(view_label).unwrap(); + let depth_image = view_data_map.get(view_label).unwrap().depth_image.clone(); + + view.scene.process_event( + &view.intrinsics, + &response.0, + &response.1, + depth_image, + ); + } + View::View2d(_) => {} + } + } + + ctx.request_repaint(); + } +} diff --git a/crates/sophus_viewer/src/views.rs b/crates/sophus_viewer/src/views.rs new file mode 100644 index 0000000..a9a0a29 --- /dev/null +++ b/crates/sophus_viewer/src/views.rs @@ -0,0 +1,41 @@ +/// aspect ratio +pub mod aspect_ratio; +/// 2d view +pub mod view2d; +/// 3d view +pub mod view3d; + +use sophus_image::ImageSize; +use sophus_sensor::DynCamera; + +use crate::views::aspect_ratio::HasAspectRatio; +use crate::views::view2d::View2d; +use crate::views::view3d::View3d; + +/// The view enum. 
+pub(crate) enum View { + /// 3D view + View3d(View3d), + /// Image view + View2d(View2d), +} + +impl HasAspectRatio for View { + fn aspect_ratio(&self) -> f32 { + match self { + View::View3d(view) => view.aspect_ratio(), + View::View2d(view) => view.aspect_ratio(), + } + } + + fn intrinsics(&self) -> &DynCamera { + match self { + View::View3d(view) => view.intrinsics(), + View::View2d(view) => view.intrinsics(), + } + } + + fn view_size(&self) -> ImageSize { + self.intrinsics().image_size() + } +} diff --git a/crates/sophus_viewer/src/views/aspect_ratio.rs b/crates/sophus_viewer/src/views/aspect_ratio.rs new file mode 100644 index 0000000..312466b --- /dev/null +++ b/crates/sophus_viewer/src/views/aspect_ratio.rs @@ -0,0 +1,63 @@ +use linked_hash_map::LinkedHashMap; +use sophus_image::ImageSize; +use sophus_sensor::DynCamera; + +use crate::views::View; + +pub(crate) trait HasAspectRatio { + fn aspect_ratio(&self) -> f32; + + fn view_size(&self) -> ImageSize; + + fn intrinsics(&self) -> &DynCamera; +} + +pub(crate) fn get_median_aspect_ratio(views: &LinkedHashMap) -> f32 { + let mut aspect_ratios = std::vec::Vec::with_capacity(views.len()); + for (_, widget) in views.iter() { + aspect_ratios.push(widget.aspect_ratio()); + } + aspect_ratios.sort_by(|a, b| a.partial_cmp(b).unwrap_or(std::cmp::Ordering::Equal)); + let n = aspect_ratios.len(); + if n % 2 == 1 { + aspect_ratios[n / 2] + } else { + 0.5 * aspect_ratios[n / 2] + 0.5 * aspect_ratios[n / 2 - 1] + } +} + +pub(crate) fn get_max_size( + views: &LinkedHashMap, + available_width: f32, + available_height: f32, +) -> (f32, f32) { + let median_aspect_ratio = get_median_aspect_ratio(views); + + let mut max_width = 0.0; + let mut max_height = 0.0; + + let n = views.len() as u32; + for num_cols in 1..=n { + let num_rows: f32 = ((n as f32) / (num_cols as f32)).ceil(); + + let w: f32 = available_width / (num_cols as f32); + let h = (w / median_aspect_ratio).min(available_height / num_rows); + let w = median_aspect_ratio * h; + if w > max_width { + max_width = w; + max_height = h; + } + } + + (max_width, max_height) +} + +pub(crate) fn get_adjusted_view_size(view: &View, max_width: f32, max_height: f32) -> (f32, f32) { + let aspect_ratio = view.aspect_ratio(); + let view_size = view.view_size(); + let view_width = max_width.min(max_height * aspect_ratio); + let view_height = max_height.min(max_width / aspect_ratio); + let view_width = view_width.min(view_size.width as f32); + let view_height = view_height.min(view_size.height as f32); + (view_width, view_height) +} diff --git a/crates/sophus_viewer/src/views/view2d.rs b/crates/sophus_viewer/src/views/view2d.rs new file mode 100644 index 0000000..9b2f2d0 --- /dev/null +++ b/crates/sophus_viewer/src/views/view2d.rs @@ -0,0 +1,230 @@ +use linked_hash_map::LinkedHashMap; +use sophus_core::linalg::VecF64; +use sophus_image::arc_image::ArcImage4U8; +use sophus_image::ImageSize; +use sophus_lie::traits::IsTranslationProductGroup; +use sophus_lie::Isometry3; +use sophus_sensor::DynCamera; + +use crate::interactions::inplane_interaction::InplaneInteraction; +use crate::interactions::InteractionEnum; +use crate::interactions::WgpuClippingPlanes; +use crate::offscreen::OffscreenTexture; +use crate::pixel_renderer::PixelRenderer; +use crate::renderables::renderable2d::Renderable2d; +use crate::renderables::renderable2d::View2dPacket; +use crate::renderables::renderable3d::Renderable3d; +use crate::renderables::renderable3d::TexturedMesh3; +use crate::scene_renderer::SceneRenderer; +use 
crate::views::aspect_ratio::HasAspectRatio; +use crate::views::View; +use crate::ViewerRenderState; + +pub(crate) struct View2d { + pub(crate) scene: SceneRenderer, + pub(crate) pixel: PixelRenderer, + pub(crate) intrinsics: DynCamera, + pub(crate) offscreen: OffscreenTexture, + pub(crate) background_image: Option, +} + +impl View2d { + fn create_if_new( + views: &mut LinkedHashMap, + packet: &View2dPacket, + state: &ViewerRenderState, + ) -> bool { + if views.contains_key(&packet.view_label) { + return false; + } + if let Some(frame) = &packet.frame { + let depth_stencil = Some(wgpu::DepthStencilState { + format: wgpu::TextureFormat::Depth32Float, + depth_write_enabled: true, + depth_compare: wgpu::CompareFunction::Less, + stencil: wgpu::StencilState::default(), + bias: wgpu::DepthBiasState::default(), + }); + views.insert( + packet.view_label.clone(), + View::View2d(View2d { + scene: SceneRenderer::new( + state, + frame.intrinsics(), + depth_stencil.clone(), + InteractionEnum::InplaneInteraction(InplaneInteraction { + maybe_pointer_state: None, + maybe_scroll_state: None, + maybe_scene_focus: None, + _clipping_planes: WgpuClippingPlanes { + near: 0.1, + far: 1000.0, + }, + scene_from_camera: Isometry3::from_t(&VecF64::<3>::new(0.0, 0.0, -5.0)), + }), + ), + pixel: PixelRenderer::new( + state, + &frame.intrinsics().image_size(), + depth_stencil, + ), + intrinsics: frame.intrinsics().clone(), + background_image: frame.maybe_image().cloned(), + offscreen: OffscreenTexture::new(state, &frame.intrinsics().image_size()), + }), + ); + return true; + } + + false + } + + pub fn update( + views: &mut LinkedHashMap, + packet: View2dPacket, + state: &ViewerRenderState, + ) { + Self::create_if_new(views, &packet, state); + let view = views.get_mut(&packet.view_label).unwrap(); + + let view = match view { + View::View2d(view) => view, + _ => panic!("View type mismatch"), + }; + + if let Some(frame) = packet.frame { + println!("!!!!!!!!!!!Got a new frame"); + let depth_stencil = Some(wgpu::DepthStencilState { + format: wgpu::TextureFormat::Depth32Float, + depth_write_enabled: true, + depth_compare: wgpu::CompareFunction::Less, + stencil: wgpu::StencilState::default(), + bias: wgpu::DepthBiasState::default(), + }); + + let new_intrinsics = frame.intrinsics(); + + // We got a new frame, hence we need to clear all renderables and then add the + // intrinsics and background image if present. The easiest and most error-proof way to + // do this is to create a new SceneRenderer and PixelRenderer and replace the old ones. 
+ view.pixel = PixelRenderer::new( + state, + &frame.intrinsics().image_size(), + depth_stencil.clone(), + ); + + view.scene = SceneRenderer::new( + state, + frame.intrinsics(), + depth_stencil, + InteractionEnum::InplaneInteraction(InplaneInteraction { + maybe_pointer_state: None, + maybe_scroll_state: None, + maybe_scene_focus: None, + _clipping_planes: WgpuClippingPlanes { + near: 0.1, + far: 1000.0, + }, + scene_from_camera: Isometry3::from_t(&VecF64::<3>::new(0.0, 0.0, -5.0)), + }), + ); + + view.offscreen = OffscreenTexture::new(state, &new_intrinsics.image_size()); + + view.intrinsics = new_intrinsics.clone(); + if let Some(background_image) = frame.maybe_image() { + view.background_image = Some(background_image.clone()); + + let w = view.intrinsics.image_size().width; + let h = view.intrinsics.image_size().height; + + let far = 500.0; + + let p0 = view + .intrinsics + .cam_unproj_with_z(&VecF64::<2>::new(0.0, 0.0), far) + .cast(); + let p1 = view + .intrinsics + .cam_unproj_with_z(&VecF64::<2>::new(w as f64, 0.0), far) + .cast(); + let p2 = view + .intrinsics + .cam_unproj_with_z(&VecF64::<2>::new(0.0, h as f64), far) + .cast(); + let p3 = view + .intrinsics + .cam_unproj_with_z(&VecF64::<2>::new(w as f64, h as f64), far) + .cast(); + + let tex_mesh = TexturedMesh3::make(&[ + [(p0, [0.0, 0.0]), (p1, [1.0, 0.0]), (p2, [0.0, 1.0])], + [(p1, [1.0, 0.0]), (p2, [0.0, 1.0]), (p3, [1.0, 1.0])], + ]); + view.scene + .textured_mesh_renderer + .mesh_table + .insert("background".to_owned(), tex_mesh.mesh); + } + } + + let view = views.get_mut(&packet.view_label).unwrap(); + let view = match view { + View::View2d(view) => view, + _ => panic!("View type mismatch"), + }; + + for m in packet.renderables2d { + match m { + Renderable2d::Lines2(lines) => { + view.pixel + .line_renderer + .lines_table + .insert(lines.name, lines.lines); + } + Renderable2d::Points2(points) => { + view.pixel + .point_renderer + .points_table + .insert(points.name, points.points); + } + } + } + for m in packet.renderables3d { + match m { + Renderable3d::Lines3(lines3) => { + view.scene + .line_renderer + .line_table + .insert(lines3.name, lines3.lines); + } + Renderable3d::Points3(points3) => { + view.scene + .point_renderer + .point_table + .insert(points3.name, points3.points); + } + Renderable3d::Mesh3(mesh) => { + view.scene + .mesh_renderer + .mesh_table + .insert(mesh.name, mesh.mesh); + } + } + } + } +} + +impl HasAspectRatio for View2d { + fn aspect_ratio(&self) -> f32 { + self.intrinsics.image_size().aspect_ratio() + } + + fn view_size(&self) -> ImageSize { + self.intrinsics.image_size() + } + + fn intrinsics(&self) -> &DynCamera { + &self.intrinsics + } +} diff --git a/crates/sophus_viewer/src/views/view3d.rs b/crates/sophus_viewer/src/views/view3d.rs new file mode 100644 index 0000000..562fe13 --- /dev/null +++ b/crates/sophus_viewer/src/views/view3d.rs @@ -0,0 +1,109 @@ +use linked_hash_map::LinkedHashMap; +use sophus_image::ImageSize; +use sophus_sensor::DynCamera; + +use crate::interactions::orbit_interaction::OrbitalInteraction; +use crate::interactions::InteractionEnum; +use crate::offscreen::OffscreenTexture; +use crate::renderables::renderable3d::Renderable3d; +use crate::renderables::renderable3d::View3dPacket; +use crate::scene_renderer::SceneRenderer; +use crate::views::aspect_ratio::HasAspectRatio; +use crate::views::View; +use crate::ViewerRenderState; + +pub(crate) struct View3d { + pub(crate) scene: SceneRenderer, + pub(crate) intrinsics: DynCamera, + pub(crate) offscreen: OffscreenTexture, +} + 
+impl View3d { + fn create_if_new( + views: &mut LinkedHashMap, + packet: &View3dPacket, + state: &ViewerRenderState, + ) { + if !views.contains_key(&packet.view_label) { + let depth_stencil = Some(wgpu::DepthStencilState { + format: wgpu::TextureFormat::Depth32Float, + depth_write_enabled: true, + depth_compare: wgpu::CompareFunction::Less, + stencil: wgpu::StencilState::default(), + bias: wgpu::DepthBiasState::default(), + }); + views.insert( + packet.view_label.clone(), + View::View3d(View3d { + offscreen: OffscreenTexture::new( + state, + &packet.initial_camera.intrinsics.image_size(), + ), + scene: SceneRenderer::new( + state, + &packet.initial_camera.intrinsics, + depth_stencil, + InteractionEnum::OrbitalInteraction(OrbitalInteraction { + maybe_pointer_state: None, + maybe_scroll_state: None, + maybe_scene_focus: None, + clipping_planes: packet.initial_camera.clipping_planes, + scene_from_camera: packet.initial_camera.scene_from_camera, + }), + ), + intrinsics: packet.initial_camera.intrinsics.clone(), + }), + ); + } + } + + pub fn update( + views: &mut LinkedHashMap, + packet: View3dPacket, + state: &ViewerRenderState, + ) { + Self::create_if_new(views, &packet, state); + + let view = views.get_mut(&packet.view_label).unwrap(); + let view = match view { + View::View3d(view) => view, + _ => panic!("View type mismatch"), + }; + for m in packet.renderables3d { + match m { + Renderable3d::Lines3(lines3) => { + view.scene + .line_renderer + .line_table + .insert(lines3.name, lines3.lines); + } + Renderable3d::Points3(points3) => { + view.scene + .point_renderer + .point_table + .insert(points3.name, points3.points); + } + Renderable3d::Mesh3(mesh) => { + view.scene + .mesh_renderer + .mesh_table + .insert(mesh.name, mesh.mesh); + } + } + } + } +} + +impl HasAspectRatio for View3d { + fn aspect_ratio(&self) -> f32 { + self.intrinsics.image_size().aspect_ratio() + } + + fn view_size(&self) -> ImageSize { + self.intrinsics.image_size() + } + + fn intrinsics(&self) -> &DynCamera { + &self.intrinsics + } +}
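
Usage sketch (not part of the patch above): the snippet below illustrates how a client of the new sophus_viewer::renderables API introduced in this change might assemble a View2dPacket and wrap it in a Packets message, using only types added here (Frame, Color, Lines2, Points2, Renderable2d, Packet, Packets). The import paths, the helper name make_example_packets, and the literal coordinates are assumptions for illustration, not code from this patch.

// Hypothetical client-side sketch; module paths and values are assumptions.
use sophus_image::ImageSize;
use sophus_viewer::renderables::color::Color;
use sophus_viewer::renderables::frame::Frame;
use sophus_viewer::renderables::renderable2d::Lines2;
use sophus_viewer::renderables::renderable2d::Points2;
use sophus_viewer::renderables::renderable2d::Renderable2d;
use sophus_viewer::renderables::renderable2d::View2dPacket;
use sophus_viewer::renderables::Packet;
use sophus_viewer::renderables::Packets;

fn make_example_packets() -> Packets {
    // A frame fixes the view size; from_size derives a default pinhole camera for it.
    let frame = Frame::from_size(&ImageSize {
        width: 640,
        height: 480,
    });

    let packet = View2dPacket {
        view_label: "example_view".to_owned(),
        // Sending Some(frame) (re)creates the view and clears previous content;
        // later packets may pass frame: None to append renderables to the existing frame.
        frame: Some(frame),
        renderables2d: vec![
            Renderable2d::Lines2(Lines2::make(
                "diagonal",
                &[[[0.0f32, 0.0], [639.0, 479.0]]],
                &Color::red(),
                2.0,
            )),
            Renderable2d::Points2(Points2::make(
                "corners",
                &[[0.5f32, 0.5], [639.5, 0.5], [0.5, 479.5], [639.5, 479.5]],
                &Color::white(),
                4.0,
            )),
        ],
        renderables3d: vec![],
    };

    Packets {
        packets: vec![Packet::View2d(packet)],
    }
}

The View3dPacket path is analogous, except that instead of a frame it carries an initial_camera (intrinsics, clipping planes, and scene_from_camera), which is only honored by the first packet for a given view label.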