diff --git a/examples/CMakeLists.txt b/examples/CMakeLists.txt
index 750eb3fd11..64a0ca6bbc 100644
--- a/examples/CMakeLists.txt
+++ b/examples/CMakeLists.txt
@@ -45,3 +45,4 @@ add_subdirectory(pose-and-image)
 add_subdirectory(trajectory)
 add_subdirectory(ar-basic)
 add_subdirectory(pose-apriltag)
+add_subdirectory(tracking-and-depth)
diff --git a/examples/example.hpp b/examples/example.hpp
index e2ff8df752..ac0f24a1b3 100644
--- a/examples/example.hpp
+++ b/examples/example.hpp
@@ -813,6 +813,99 @@ void draw_pointcloud(float width, float height, glfw_state& app_state, rs2::points& points)
     glPopAttrib();
 }
 
+void quat2mat(rs2_quaternion& q, GLfloat H[16]) // to column-major matrix
+{
+    H[0] = 1 - 2*q.y*q.y - 2*q.z*q.z; H[4] = 2*q.x*q.y - 2*q.z*q.w;     H[8]  = 2*q.x*q.z + 2*q.y*q.w;     H[12] = 0.0f;
+    H[1] = 2*q.x*q.y + 2*q.z*q.w;     H[5] = 1 - 2*q.x*q.x - 2*q.z*q.z; H[9]  = 2*q.y*q.z - 2*q.x*q.w;     H[13] = 0.0f;
+    H[2] = 2*q.x*q.z - 2*q.y*q.w;     H[6] = 2*q.y*q.z + 2*q.x*q.w;     H[10] = 1 - 2*q.x*q.x - 2*q.y*q.y; H[14] = 0.0f;
+    H[3] = 0.0f;                      H[7] = 0.0f;                      H[11] = 0.0f;                      H[15] = 1.0f;
+}
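+
+// (quat2mat expands the unit quaternion reported by the T265 into a 4x4
+// homogeneous rotation, stored column-major so it can be handed directly
+// to glMultMatrixf below.)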
+
+// Handles all the OpenGL calls needed to display the point cloud w.r.t. a static reference frame
+void draw_pointcloud_wrt_world(float width, float height, glfw_state& app_state, rs2::points& points, rs2_pose& pose, float H_t265_d400[16], std::vector<rs2_vector>& trajectory)
+{
+    if (!points)
+        return;
+
+    // OpenGL commands that prep screen for the pointcloud
+    glLoadIdentity();
+    glPushAttrib(GL_ALL_ATTRIB_BITS);
+
+    glClearColor(153.f / 255, 153.f / 255, 153.f / 255, 1);
+    glClear(GL_DEPTH_BUFFER_BIT);
+
+    glMatrixMode(GL_PROJECTION);
+    glPushMatrix();
+    gluPerspective(60, width / height, 0.01f, 10.0f);
+
+    // viewing matrix
+    glMatrixMode(GL_MODELVIEW);
+    glPushMatrix();
+
+    // rotated from depth to world frame: z => -z, y => -y
+    glTranslatef(0, 0, -0.75f - app_state.offset_y * 0.05f);
+    glRotated(app_state.pitch, 1, 0, 0);
+    glRotated(app_state.yaw, 0, -1, 0);
+    glTranslatef(0, 0, 0.5f);
+
+    // draw trajectory
+    glEnable(GL_DEPTH_TEST);
+    glLineWidth(2.0f);
+    glBegin(GL_LINE_STRIP);
+    glColor3f(0.0f, 1.0f, 0.0f);
+    for (auto&& v : trajectory)
+        glVertex3f(v.x, v.y, v.z);
+    glEnd();
+    glLineWidth(0.5f);
+    glColor3f(1.0f, 1.0f, 1.0f);
+
+    // T265 pose
+    GLfloat H_world_t265[16];
+    quat2mat(pose.rotation, H_world_t265);
+    H_world_t265[12] = pose.translation.x;
+    H_world_t265[13] = pose.translation.y;
+    H_world_t265[14] = pose.translation.z;
+
+    glMultMatrixf(H_world_t265);
+
+    // T265 to D4xx extrinsics
+    glMultMatrixf(H_t265_d400);
+
+    glPointSize(width / 640);
+    glEnable(GL_DEPTH_TEST);
+    glEnable(GL_TEXTURE_2D);
+    glBindTexture(GL_TEXTURE_2D, app_state.tex.get_gl_handle());
+    float tex_border_color[] = { 0.8f, 0.8f, 0.8f, 0.8f };
+    glTexParameterfv(GL_TEXTURE_2D, GL_TEXTURE_BORDER_COLOR, tex_border_color);
+    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, 0x812F); // GL_CLAMP_TO_EDGE
+    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, 0x812F); // GL_CLAMP_TO_EDGE
+    glBegin(GL_POINTS);
+
+    /* this segment actually prints the pointcloud */
+    auto vertices = points.get_vertices();              // get vertices
+    auto tex_coords = points.get_texture_coordinates(); // and texture coordinates
+    for (int i = 0; i < points.size(); i++)
+    {
+        if (vertices[i].z)
+        {
+            // upload texture coordinates and the point, only for points we have depth data for
+            // (the texture coordinate has to be set before the vertex it applies to)
+            glTexCoord2fv(tex_coords[i]);
+            glVertex3fv(vertices[i]);
+        }
+    }
+
+    // OpenGL cleanup
+    glEnd();
+    glPopMatrix();
+    glMatrixMode(GL_PROJECTION);
+    glPopMatrix();
+    glPopAttrib();
+}
+
 // Registers the state variable and callbacks to allow mouse control of the pointcloud
 void register_glfw_callbacks(window& app, glfw_state& app_state)
 {
diff --git a/examples/tracking-and-depth/CMakeLists.txt b/examples/tracking-and-depth/CMakeLists.txt
new file mode 100644
index 0000000000..5ce02195b1
--- /dev/null
+++ b/examples/tracking-and-depth/CMakeLists.txt
@@ -0,0 +1,20 @@
+# License: Apache 2.0. See LICENSE file in root directory.
+# Copyright(c) 2019 Intel Corporation. All Rights Reserved.
+# minimum required cmake version: 3.1.0
+cmake_minimum_required(VERSION 3.1.0)
+
+project(RealsenseExamplesTrackingAndDepth)
+
+if(BUILD_GRAPHICAL_EXAMPLES)
+    add_executable(rs-tracking-and-depth rs-tracking-and-depth.cpp ../example.hpp)
+    set_property(TARGET rs-tracking-and-depth PROPERTY CXX_STANDARD 11)
+    target_link_libraries(rs-tracking-and-depth ${DEPENDENCIES})
+    include_directories(rs-tracking-and-depth ../)
+    set_target_properties(rs-tracking-and-depth PROPERTIES FOLDER Examples)
+    install(TARGETS rs-tracking-and-depth RUNTIME DESTINATION ${CMAKE_INSTALL_BINDIR})
+    add_custom_command(
+        TARGET rs-tracking-and-depth POST_BUILD
+        COMMAND ${CMAKE_COMMAND} -E copy
+            ${CMAKE_CURRENT_SOURCE_DIR}/H_t265_d400.cfg
+            ${CMAKE_CURRENT_BINARY_DIR}/H_t265_d400.cfg)
+endif()
diff --git a/examples/tracking-and-depth/H_t265_d400.cfg b/examples/tracking-and-depth/H_t265_d400.cfg
new file mode 100644
index 0000000000..1ed87b97ad
--- /dev/null
+++ b/examples/tracking-and-depth/H_t265_d400.cfg
@@ -0,0 +1,4 @@
+0.999968402 -0.006753626 -0.004188075 -0.015890727
+-0.006685408 -0.999848172 0.016093893 0.028273059
+-0.004296131 -0.016065384 -0.999861654 -0.009375589
+
diff --git a/examples/tracking-and-depth/bracket_t265nd435_external.stl b/examples/tracking-and-depth/bracket_t265nd435_external.stl
new file mode 100644
index 0000000000..cfaafe4eb0
Binary files /dev/null and b/examples/tracking-and-depth/bracket_t265nd435_external.stl differ
diff --git a/examples/tracking-and-depth/readme.md b/examples/tracking-and-depth/readme.md
new file mode 100644
index 0000000000..545e6585a5
--- /dev/null
+++ b/examples/tracking-and-depth/readme.md
@@ -0,0 +1,179 @@
+# rs-tracking-and-depth Sample
+
+## Overview
+
+This sample demonstrates how to use the T265 tracking camera together with a D4xx depth camera to display the 3D pointcloud generated by the D4xx with respect to a static reference frame. The pose of the depth camera is tracked by the tracking camera, which is rigidly attached to it.
+
+## Expected Output
+The application should open a window with a pointcloud. A static scene should appear static on the screen even while the camera is moving. This allows "scanning" the scene frame-by-frame by moving the camera. (Currently, no depth data is accumulated or filtered over time.) Using the mouse, you can rotate and zoom the pointcloud.
+
+## Mechanical Setup
+The provided [STL](./bracket_t265nd435_external.stl) can be 3D-printed and used to attach the T265 and D435 using **M3 screws, 10mm & 18mm**, respectively.
+![snapshot00](https://user-images.githubusercontent.com/28366639/60457143-2b27dd80-9bf0-11e9-8286-757953d9013e.png)
+If the mount is used in the depicted orientation, with the T265 below and the D435 mounted above, the provided [calibration](./H_t265_d400.cfg) can be used to transform between the point cloud and the pose reference frame.
+
+In the depicted orientation, a **¼-20 nut** can be inserted into the 3D print in order to mount the assembly on a tripod.
+
+![IMG_1900_3](https://user-images.githubusercontent.com/28366639/61068929-1271b180-a3c0-11e9-9502-d30092c7506a.png)
+
+Please note that the homogeneous transformation is expected to be provided as a *row-major* 3x4 matrix of the form `H = [R, t]`, where `R` is the rotation matrix and `t` the translation vector of the depth frame (i.e. infrared 1) with respect to the T265 body frame (see above).
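+
+For illustration, here is a minimal sketch of how such a file can be read into the column-major 4x4 array that OpenGL expects (this mirrors what `rs-tracking-and-depth.cpp` does; the helper name `load_extrinsics` is ours and not part of the API):
+```cpp
+#include <fstream>
+#include <string>
+
+// Read the row-major 3x4 matrix [R, t] from disk into a column-major
+// homogeneous 4x4 array, transposing on the fly.
+bool load_extrinsics(const std::string& fn, float H[16])
+{
+    std::ifstream ifs(fn);
+    if (!ifs.is_open())
+        return false;
+    for (int i = 0; i < 3; i++)     // row in the file
+        for (int j = 0; j < 4; j++) // column in the file
+            ifs >> H[i + 4*j];
+    H[3] = H[7] = H[11] = 0.f;      // implicit bottom row [0 0 0 1]
+    H[15] = 1.f;
+    return true;
+}
+```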
+
+## Code Overview
+
+This example is based on the [pointcloud example](../pointcloud/). Please also refer to the respective documentation.
+
+Similar to the [first tutorial](../capture/) we include the Cross-Platform API:
+```cpp
+#include <librealsense2/rs.hpp> // Include RealSense Cross Platform API
+```
+
+Next, we include a [very short helper library](../example.hpp) encapsulating basic OpenGL rendering and window management:
+```cpp
+#include "example.hpp"          // Include short list of convenience functions for rendering
+```
+
+We also include the STL `<algorithm>` header for `std::min` and `std::max`, and `<fstream>` for `std::ifstream` to parse the homogeneous extrinsic matrix from a text file.
+
+Next, we define a `state` struct and two helper functions. `state` and `register_glfw_callbacks` handle the pointcloud's rotation in the application, and `draw_pointcloud_wrt_world` makes all the OpenGL calls necessary to display the pointcloud.
+```cpp
+// Struct for managing rotation of pointcloud view
+struct state { double yaw, pitch, last_x, last_y; bool ml; float offset_x, offset_y; texture tex; };
+
+// Helper functions
+void register_glfw_callbacks(window& app, state& app_state);
+void draw_pointcloud_wrt_world(float width, float height, glfw_state& app_state, rs2::points& points, rs2_pose& pose, float H_t265_d400[16], std::vector<rs2_vector>& trajectory);
+```
+
+The `example.hpp` header lets us easily open a new window and prepare textures for rendering. The `state` struct (declared above) is used for interacting with the mouse, with the help of some callbacks registered through glfw.
+```cpp
+// Create a simple OpenGL window for rendering:
+window app(1280, 720, "RealSense Tracking and Depth Example");
+// Construct an object to manage view state
+state app_state = { 0, 0, 0, 0, false, 0, 0, 0 };
+// register callbacks to allow manipulation of the pointcloud
+register_glfw_callbacks(app, app_state);
+```
+
+We are going to use classes within the `rs2` namespace:
+```cpp
+using namespace rs2;
+```
+
+As part of the API we offer the `pointcloud` class, which calculates a pointcloud and corresponding texture mapping from depth and color frames. To make sure we always have something to display, we also make a `rs2::points` object to store the results of the pointcloud calculation.
+```cpp
+// Declare pointcloud object, for calculating pointclouds and texture mappings
+rs2::pointcloud pc;
+// We want the points object to be persistent so we can display the last cloud when a frame drops
+rs2::points points;
+```
+
+We declare a `rs2_pose` object to store the latest pose as reported by the T265:
+```cpp
+rs2_pose pose;
+```
+
+To stream from multiple devices, please also refer to the [multicam example](../multicam/). First, a common context is created and a (separate) pipeline is started for each of the queried devices:
+```cpp
+// Start a streaming pipe per each connected device
+for (auto&& dev : ctx.query_devices())
+{
+    rs2::pipeline pipe(ctx);
+    rs2::config cfg;
+    cfg.enable_device(dev.get_info(RS2_CAMERA_INFO_SERIAL_NUMBER));
+    pipe.start(cfg);
+    pipelines.emplace_back(pipe);
+}
+```
+
+The extrinsics between the streams, namely depth and pose, are loaded from a configuration file that provides the *row-major* 3x4 matrix `[R, t]` described above (the bottom row of the homogeneous 4x4 matrix is implicit). The example validates the loaded matrix before use, as shown in the sketch below.
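+
+The rotation part of the loaded matrix is checked to be a proper rotation (determinant +1); anything else hints at a malformed file (mirrored axes, wrong ordering, scaling). From the example source:
+```cpp
+// Determinant of the upper-left 3x3 rotation block of a column-major 4x4 matrix
+float detR(float H[16]) {
+    return H[0]*(H[5]*H[10] - H[9]*H[6])
+         - H[4]*(H[1]*H[10] - H[2]*H[9])
+         + H[8]*(H[1]*H[6]  - H[5]*H[2]);
+}
+
+// usage: reject the file if the determinant deviates from 1
+// if (fabs(1 - detR(H_t265_d400)) > 1e-6) { /* invalid input */ }
+```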
+
+While the app is running, we loop over the pipelines and wait for frames before collecting the respective color, depth and pose frames:
+```cpp
+for (auto&& pipe : pipelines) // loop over pipelines
+{
+    auto frames = pipe.wait_for_frames(); // Wait for the next set of frames from the camera
+```
+
+Using helper functions on the `frameset` object we check for new depth and color frames. We pass the color frame to the `pointcloud` object to use as the texture, and also give it to OpenGL with the help of the `texture` class. From the depth frame we generate a new pointcloud.
+```cpp
+auto depth = frames.get_depth_frame();
+
+// Generate the pointcloud and texture mappings
+points = pc.calculate(depth);
+
+auto color = frames.get_color_frame();
+
+// Tell pointcloud object to map to this color frame
+pc.map_to(color);
+
+// Upload the color frame to OpenGL
+app_state.tex.upload(color);
+```
+
+In a similar way we get the pose data from the pose frame:
+```cpp
+auto pose_frame = frames.get_pose_frame();
+if (pose_frame) {
+    pose = pose_frame.get_pose_data();
+}
+```
+
+Finally, we call `draw_pointcloud_wrt_world` to draw the pointcloud with respect to a common (fixed) world frame. This is done by moving the observing camera according to the transformation reported by the T265 and the extrinsics to the depth stream (instead of transforming the scene by the inverse, which would result in the same relative motion):
+```cpp
+draw_pointcloud_wrt_world(app.width(), app.height(), app_state, points, pose, H_t265_d400, trajectory);
+```
+
+`draw_pointcloud_wrt_world` primarily calls OpenGL, but the critical portion iterates over all the points in the pointcloud, and where we have depth data, we upload the point's texture mapping and coordinates to OpenGL:
+```cpp
+/* this segment actually prints the pointcloud */
+auto vertices = points.get_vertices();              // get vertices
+auto tex_coords = points.get_texture_coordinates(); // and texture coordinates
+for (int i = 0; i < points.size(); i++)
+{
+    if (vertices[i].z)
+    {
+        // upload texture coordinates and the point, only for points we have depth data for
+        glTexCoord2fv(tex_coords[i]);
+        glVertex3fv(vertices[i]);
+    }
+}
+```
+
+The second critical portion sets up the viewing (modelview) matrix according to the provided transformations:
+```cpp
+// viewing matrix
+glMatrixMode(GL_MODELVIEW);
+glPushMatrix();
+
+GLfloat H_world_t265[16];
+quat2mat(pose.rotation, H_world_t265);
+H_world_t265[12] = pose.translation.x;
+H_world_t265[13] = pose.translation.y;
+H_world_t265[14] = pose.translation.z;
+
+glMultMatrixf(H_world_t265);
+glMultMatrixf(H_t265_d400);
+```
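+
+Here `quat2mat` expands the unit quaternion `q = (x, y, z, w)` reported by the T265 into the standard rotation matrix, written below in conventional row-major notation (the code stores it column-major, as OpenGL expects):
+```latex
+R(q) = \begin{pmatrix}
+  1 - 2(y^2 + z^2) & 2(xy - zw)       & 2(xz + yw)       \\
+  2(xy + zw)       & 1 - 2(x^2 + z^2) & 2(yz - xw)       \\
+  2(xz - yw)       & 2(yz + xw)       & 1 - 2(x^2 + y^2)
+\end{pmatrix}
+```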
diff --git a/examples/tracking-and-depth/rs-tracking-and-depth.cpp b/examples/tracking-and-depth/rs-tracking-and-depth.cpp
new file mode 100644
index 0000000000..b33f1a0109
--- /dev/null
+++ b/examples/tracking-and-depth/rs-tracking-and-depth.cpp
@@ -0,0 +1,140 @@
+// License: Apache 2.0. See LICENSE file in root directory.
+// Copyright(c) 2019 Intel Corporation. All Rights Reserved.
+
+#include <librealsense2/rs.hpp> // Include RealSense Cross Platform API
+#include "example.hpp"          // Include short list of convenience functions for rendering
+
+#include <algorithm>            // std::min, std::max
+#include <fstream>              // std::ifstream
+#include <iomanip>              // std::setprecision, std::fixed
+#include <cmath>                // fabs, sqrt, pow
+
+// Helper functions
+void register_glfw_callbacks(window& app, glfw_state& app_state);
+
+// Determinant of the upper-left 3x3 rotation block of a column-major 4x4 matrix
+float detR(float H[16]) {
+    return H[0]*(H[5]*H[10] - H[9]*H[6]) - H[4]*(H[1]*H[10] - H[2]*H[9]) + H[8]*(H[1]*H[6] - H[5]*H[2]);
+}
+
+int main(int argc, char * argv[]) try
+{
+    // Create a simple OpenGL window for rendering:
+    window app(1280, 720, "RealSense Tracking and Depth Example");
+    // Construct an object to manage view state
+    glfw_state app_state;
+    // register callbacks to allow manipulation of the pointcloud
+    register_glfw_callbacks(app, app_state);
+
+    // Declare pointcloud object, for calculating pointclouds and texture mappings
+    rs2::pointcloud pc;
+    // We want the points object to be persistent so we can display the last cloud when a frame drops
+    rs2::points points;
+    // store the latest pose frame and the trajectory of the device
+    rs2::pose_frame pose_frame(nullptr);
+    std::vector<rs2_vector> trajectory;
+
+    rs2::context ctx; // Create librealsense context for managing devices
+    std::vector<rs2::pipeline> pipelines;
+
+    // Start a streaming pipe per each connected device
+    for (auto&& dev : ctx.query_devices())
+    {
+        rs2::pipeline pipe(ctx);
+        rs2::config cfg;
+        cfg.enable_device(dev.get_info(RS2_CAMERA_INFO_SERIAL_NUMBER));
+        pipe.start(cfg);
+        pipelines.emplace_back(pipe);
+    }
+
+    // extrinsics
+    // depth w.r.t. tracking (column-major)
+    float H_t265_d400[16] = { 1, 0, 0, 0,
+                              0,-1, 0, 0,
+                              0, 0,-1, 0,
+                              0, 0, 0, 1 };
+    std::string fn = "./H_t265_d400.cfg";
+    std::ifstream ifs(fn);
+    if (!ifs.is_open()) {
+        std::cerr << "Couldn't open " << fn << std::endl;
+        return -1;
+    }
+    else {
+        for (int i = 0; i < 3; i++) {
+            for (int j = 0; j < 4; j++) {
+                ifs >> H_t265_d400[i + 4*j]; // row-major to column-major
+            }
+        }
+    }
+    float det = detR(H_t265_d400);
+    if (fabs(1 - det) > 1e-6) {
+        std::cerr << "Invalid homogeneous transformation matrix input (det != 1)" << std::endl;
+        return -1;
+    }
+
+    while (app) // Application still alive?
+    {
+        for (auto&& pipe : pipelines) // loop over pipelines
+        {
+            // Wait for the next set of frames from the camera
+            auto frames = pipe.wait_for_frames();
+
+            auto color = frames.get_color_frame();
+
+            // For cameras that don't have an RGB sensor, we'll map the pointcloud to infrared instead of color
+            if (!color)
+                color = frames.get_infrared_frame();
+
+            // Tell pointcloud object to map to this color frame
+            if (color)
+                pc.map_to(color);
+
+            auto depth = frames.get_depth_frame();
+
+            // Generate the pointcloud and texture mappings
+            if (depth)
+                points = pc.calculate(depth);
+
+            // Upload the color frame to OpenGL
+            if (color)
+                app_state.tex.upload(color);
+
+            // pose
+            auto pose = frames.get_pose_frame();
+            if (pose) {
+                pose_frame = pose;
+
+                // Print the x, y, z values of the translation, relative to initial position
+                auto pose_data = pose.get_pose_data();
+                std::cout << "\r" << "Device Position: " << std::setprecision(3) << std::fixed
+                          << pose_data.translation.x << " " << pose_data.translation.y << " "
+                          << pose_data.translation.z << " (meters)";
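+
+                // Note: the trajectory is thinned on the fly so the line strip drawn in
+                // draw_pointcloud_wrt_world stays small; the 2 mm value below is an
+                // arbitrary minimum-motion threshold between consecutive stored points.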
+                // add a new point to the trajectory only if the motion is large enough
+                if (trajectory.size() == 0)
+                    trajectory.push_back(pose_data.translation);
+                else {
+                    rs2_vector prev = trajectory.back();
+                    rs2_vector curr = pose_data.translation;
+                    if (sqrt(pow(curr.x - prev.x, 2) + pow(curr.y - prev.y, 2) + pow(curr.z - prev.z, 2)) > 0.002)
+                        trajectory.push_back(pose_data.translation);
+                }
+            }
+        }
+
+        // Draw the pointcloud
+        if (points && pose_frame) {
+            rs2_pose pose = pose_frame.get_pose_data();
+            draw_pointcloud_wrt_world(app.width(), app.height(), app_state, points, pose, H_t265_d400, trajectory);
+        }
+    }
+
+    return EXIT_SUCCESS;
+}
+catch (const rs2::error& e)
+{
+    std::cerr << "RealSense error calling " << e.get_failed_function() << "(" << e.get_failed_args() << "):\n    " << e.what() << std::endl;
+    return EXIT_FAILURE;
+}
+catch (const std::exception& e)
+{
+    std::cerr << e.what() << std::endl;
+    return EXIT_FAILURE;
+}