Skip to content

Commit

Permalink
added tracking + depth sample
Browse files Browse the repository at this point in the history
  • Loading branch information
schmidtp1 committed Jul 11, 2019
1 parent 506b040 commit 3b14a5c
Show file tree
Hide file tree
Showing 7 changed files with 437 additions and 0 deletions.
1 change: 1 addition & 0 deletions examples/CMakeLists.txt
Original file line number Diff line number Diff line change
Expand Up @@ -45,3 +45,4 @@ add_subdirectory(pose-and-image)
add_subdirectory(trajectory)
add_subdirectory(ar-basic)
add_subdirectory(pose-apriltag)
add_subdirectory(tracking-and-depth)
93 changes: 93 additions & 0 deletions examples/example.hpp
Original file line number Diff line number Diff line change
Expand Up @@ -813,6 +813,99 @@ void draw_pointcloud(float width, float height, glfw_state& app_state, rs2::poin
glPopAttrib();
}

void quat2mat(rs2_quaternion& q, GLfloat H[16]) // to column-major matrix
{
H[0] = 1 - 2*q.y*q.y - 2*q.z*q.z; H[4] = 2*q.x*q.y - 2*q.z*q.w; H[8] = 2*q.x*q.z + 2*q.y*q.w; H[12] = 0.0f;
H[1] = 2*q.x*q.y + 2*q.z*q.w; H[5] = 1 - 2*q.x*q.x - 2*q.z*q.z; H[9] = 2*q.y*q.z - 2*q.x*q.w; H[13] = 0.0f;
H[2] = 2*q.x*q.z - 2*q.y*q.w; H[6] = 2*q.y*q.z + 2*q.x*q.w; H[10] = 1 - 2*q.x*q.x - 2*q.y*q.y; H[14] = 0.0f;
H[3] = 0.0f; H[7] = 0.0f; H[11] = 0.0f; H[15] = 1.0f;
}

// Handles all the OpenGL calls needed to display the point cloud w.r.t. static reference frame
void draw_pointcloud_wrt_world(float width, float height, glfw_state& app_state, rs2::points& points, rs2_pose& pose, float H_t265_d400[16], std::vector<rs2_vector>& trajectory)
{
if (!points)
return;

// OpenGL commands that prep screen for the pointcloud
glLoadIdentity();
glPushAttrib(GL_ALL_ATTRIB_BITS);

glClearColor(153.f / 255, 153.f / 255, 153.f / 255, 1);
glClear(GL_DEPTH_BUFFER_BIT);

glMatrixMode(GL_PROJECTION);
glPushMatrix();
gluPerspective(60, width / height, 0.01f, 10.0f);


// viewing matrix
glMatrixMode(GL_MODELVIEW);
glPushMatrix();

// rotated from depth to world frame: z => -z, y => -y
glTranslatef(0, 0, -0.75f-app_state.offset_y*0.05f);
glRotated(app_state.pitch, 1, 0, 0);
glRotated(app_state.yaw, 0, -1, 0);
glTranslatef(0, 0, 0.5f);

// draw trajectory
glEnable(GL_DEPTH_TEST);
glLineWidth(2.0f);
glBegin(GL_LINE_STRIP);
for (auto&& v : trajectory)
{
glColor3f(0.0f, 1.0f, 0.0f);
glVertex3f(v.x, v.y, v.z);
}
glEnd();
glLineWidth(0.5f);
glColor3f(1.0f, 1.0f, 1.0f);

// T265 pose
GLfloat H_world_t265[16];
quat2mat(pose.rotation, H_world_t265);
H_world_t265[12] = pose.translation.x;
H_world_t265[13] = pose.translation.y;
H_world_t265[14] = pose.translation.z;

glMultMatrixf(H_world_t265);

// T265 to D4xx extrinsics
glMultMatrixf(H_t265_d400);


glPointSize(width / 640);
glEnable(GL_DEPTH_TEST);
glEnable(GL_TEXTURE_2D);
glBindTexture(GL_TEXTURE_2D, app_state.tex.get_gl_handle());
float tex_border_color[] = { 0.8f, 0.8f, 0.8f, 0.8f };
glTexParameterfv(GL_TEXTURE_2D, GL_TEXTURE_BORDER_COLOR, tex_border_color);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, 0x812F); // GL_CLAMP_TO_EDGE
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, 0x812F); // GL_CLAMP_TO_EDGE
glBegin(GL_POINTS);

/* this segment actually prints the pointcloud */
auto vertices = points.get_vertices(); // get vertices
auto tex_coords = points.get_texture_coordinates(); // and texture coordinates
for (int i = 0; i < points.size(); i++)
{
if (vertices[i].z)
{
// upload the point and texture coordinates only for points we have depth data for
glVertex3fv(vertices[i]);
glTexCoord2fv(tex_coords[i]);
}
}

// OpenGL cleanup
glEnd();
glPopMatrix();
glMatrixMode(GL_PROJECTION);
glPopMatrix();
glPopAttrib();
}

// Registers the state variable and callbacks to allow mouse control of the pointcloud
void register_glfw_callbacks(window& app, glfw_state& app_state)
{
Expand Down
20 changes: 20 additions & 0 deletions examples/tracking-and-depth/CMakeLists.txt
Original file line number Diff line number Diff line change
@@ -0,0 +1,20 @@
# License: Apache 2.0. See LICENSE file in root directory.
# Copyright(c) 2019 Intel Corporation. All Rights Reserved.
# minimum required cmake version: 3.1.0
cmake_minimum_required(VERSION 3.1.0)

project(RealsenseExamplesTrackingAndDepth)

# Graphical sample: only built when GUI examples are enabled.
if(BUILD_GRAPHICAL_EXAMPLES)
add_executable(rs-tracking-and-depth rs-tracking-and-depth.cpp ../example.hpp)
set_property(TARGET rs-tracking-and-depth PROPERTY CXX_STANDARD 11)
target_link_libraries(rs-tracking-and-depth ${DEPENDENCIES})
# NOTE(review): include_directories() accepts only directories — the target name
# here is silently interpreted as a (nonexistent) include path; presumably
# target_include_directories(rs-tracking-and-depth PRIVATE ../) was intended — TODO confirm.
include_directories(rs-tracking-and-depth ../)
set_target_properties (rs-tracking-and-depth PROPERTIES FOLDER Examples)
install(TARGETS rs-tracking-and-depth RUNTIME DESTINATION ${CMAKE_INSTALL_BINDIR})
# Copy the extrinsics calibration file next to the built binary so the example
# can read it from its working directory at runtime.
add_custom_command(
TARGET rs-tracking-and-depth POST_BUILD
COMMAND ${CMAKE_COMMAND} -E copy
${CMAKE_CURRENT_SOURCE_DIR}/H_t265_d400.cfg
${CMAKE_CURRENT_BINARY_DIR}/H_t265_d400.cfg)
endif()
4 changes: 4 additions & 0 deletions examples/tracking-and-depth/H_t265_d400.cfg
Original file line number Diff line number Diff line change
@@ -0,0 +1,4 @@
0.999968402 -0.006753626 -0.004188075 -0.015890727
-0.006685408 -0.999848172 0.016093893 0.028273059
-0.004296131 -0.016065384 -0.999861654 -0.009375589

Binary file not shown.
179 changes: 179 additions & 0 deletions examples/tracking-and-depth/readme.md
Original file line number Diff line number Diff line change
@@ -0,0 +1,179 @@
# rs-tracking-and-depth Sample

## Overview

This sample demonstrates an example use of
the tracking camera T265
together with a depth camera D4xx to
display a 3D pointcloud as generated by the D4xx
with respect to a static reference frame.
The pose of the depth camera is tracked by the tracking camera which is rigidly attached to it.

## Expected Output
The application should open a window with a pointcloud.
A static scene should appear static on the screen even though the camera may be moving.
This allows "scanning" the scene (frame-by-frame) by moving the camera. (Currently, no depth data is accumulated or filtered over time.)
Using your mouse, you should be able to interact with the pointcloud, rotating and zooming the view.

<img class="animated-gif" src="https://user-images.githubusercontent.com/28366639/61068360-b0647c80-a3be-11e9-96de-d48823800973.gif" width="800">

## Mechanical Setup
The provided [STL](./bracket_t265nd435_external.stl) can be 3D-printed
and used to attach T265 and D435
using **M3 screws, 10mm & 18mm**, respectively.
![snapshot00](https://user-images.githubusercontent.com/28366639/60457143-2b27dd80-9bf0-11e9-8286-757953d9013e.png)
If the mount is used in the depicted orientation with T265 below and D435 mounted above,
the provided [calibration](./H_t265_d400.cfg) can be used to transform between point cloud and pose reference frame.

In the depicted orientation a **¼-20 nut** can be inserted into the 3D-print in order to mount the assembly on a tripod.

![IMG_1900_3](https://user-images.githubusercontent.com/28366639/61068929-1271b180-a3c0-11e9-9502-d30092c7506a.png)

Please note that the homogeneous transformation is expected to be provided as row-major 3x4 matrix of the form `H = [R, t]` where `R` is the rotation matrix and `t` the translation vector of the depth, i.e. infrared 1, frame, with respect to the T265 body frame (see above).


## Code Overview

This example is based on the [pointcloud example](../pointcloud/). Please also refer the respective documentation.

Similar to the [first tutorial](../capture/) we include the Cross-Platform API:
```cpp
#include <librealsense2/rs.hpp> // Include RealSense Cross Platform API
```

Next, we prepared a [very short helper library](../example.hpp) encapsulating basic OpenGL rendering and window management:
```cpp
#include "example.hpp" // Include short list of convenience functions for rendering
```

We also include the STL `<algorithm>` header for `std::min` and `std::max`,
and `<fstream>` for `std::ifstream`
to parse the homogeneous extrinsic matrix from a text file.

Next, we define a `state` struct and two helper functions. `state` and `register_glfw_callbacks` handle the pointcloud's rotation in the application, and `draw_pointcloud_wrt_world` makes all the OpenGL calls necessary to display the pointcloud.
```cpp
// Struct for managing rotation of pointcloud view
struct state { double yaw, pitch, last_x, last_y; bool ml; float offset_x, offset_y; texture tex; };

// Helper functions
void register_glfw_callbacks(window& app, state& app_state);
void draw_pointcloud_wrt_world(float width, float height, glfw_state& app_state, rs2::points& points, rs2_pose& pose, float H_t265_d400[16], std::vector<rs2_vector>& trajectory);
```
The `example.hpp` header lets us easily open a new window and prepare textures for rendering. The `state` class (declared above) is used for interacting with the mouse, with the help of some callbacks registered through glfw.
```cpp
// Create a simple OpenGL window for rendering:
window app(1280, 720, "RealSense Tracking and Depth Example");
// Construct an object to manage view state
state app_state = { 0, 0, 0, 0, false, 0, 0, 0 };
// register callbacks to allow manipulation of the pointcloud
register_glfw_callbacks(app, app_state);
```

We are going to use classes within the `rs2` namespace:
```cpp
using namespace rs2;
```

As part of the API we offer the `pointcloud` class which calculates a pointcloud and corresponding texture mapping from depth and color frames. To make sure we always have something to display, we also make a `rs2::points` object to store the results of the pointcloud calculation.
```cpp
// Declare pointcloud object, for calculating pointclouds and texture mappings
pointcloud pc = rs2::context().create_pointcloud();
// We want the points object to be persistent so we can display the last cloud when a frame drops
rs2::points points;
```

We declare a `rs2_pose` object to store the latest pose as reported by T265.
```cpp
rs2_pose pose;
```


To stream from multiple devices, please also refer to the [multicam example](../multicam/).
First a common context is created and a (separate) pipeline is started for each of the queried devices.
```cpp
// Start a streaming pipe per each connected device
for (auto&& dev : ctx.query_devices())
{
rs2::pipeline pipe(ctx);
rs2::config cfg;
cfg.enable_device(dev.get_info(RS2_CAMERA_INFO_SERIAL_NUMBER));
pipe.start(cfg);
pipelines.emplace_back(pipe);
}
```

The extrinsics between the streams, namely depth and pose, are loaded from a configuration file
that has to be provided in the form of a *row-major* 3-by-4 homogeneous matrix `H = [R, t]` (the final row `[0 0 0 1]` is implicit).

While the app is running,
we loop over the pipelines and wait for frames, before collecting the respective color, depth and pose frames:
```cpp
for (auto &&pipe : pipelines) // loop over pipelines
{
auto frames = pipe.wait_for_frames(); // Wait for the next set of frames from the camera
```
Using helper functions on the `frameset` object we check for new depth and color frames. We pass it to the `pointcloud` object to use as the texture, and also give it to OpenGL with the help of the `texture` class. We generate a new pointcloud.
```cpp
auto depth = frames.get_depth_frame();
// Generate the pointcloud and texture mappings
points = pc.calculate(depth);
auto color = frames.get_color_frame();
// Tell pointcloud object to map to this color frame
pc.map_to(color);
// Upload the color frame to OpenGL
app_state.tex.upload(color);
```

In a similar way we get the pose data from the pose frame
```cpp
auto pose_frame = frames.get_pose_frame();
if (pose_frame) {
pose = pose_frame.get_pose_data();
}
```

Finally we call `draw_pointcloud_wrt_world` to draw the pointcloud with respect to a common (fixed) world frame.
This is done by moving the observing camera according to the transformation reported by T265 and extrinsics to the depth stream (instead of transforming the scene by the inverse which results in the same relative motion).
```cpp
draw_pointcloud_wrt_world(app.width(), app.height(), app_state, points, pose, H_t265_d400, trajectory);
```
`draw_pointcloud_wrt_world` primarily calls OpenGL, but the critical portion iterates over all the points in the pointcloud, and where we have depth data, we upload the point's coordinates and texture mapping coordinates to OpenGL.
```cpp
/* this segment actually prints the pointcloud */
auto vertices = points.get_vertices(); // get vertices
auto tex_coords = points.get_texture_coordinates(); // and texture coordinates
for (int i = 0; i < points.size(); i++)
{
if (vertices[i].z)
{
// upload the point and texture coordinates only for points we have depth data for
glVertex3fv(vertices[i]);
glTexCoord2fv(tex_coords[i]);
}
}
```

The second critical portion changes the viewport according to the provided transformation.
```cpp
// viewing matrix
glMatrixMode(GL_MODELVIEW);
glPushMatrix();

GLfloat H_world_t265[16];
quat2mat(pose.rotation, H_world_t265);
H_world_t265[12] = pose.translation.x;
H_world_t265[13] = pose.translation.y;
H_world_t265[14] = pose.translation.z;

glMultMatrixf(H_world_t265);
glMultMatrixf(H_t265_d400);
```
Loading

0 comments on commit 3b14a5c

Please sign in to comment.