Anbang latest #14
Closed · wants to merge 4 commits
hloc.yaml (3 changes: 2 additions & 1 deletion)
@@ -13,12 +13,13 @@ IO_root: "/mnt/data/UNav-IO"
devices: "cuda:0"

hloc:
retrieval_num: 50
retrieval_num: 100
implicit_num: 1
ransac_thre: 30
# match_type: 'nvs'
match_type: "lightglue"
batch_mode: true
load_all_maps: true
map_loading_keyframes_reload: 0

feature:
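Reviewer note: this hunk raises retrieval_num from 50 to 100; batch_mode, load_all_maps, and map_loading_keyframes_reload also appear in the shown context. The code that consumes these keys is not part of this diff, so the following is only a sketch of reading them with PyYAML (key names come from the file; the defaults are assumptions):

```python
import yaml

with open("hloc.yaml") as f:
    cfg = yaml.safe_load(f)

hloc_cfg = cfg["hloc"]
retrieval_num = hloc_cfg["retrieval_num"]        # 100 after this PR (was 50)
batch_mode = hloc_cfg.get("batch_mode", False)   # get_location() uses this to pick the batched LightGlue path
load_all_maps = hloc_cfg.get("load_all_maps", False)                 # semantics not shown in this diff
keyframes_reload = hloc_cfg.get("map_loading_keyframes_reload", 0)   # semantics not shown in this diff
```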
Binary file removed query_image.jpg
Binary file removed src/UNav_core/__pycache__/__init__.cpython-38.pyc
src/UNav_core/src/feature/local_matcher.py (12 changes: 8 additions & 4 deletions)
@@ -5,7 +5,7 @@

class Local_matcher():
device='cuda' if torch.cuda.is_available() else "cpu"
def __init__(self, frame_name, map_data, threshold = 30, **feature_configs):
def __init__(self, frame_name, map_data, threshold = 10, **feature_configs):
local_feature = Local_extractor(feature_configs['local'])
self.local_feature_matcher = local_feature.matcher().to(self.device)

@@ -128,8 +128,12 @@ def lightglue_batch(self, parent, topk, feats0):

def lightglue(self, i, feats0):
# Fetch local features and landmarks for the selected frame
local_features = self.map_data[self.frame_name[i]]['local_features']
landmarks = self.map_data[self.frame_name[i]]['landmarks']
frame_data = self.map_data[self.frame_name[i]]

landmarks = frame_data['landmarks']

local_features = frame_data['local_features']

valid_keypoints_index = local_features['valid_keypoints_index']

feats1 = {
@@ -140,7 +144,7 @@ }
}

# Create a mapping for valid landmarks using valid_keypoints_index
valid_landmarks = {valid_id: landmarks[valid_id] for valid_id in valid_keypoints_index}
valid_landmarks = {valid_id:landmark for valid_id,landmark in zip(valid_keypoints_index, landmarks)}

# Batch data transfer to GPU
pred = {
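Reviewer note: two behavioural changes here. The default match threshold in Local_matcher.__init__ drops from 30 to 10, and valid_landmarks is now built by zipping valid_keypoints_index with landmarks instead of indexing landmarks by keypoint id. The two constructions only agree when landmarks has already been filtered down to the valid keypoints, in the same order; a toy illustration (hypothetical values, not real map data):

```python
valid_keypoints_index = [2, 5]

# Old behaviour: landmarks indexed by keypoint id (one entry per keypoint).
landmarks_by_id = {0: "lm_a", 1: "lm_b", 2: "lm_c", 3: "lm_d", 4: "lm_e", 5: "lm_f"}
old = {i: landmarks_by_id[i] for i in valid_keypoints_index}
# {2: 'lm_c', 5: 'lm_f'}

# New behaviour: landmarks already filtered to the valid keypoints, in order.
landmarks_filtered = ["lm_c", "lm_f"]
new = {i: lm for i, lm in zip(valid_keypoints_index, landmarks_filtered)}
# {2: 'lm_c', 5: 'lm_f'}  -- equal only because landmarks was pre-filtered
```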
src/UNav_core/src/navigation/command.py (2 changes: 1 addition & 1 deletion)
@@ -9,7 +9,7 @@
console_handler.setFormatter(console_format)
logger.addHandler(console_handler)

def actions(current_pose,path_list,scale):
def actions(trajectory):
xc, yc, an = current_pose
action_list = []
for p in path_list:
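Reviewer note: actions() now takes a single trajectory argument, yet the lines shown still unpack current_pose and path_list, so the collapsed remainder of this hunk presumably derives them from trajectory. A purely hypothetical sketch of that unpacking (the real structure of trajectory is not visible in this diff):

```python
def actions(trajectory):
    # Hypothetical: how current_pose / path_list / scale might be recovered from
    # `trajectory`; the actual keys are not shown in the collapsed hunk.
    current_pose = trajectory["current_pose"]   # (xc, yc, an)
    path_list = trajectory["path_list"]
    scale = trajectory.get("scale", 1.0)

    xc, yc, an = current_pose
    action_list = []
    for p in path_list:
        ...  # unchanged logic continues below the fold
```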
src/UNav_core/src/track/hierarchical_localization.py (27 changes: 19 additions & 8 deletions)
@@ -121,7 +121,7 @@ def analyze_topk_results(self, topk_segments):

# Determine the segment with the highest total count
most_likely_segment = max(segment_wt_neighbor_counts, key=segment_wt_neighbor_counts.get)
success = (segment_wt_neighbor_counts[most_likely_segment] / len(topk_segments)) >= 0.3
success = (segment_wt_neighbor_counts[most_likely_segment] / len(topk_segments)) >= 0.1

return most_likely_segment, success
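Reviewer note: the acceptance bar in analyze_topk_results drops from 0.3 to 0.1, so the winning segment only needs weighted support of roughly 10 of the (now 100) retrieved frames instead of 30. An illustrative check with made-up counts:

```python
# Made-up weighted counts over 100 retrieved frames (stand-in for segment_wt_neighbor_counts)
counts = {"seg_a": 15, "seg_b": 12, "seg_c": 10}
num_topk = 100  # len(topk_segments)

best = max(counts, key=counts.get)         # "seg_a"
print(counts[best] / num_topk >= 0.1)      # True  -> accepted under the new 0.1 bar
print(counts[best] / num_topk >= 0.3)      # False -> would have been rejected at the old 0.3 bar
```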

@@ -223,15 +223,18 @@ def feature_matching_lightglue(self,image,topk):
Match the local features between query image and retrieved database images
"""
with torch.inference_mode(): # Use torch.no_grad during inference
feats0 = self.local_feature_extractor(image)
image_np = np.array(image)
feats0 = self.local_feature_extractor(image_np)
pts0_list,pts1_list,lms_list=[],[],[]
max_len=0


valid_db_frame_name = []
for i in topk[0]:
pts0,pts1,lms=self.local_feature_matcher.lightglue(i, feats0)

feat_inliner_size=pts0.shape[0]
if feat_inliner_size>self.thre:
valid_db_frame_name.append(self.db_name[i])
pts0_list.append(pts0)
pts1_list.append(pts1)
lms_list.append(lms)
Expand All @@ -240,7 +243,7 @@ def feature_matching_lightglue(self,image,topk):
del pts0,pts1,lms
del self.query_desc, feats0
torch.cuda.empty_cache()
return pts0_list,pts1_list,lms_list,max_len
return valid_db_frame_name, pts0_list,pts1_list,lms_list,max_len

def feature_matching_superglue(self,image,topk):
"""
@@ -362,7 +365,9 @@ def _determine_next_segment(self, candidates):

def get_location(self, image):
self.logger.debug("Start image retrieval")

topk=self.global_retrieval(image)

valid_db_frame_name = []
next_segment_id = None

@@ -377,12 +382,18 @@ def get_location(self, image):
if self.batch_mode:
valid_db_frame_name, pts0_list,pts1_list,lms_list,max_matched_num=self.feature_matching_lightglue_batch(image,topk)
else:
pts0_list,pts1_list,lms_list,max_matched_num=self.feature_matching_lightglue(image,topk)
valid_db_frame_name, pts0_list,pts1_list,lms_list,max_matched_num=self.feature_matching_lightglue(image,topk)

self.logger.debug("Start geometric verification")
final_candidates, feature2D,landmark3D=self.geometric_verification(valid_db_frame_name, pts0_list, pts1_list, lms_list, max_matched_num)
next_segment_id = self._determine_next_segment(final_candidates)
if len(pts0_list)>0:
final_candidates, feature2D,landmark3D=self.geometric_verification(valid_db_frame_name, pts0_list, pts1_list, lms_list, max_matched_num)
if len(final_candidates)>0:
next_segment_id = self._determine_next_segment(final_candidates)
else:
return None, None
else:
return None, None

self.logger.debug("Estimate the camera pose using PnP algorithm")
pose=self.pnp(image,feature2D,landmark3D)

return pose, next_segment_id
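Reviewer note: feature_matching_lightglue now also returns valid_db_frame_name (only frames whose LightGlue inlier count exceeds self.thre), and get_location returns (None, None) when no frame clears that threshold or geometric verification leaves no candidates. Callers therefore have to guard against a failed localization; a minimal caller-side sketch (the surrounding names are illustrative, not from this PR):

```python
def localize_frame(localizer, image):
    """Caller-side guard for the new (None, None) failure path; names are illustrative."""
    pose, next_segment_id = localizer.get_location(image)
    if pose is None:
        # No retrieved frame cleared the inlier threshold, or geometric verification
        # left no candidates: report a failed localization instead of crashing on None.
        return {"pose": None, "segment": None}
    return {"pose": pose, "segment": next_segment_id}
```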
Binary file removed src/__pycache__/app.cpython-310.pyc
Binary file removed src/__pycache__/app.cpython-38.pyc
Binary file removed src/__pycache__/config.cpython-38.pyc
Binary file removed src/__pycache__/db.cpython-38.pyc
Binary file removed src/__pycache__/server_manager.cpython-38.pyc
Binary file removed src/__pycache__/socketio_handlers.cpython-38.pyc
Binary file removed src/modules/__pycache__/__init__.cpython-310.pyc
Binary file removed src/modules/__pycache__/__init__.cpython-38.pyc
Binary file removed src/modules/__pycache__/app.cpython-38.pyc
Binary file removed src/modules/__pycache__/db.cpython-38.pyc
Binary file removed src/modules/__pycache__/models.cpython-38.pyc
Binary file removed src/modules/config/__pycache__/settings.cpython-38.pyc
src/modules/routes/data_routes.py (41 changes: 39 additions & 2 deletions)
@@ -4,6 +4,10 @@
from PIL import Image
import io
import numpy as np
from utils.time_logger import TimeLogger
import time

time_logger = TimeLogger()

def register_data_routes(app, server, socketio):

@@ -28,6 +32,37 @@ def localize():

return jsonify({'pose': rounded_pose})

@app.route('/list_images', methods=['GET'])
def get_images_list():
"""
Return testing images list.
"""
config = server.config['location']
target_place = config['place']
target_building = config['building']
target_floor = config['floor']

# Build the path based on the place, building, and floor
data_path = os.path.join(server.root, "logs", target_place, target_building, target_floor)

if not os.path.exists(data_path):
return jsonify({"error": "Invalid path"}), 400

images_dict = {}

# Traverse through directories and collect image names
for root, ids, _ in os.walk(data_path):
# We are interested in directories that match ids (like '00150')
for id in ids:
image_dir = os.path.join(root, id, 'images')
try:
files = sorted(os.listdir(image_dir))
images_dict[id] = [f for f in files if f.endswith('.png')]
except:
pass

return jsonify(images_dict), 200
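Reviewer note: as written, the walk above visits every directory level under data_path, and the bare except: pass silently swallows any I/O error. A tighter variant, offered only as a sketch (it assumes the id directories such as '00150' sit directly under data_path):

```python
import os

def collect_images(data_path):
    """Map each id directory (e.g. '00150') to its sorted .png files."""
    images_dict = {}
    for entry in sorted(os.listdir(data_path)):
        image_dir = os.path.join(data_path, entry, 'images')
        if not os.path.isdir(image_dir):
            continue  # skip stray files and directories without an images/ folder
        images_dict[entry] = sorted(
            f for f in os.listdir(image_dir) if f.endswith('.png')
        )
    return images_dict
```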

@app.route('/get_options', methods=['GET'])
def get_options():
"""
@@ -127,8 +162,10 @@ def planner():
data = request.json
session_id = data.get('session_id')

trajectory = server.handle_navigation(session_id)

navigation_start_time = time.time()
trajectory,_ = server.handle_navigation(session_id)
time_logger.log_navigation_time(navigation_start_time, trajectory)

socketio.emit('planner_update', {'trajectory': trajectory})
return jsonify({'trajectory': trajectory})
# except ValueError as e:
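Reviewer note: two things change in planner(): server.handle_navigation now returns a tuple whose first element is the trajectory, and the elapsed time is reported through a module-level TimeLogger. utils/time_logger.py is not included in this diff, so only the two call sites constrain its interface; a hypothetical minimal shape consistent with them (an assumption, not the PR's implementation):

```python
import logging
import time

class TimeLogger:
    """Hypothetical sketch matching the call sites in data_routes.py and frame_routes.py."""

    def __init__(self):
        self.logger = logging.getLogger("timing")

    def log_navigation_time(self, start_time, trajectory):
        elapsed = time.time() - start_time
        n = len(trajectory) if trajectory else 0
        self.logger.info("navigation took %.3fs (%d waypoints)", elapsed, n)

    def log_localization_time(self, session_id, building, floor, start_time, pose_update_info):
        elapsed = time.time() - start_time
        ok = pose_update_info.get('pose') is not None
        self.logger.info("localization for %s (%s/%s) took %.3fs, success=%s",
                         session_id, building, floor, elapsed, ok)
```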
src/modules/routes/frame_routes.py (34 changes: 28 additions & 6 deletions)
@@ -5,6 +5,8 @@
import os
import time
import cv2
import numpy as np
from utils.time_logger import TimeLogger

from utils.cache_manager import CacheManager

@@ -18,6 +20,7 @@
# Configuration for localization retries and timeouts
COARSE_LOCALIZE_THRESHOLD = 5 # Number of failures before doing a coarse localize
TIMEOUT_SECONDS = 20 # Time since the last successful localize before doing a coarse localize
time_logger = TimeLogger()

def register_frame_routes(app, server, socketio):
@app.route('/stream_frame', methods=['POST'])
@@ -34,18 +37,34 @@ def stream_frame():
# Convert to RGB format
r, g, b = frame.split()
frame = Image.merge("RGB", (b, g, r))

original_width, original_height = frame.size

new_width = 640
new_height = int((new_width / original_width) * original_height)

# Resize the image
resized_image = frame.resize((new_width, new_height))

image_np = np.array(resized_image)

if frame is not None:
client_frames[session_id] = frame
client_frames[session_id] = image_np
response_data = {'status': 'frame received'}

# Perform localization if requested
if do_localize:
pose_update_info = server.handle_localization(session_id, frame)
localization_start_time = time.time()
pose_update_info = server.handle_localization(session_id, image_np)
# Extract building and floor from pose_update_info, defaulting to 'N/A' if None
building = pose_update_info.get('building')
floor = pose_update_info.get('floor')
time_logger.log_localization_time(session_id, building, floor, localization_start_time, pose_update_info)

response_data['pose'] = pose_update_info.get('pose')

buffered = io.BytesIO()
frame.save(buffered, format="JPEG")
resized_image.save(buffered, format="JPEG")
new_frame_base64 = base64.b64encode(buffered.getvalue()).decode('utf-8')
response_data['floorplan_base64'] = pose_update_info.get('floorplan_base64')
socketio.emit('camera_frame', {'session_id': session_id, 'frame': new_frame_base64})
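Reviewer note: the uploaded frame is now downscaled to a fixed 640-pixel width (keeping the aspect ratio), converted to a NumPy array for caching in client_frames and for handle_localization, and the resized PIL image, not the original upload, is what gets JPEG-encoded for the camera_frame emit. The resize arithmetic in isolation:

```python
from PIL import Image

def downscale_to_width(frame: Image.Image, new_width: int = 640) -> Image.Image:
    """Resize keeping the aspect ratio, as stream_frame now does."""
    original_width, original_height = frame.size
    new_height = int((new_width / original_width) * original_height)
    return frame.resize((new_width, new_height))

# Example: a 1920x1080 upload becomes 640x360 before localization.
```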
@@ -76,12 +95,15 @@ def get_frame(client_id):
else:
return jsonify({'error': 'No frame available for this client'}), 404

@app.route('/get_image/<id>/<image_name>', methods=['GET'])
def get_image(id, image_name):
@app.route('/get_image/<id>/<imageName>', methods=['POST'])
def get_image(id, imageName):
"""
Retrieve a specific image associated with a session and image name.
"""
image_path = os.path.join(server.root, 'logs', server.config['location']['place'], server.config['location']['building'], server.config['location']['floor'], id, 'images', image_name)
data = request.json
session_id = data.get('username')

image_path = os.path.join(server.root, 'logs', server.config['location']['place'], server.config['location']['building'], server.config['location']['floor'], id, 'images', imageName)
if os.path.exists(image_path):
return send_file(image_path, mimetype='image/png')
else:
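Reviewer note: /get_image/<id>/<imageName> now only accepts POST and reads a JSON body with a username field, even though session_id is not used in the lines shown; existing GET callers will start receiving 405 responses. A client-side sketch of the new contract (URL path taken from the route; the host, id, and file name are illustrative):

```python
import requests

resp = requests.post(
    "http://localhost:5000/get_image/00150/00001.png",  # illustrative id and image name
    json={"username": "demo_user"},                      # read server-side as data.get('username')
)
if resp.ok:
    with open("00001.png", "wb") as f:
        f.write(resp.content)                            # served with mimetype image/png
```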