diff --git a/behavior_metrics/brains/CARLA/brain_carla_subjective_vision_deep_learning_previous_v.py b/behavior_metrics/brains/CARLA/brain_carla_subjective_vision_deep_learning_previous_v.py new file mode 100644 index 00000000..32211898 --- /dev/null +++ b/behavior_metrics/brains/CARLA/brain_carla_subjective_vision_deep_learning_previous_v.py @@ -0,0 +1,242 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +import csv +import cv2 +import math +import numpy as np +import threading +import time +import carla +from PIL import Image +from os import path +from albumentations import ( + Compose, Normalize, RandomRain, RandomBrightness, RandomShadow, RandomSnow, RandomFog, RandomSunFlare +) +from utils.constants import PRETRAINED_MODELS_DIR, ROOT_PATH +from utils.logger import logger +from traceback import print_exc + +PRETRAINED_MODELS = ROOT_PATH + '/' + PRETRAINED_MODELS_DIR + 'carla_tf_models/' + +from tensorflow.python.framework.errors_impl import NotFoundError +from tensorflow.python.framework.errors_impl import UnimplementedError +import tensorflow as tf + +import os +# os.environ['CUDA_VISIBLE_DEVICES'] = '-1' + +gpus = tf.config.experimental.list_physical_devices('GPU') +for gpu in gpus: + tf.config.experimental.set_memory_growth(gpu, True) + + +class Brain: + + def __init__(self, sensors, actuators, handler, model, config=None): + self.camera_0 = sensors.get_camera('camera_0') + self.camera_1 = sensors.get_camera('camera_1') + self.camera_2 = sensors.get_camera('camera_2') + self.camera_3 = sensors.get_camera('camera_3') + + self.cameras_first_images = [] + + self.pose = sensors.get_pose3d('pose3d_0') + + self.bird_eye_view = sensors.get_bird_eye_view('bird_eye_view_0') + + self.motors = actuators.get_motor('motors_0') + self.handler = handler + self.config = config + self.inference_times = [] + self.gpu_inference = True if tf.test.gpu_device_name() else False + + self.threshold_image = np.zeros((640, 360, 3), np.uint8) + self.color_image = np.zeros((640, 360, 3), np.uint8) + ''' + self.lock = threading.Lock() + self.threshold_image_lock = threading.Lock() + self.color_image_lock = threading.Lock() + ''' + self.cont = 0 + self.iteration = 0 + + # self.previous_timestamp = 0 + # self.previous_image = 0 + + self.previous_commanded_throttle = None + self.previous_commanded_steer = None + self.previous_commanded_brake = None + self.suddenness_distance = [] + self.suddenness_distance_throttle = [] + self.suddenness_distance_steer = [] + self.suddenness_distance_break_command = [] + + client = carla.Client('localhost', 2000) + client.set_timeout(10.0) # seconds + self.world = client.get_world() + self.world.unload_map_layer(carla.MapLayer.Particles) + + time.sleep(5) + self.vehicle = self.world.get_actors().filter('vehicle.*')[0] + + if model: + if not path.exists(PRETRAINED_MODELS + model): + logger.info("File " + model + " cannot be found in " + PRETRAINED_MODELS) + logger.info("** Load TF model **") + self.net = tf.keras.models.load_model(PRETRAINED_MODELS + model) + logger.info("** Loaded TF model **") + else: + logger.info("** Brain not loaded **") + logger.info("- Models path: " + PRETRAINED_MODELS) + logger.info("- Model: " + str(model)) + + self.previous_speed = 0 + + + def update_frame(self, frame_id, data): + """Update the information to be shown in one of the GUI's frames. + + Arguments: + frame_id {str} -- Id of the frame that will represent the data + data {*} -- Data to be shown in the frame. 
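+
+        Note: below, non-square frames are zero-padded to a square so the
+        GUI keeps the aspect ratio. An illustrative sketch of that padding
+        (the 480x640 shape is an example, not fixed by this brain):
+
+            import numpy as np
+            frame = np.zeros((480, 640, 3), np.uint8)   # example camera frame
+            diff = frame.shape[1] - frame.shape[0]      # 160 columns wider
+            top, bottom = diff // 2, diff // 2          # pad rows evenly
+            padded = np.pad(frame, ((top, bottom), (0, 0), (0, 0)),
+                            mode='constant', constant_values=0)
+            assert padded.shape == (640, 640, 3)
+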
Depending on the type of frame (rgbimage, laser, pose3d, etc) + """ + if data.shape[0] != data.shape[1]: + if data.shape[0] > data.shape[1]: + difference = data.shape[0] - data.shape[1] + extra_left, extra_right = int(difference/2), int(difference/2) + extra_top, extra_bottom = 0, 0 + else: + difference = data.shape[1] - data.shape[0] + extra_left, extra_right = 0, 0 + extra_top, extra_bottom = int(difference/2), int(difference/2) + + + data = np.pad(data, ((extra_top, extra_bottom), (extra_left, extra_right), (0, 0)), mode='constant', constant_values=0) + + self.handler.update_frame(frame_id, data) + + def update_pose(self, pose_data): + self.handler.update_pose3d(pose_data) + + def execute(self): + image = self.camera_0.getImage().data + image_1 = self.camera_1.getImage().data + image_2 = self.camera_2.getImage().data + image_3 = self.camera_3.getImage().data + + cropped = image[230:-1,:] + if self.cont < 20: + self.cont += 1 + + bird_eye_view_1 = self.bird_eye_view.getImage(self.vehicle) + bird_eye_view_1 = cv2.cvtColor(bird_eye_view_1, cv2.COLOR_BGR2RGB) + + if self.cameras_first_images == []: + self.cameras_first_images.append(image) + self.cameras_first_images.append(image_1) + self.cameras_first_images.append(image_2) + self.cameras_first_images.append(image_3) + self.cameras_first_images.append(bird_eye_view_1) + + self.cameras_last_images = [ + image, + image_1, + image_2, + image_3, + bird_eye_view_1 + ] + + self.update_frame('frame_1', image) + self.update_frame('frame_2', image_2) + self.update_frame('frame_3', image_3) + + self.update_frame('frame_0', bird_eye_view_1) + + self.update_pose(self.pose.getPose3d()) + + image_shape=(200, 66) + img = cv2.resize(cropped, image_shape)/255.0 + + """AUGMENTATIONS_TEST = Compose([ + Normalize() + ]) + image = AUGMENTATIONS_TEST(image=img_base) + img = image["image"]""" + + #velocity_dim = np.full((150, 50), 0.5) + velocity_normalize = np.interp(self.previous_speed, (0, 100), (0, 1)) + velocity_dim = np.full((66, 200), velocity_normalize) + new_img_vel = np.dstack((img, velocity_dim)) + img = new_img_vel + + img = np.expand_dims(img, axis=0) + start_time = time.time() + try: + prediction = self.net.predict(img) + self.inference_times.append(time.time() - start_time) + throttle_brake_val = np.interp(prediction[0][0], (0, 1), (-1, 1)) + steer = np.interp(prediction[0][1], (0, 1), (-1, 1)) + if throttle_brake_val >= 0: # throttle + throttle = throttle_brake_val + break_command = 0 + else: # brake + throttle = 0 + break_command = -1*throttle_brake_val + + speed = self.vehicle.get_velocity() + vehicle_speed = 3.6 * math.sqrt(speed.x**2 + speed.y**2 + speed.z**2) + self.previous_speed = vehicle_speed + + if self.cont < 20: + self.motors.sendThrottle(1.0) + self.motors.sendSteer(0.0) + self.motors.sendBrake(0) + else: + self.motors.sendThrottle(throttle) + self.motors.sendSteer(steer) + self.motors.sendBrake(break_command) + + if self.previous_commanded_throttle != None: + a = np.array((throttle, steer, break_command)) + b = np.array((self.previous_commanded_throttle, self.previous_commanded_steer, self.previous_commanded_brake)) + distance = np.linalg.norm(a - b) + self.suddenness_distance.append(distance) + + a = np.array((throttle)) + b = np.array((self.previous_commanded_throttle)) + distance_throttle = np.linalg.norm(a - b) + self.suddenness_distance_throttle.append(distance_throttle) + + a = np.array((steer)) + b = np.array((self.previous_commanded_steer)) + distance_steer = np.linalg.norm(a - b) + 
self.suddenness_distance_steer.append(distance_steer) + + a = np.array((break_command)) + b = np.array((self.previous_commanded_brake)) + distance_break_command = np.linalg.norm(a - b) + self.suddenness_distance_break_command.append(distance_break_command) + + self.previous_commanded_throttle = throttle + self.previous_commanded_steer = steer + self.previous_commanded_brake = break_command + except NotFoundError as ex: + logger.info('Error inside brain: NotFoundError!') + logger.warning(type(ex).__name__) + print_exc() + raise Exception(ex) + except UnimplementedError as ex: + logger.info('Error inside brain: UnimplementedError!') + logger.warning(type(ex).__name__) + print_exc() + raise Exception(ex) + except Exception as ex: + logger.info('Error inside brain: Exception!') + logger.warning(type(ex).__name__) + print_exc() + raise Exception(ex) + + + + + diff --git a/behavior_metrics/brains/CARLA/brain_carla_bird_eye_deep_learning_torch.py b/behavior_metrics/brains/CARLA/pytorch/brain_carla_bird_eye_deep_learning_torch.py similarity index 86% rename from behavior_metrics/brains/CARLA/brain_carla_bird_eye_deep_learning_torch.py rename to behavior_metrics/brains/CARLA/pytorch/brain_carla_bird_eye_deep_learning_torch.py index 13373d43..881ec4db 100644 --- a/behavior_metrics/brains/CARLA/brain_carla_bird_eye_deep_learning_torch.py +++ b/behavior_metrics/brains/CARLA/pytorch/brain_carla_bird_eye_deep_learning_torch.py @@ -1,8 +1,11 @@ -from torchvision import transforms from PIL import Image from brains.f1.torch_utils.pilotnet import PilotNet from utils.constants import PRETRAINED_MODELS_DIR, ROOT_PATH from os import path +from albumentations import ( + Compose, Normalize +) +from albumentations.pytorch.transforms import ToTensorV2 import numpy as np @@ -43,8 +46,9 @@ def __init__(self, sensors, actuators, model=None, handler=None, config=None): self.gpu_inference = config['GPU'] self.device = torch.device('cuda' if (torch.cuda.is_available() and self.gpu_inference) else 'cpu') self.first_image = None - self.transformations = transforms.Compose([ - transforms.ToTensor() + self.transformations = Compose([ + Normalize(), + ToTensorV2() ]) self.suddenness_distance = [] @@ -119,10 +123,10 @@ def execute(self): self.update_frame('frame_0', bird_eye_view_1) try: - #img = cv2.resize(bird_eye_view_1, (int(200), int(66))) img = cv2.resize(bird_eye_view_1, (int(66), int(200))) - img = Image.fromarray(img) - image = self.transformations(img).unsqueeze(0) + image = self.transformations(image=img) + image = image['image'] + image = image.unsqueeze(0) image = FLOAT(image).to(self.device) start_time = time.time() @@ -137,14 +141,19 @@ def execute(self): speed = self.vehicle.get_velocity() vehicle_speed = 3.6 * math.sqrt(speed.x**2 + speed.y**2 + speed.z**2) - if vehicle_speed < 5: - self.motors.sendThrottle(1.0) - self.motors.sendSteer(0.0) - self.motors.sendBrake(0) - else: - self.motors.sendThrottle(throttle) + if vehicle_speed > 30: + self.motors.sendThrottle(0) self.motors.sendSteer(steer) self.motors.sendBrake(break_command) + else: + if vehicle_speed < 5: + self.motors.sendThrottle(1.0) + self.motors.sendSteer(0.0) + self.motors.sendBrake(0) + else: + self.motors.sendThrottle(throttle) + self.motors.sendSteer(steer) + self.motors.sendBrake(break_command) except Exception as err: print(err) diff --git a/behavior_metrics/brains/CARLA/pytorch/brain_carla_bird_eye_deep_learning_torch_optimized.py b/behavior_metrics/brains/CARLA/pytorch/brain_carla_bird_eye_deep_learning_torch_optimized.py new file mode 
100644 index 00000000..892b53a7 --- /dev/null +++ b/behavior_metrics/brains/CARLA/pytorch/brain_carla_bird_eye_deep_learning_torch_optimized.py @@ -0,0 +1,162 @@ +from PIL import Image +from brains.f1.torch_utils.pilotnet import PilotNet +from utils.constants import PRETRAINED_MODELS_DIR, ROOT_PATH +from os import path +from albumentations import ( + Compose, Normalize +) +from albumentations.pytorch.transforms import ToTensorV2 + +import numpy as np + +import torch +import torchvision +import cv2 +import time +import os +import math +import carla + +PRETRAINED_MODELS = ROOT_PATH + '/' + PRETRAINED_MODELS_DIR + 'carla_tf_models/' +FLOAT = torch.FloatTensor + +class Brain: + """Specific brain for the CARLA robot. See header.""" + + def __init__(self, sensors, actuators, model=None, handler=None, config=None): + """Constructor of the class. + + Arguments: + sensors {robot.sensors.Sensors} -- Sensors instance of the robot + actuators {robot.actuators.Actuators} -- Actuators instance of the robot + + Keyword Arguments: + handler {brains.brain_handler.Brains} -- Handler of the current brain. Communication with the controller + (default: {None}) + """ + self.motors = actuators.get_motor('motors_0') + self.camera_0 = sensors.get_camera('camera_0') + self.camera_1 = sensors.get_camera('camera_1') + self.camera_2 = sensors.get_camera('camera_2') + self.camera_3 = sensors.get_camera('camera_3') + self.bird_eye_view = sensors.get_bird_eye_view('bird_eye_view_0') + self.handler = handler + self.cont = 0 + self.inference_times = [] + self.gpu_inference = config['GPU'] + self.device = torch.device('cuda' if (torch.cuda.is_available() and self.gpu_inference) else 'cpu') + self.first_image = None + self.transformations = Compose([ + Normalize(), + ToTensorV2() + ]) + + self.suddenness_distance = [] + self.previous_v = None + self.previous_w = None + + if config: + if 'ImageCrop' in config.keys(): + self.cropImage = config['ImageCrop'] + else: + self.cropImage = True + + if model: + if not path.exists(PRETRAINED_MODELS + model): + print("File " + model + " cannot be found in " + PRETRAINED_MODELS) + + #if config['UseOptimized']: + self.net = torch.jit.load(PRETRAINED_MODELS + model).to(self.device) +# self.clean_model() + #else: + # self.net = PilotNet((200,66,3), 3).to(self.device) + # self.net.load_state_dict(torch.load(PRETRAINED_MODELS + model,map_location=self.device)) + # self.net.eval() + else: + print("Brain not loaded") + + client = carla.Client('localhost', 2000) + client.set_timeout(10.0) # seconds + world = client.get_world() + + time.sleep(5) + self.vehicle = world.get_actors().filter('vehicle.*')[0] + + def update_frame(self, frame_id, data): + """Update the information to be shown in one of the GUI's frames. + + Arguments: + frame_id {str} -- Id of the frame that will represent the data + data {*} -- Data to be shown in the frame. Depending on the type of frame (rgbimage, laser, pose3d, etc) + """ + if data.shape[0] != data.shape[1]: + if data.shape[0] > data.shape[1]: + difference = data.shape[0] - data.shape[1] + extra_left, extra_right = int(difference/2), int(difference/2) + extra_top, extra_bottom = 0, 0 + else: + difference = data.shape[1] - data.shape[0] + extra_left, extra_right = 0, 0 + extra_top, extra_bottom = int(difference/2), int(difference/2) + + + data = np.pad(data, ((extra_top, extra_bottom), (extra_left, extra_right), (0, 0)), mode='constant', constant_values=0) + + self.handler.update_frame(frame_id, data) + + def execute(self): + """Main loop of the brain. 
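+
+        The steer output of the network is produced in [0, 1] and is mapped
+        back to CARLA's [-1, 1] range in the body below via
+        prediction[0][1] * (1 - (-1)) + (-1). A minimal sketch of the same
+        rescaling (0.75 is just an illustrative value):
+
+            import numpy as np
+            p = 0.75                          # normalized steer in [0, 1]
+            steer = p * (1 - (-1)) + (-1)     # linear rescale -> 0.5
+            assert steer == np.interp(p, (0, 1), (-1, 1))
+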
This will be called iteratively each TIME_CYCLE (see pilot.py)""" + + self.cont += 1 + + image = self.camera_0.getImage().data + image_1 = self.camera_1.getImage().data + image_2 = self.camera_2.getImage().data + image_3 = self.camera_3.getImage().data + + bird_eye_view_1 = self.bird_eye_view.getImage(self.vehicle) + bird_eye_view_1 = cv2.cvtColor(bird_eye_view_1, cv2.COLOR_BGR2RGB) + + self.update_frame('frame_1', image_1) + self.update_frame('frame_2', image_2) + self.update_frame('frame_3', image_3) + + self.update_frame('frame_0', bird_eye_view_1) + + try: + img = cv2.resize(bird_eye_view_1, (int(66), int(200))) + image = self.transformations(image=img) + image = image['image'] + image = image.unsqueeze(0) + image = FLOAT(image).to(self.device) + + start_time = time.time() + with torch.no_grad(): + prediction = self.net(image).cpu().numpy() if self.gpu_inference else self.net(image).numpy() + self.inference_times.append(time.time() - start_time) + + throttle = prediction[0][0] + steer = prediction[0][1] * (1 - (-1)) + (-1) + break_command = prediction[0][2] + + speed = self.vehicle.get_velocity() + vehicle_speed = 3.6 * math.sqrt(speed.x**2 + speed.y**2 + speed.z**2) + + if vehicle_speed > 30: + self.motors.sendThrottle(0) + self.motors.sendSteer(steer) + self.motors.sendBrake(1.0) + else: + if vehicle_speed < 5: + self.motors.sendThrottle(1.0) + self.motors.sendSteer(0.0) + self.motors.sendBrake(0) + else: + self.motors.sendThrottle(throttle) + self.motors.sendSteer(steer) + self.motors.sendBrake(break_command) + + except Exception as err: + print(err) + + diff --git a/behavior_metrics/brains/CARLA/pytorch/brain_carla_bird_eye_deep_learning_torch_tensorrt_float.py b/behavior_metrics/brains/CARLA/pytorch/brain_carla_bird_eye_deep_learning_torch_tensorrt_float.py new file mode 100644 index 00000000..581b61d6 --- /dev/null +++ b/behavior_metrics/brains/CARLA/pytorch/brain_carla_bird_eye_deep_learning_torch_tensorrt_float.py @@ -0,0 +1,165 @@ +from PIL import Image +from brains.f1.torch_utils.pilotnet import PilotNet +from utils.constants import PRETRAINED_MODELS_DIR, ROOT_PATH +from os import path +from albumentations import ( + Compose, Normalize +) +from albumentations.pytorch.transforms import ToTensorV2 + +import numpy as np + +import torch +import torchvision +import cv2 +import time +import os +import math +import carla + +PRETRAINED_MODELS = ROOT_PATH + '/' + PRETRAINED_MODELS_DIR + 'carla_tf_models/' +FLOAT = torch.FloatTensor + +class Brain: + """Specific brain for the CARLA robot. See header.""" + + def __init__(self, sensors, actuators, model=None, handler=None, config=None): + """Constructor of the class. + + Arguments: + sensors {robot.sensors.Sensors} -- Sensors instance of the robot + actuators {robot.actuators.Actuators} -- Actuators instance of the robot + + Keyword Arguments: + handler {brains.brain_handler.Brains} -- Handler of the current brain. 
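+
+        The model file loaded with torch.jit.load() below is assumed to be a
+        TorchScript module compiled ahead of time with Torch-TensorRT. A
+        hedged offline-export sketch under that assumption ('pilotnet.pth'
+        and 'pilotnet_trt.ts' are illustrative file names):
+
+            import torch
+            import torch_tensorrt
+            from brains.f1.torch_utils.pilotnet import PilotNet
+
+            model = PilotNet((200, 66, 3), 3)
+            model.load_state_dict(torch.load('pilotnet.pth'))
+            model = model.eval().cuda()
+            trt_model = torch_tensorrt.compile(
+                model,
+                inputs=[torch_tensorrt.Input((1, 3, 200, 66))],
+                enabled_precisions={torch.float, torch.half})  # FP32/FP16
+            torch.jit.save(trt_model, 'pilotnet_trt.ts')
+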
Communication with the controller
+            (default: {None})
+        """
+        self.motors = actuators.get_motor('motors_0')
+        self.camera_0 = sensors.get_camera('camera_0')
+        self.camera_1 = sensors.get_camera('camera_1')
+        self.camera_2 = sensors.get_camera('camera_2')
+        self.camera_3 = sensors.get_camera('camera_3')
+        self.bird_eye_view = sensors.get_bird_eye_view('bird_eye_view_0')
+        self.handler = handler
+        self.cont = 0
+        self.inference_times = []
+        self.gpu_inference = config['GPU']
+        self.device = torch.device('cuda' if (torch.cuda.is_available() and self.gpu_inference) else 'cpu')
+        self.first_image = None
+        self.transformations = Compose([
+            Normalize(),
+            ToTensorV2()
+        ])
+
+        self.suddenness_distance = []
+        self.previous_v = None
+        self.previous_w = None
+
+        if config:
+            if 'ImageCrop' in config.keys():
+                self.cropImage = config['ImageCrop']
+            else:
+                self.cropImage = True
+
+        if model:
+            if not path.exists(PRETRAINED_MODELS + model):
+                print("File " + model + " cannot be found in " + PRETRAINED_MODELS)
+
+            if config['UseOptimized']:
+                print('Loading optimized model...')
+                print(PRETRAINED_MODELS + model)
+
+                # Float and half-precision TensorRT
+                self.net = torch.jit.load(PRETRAINED_MODELS + model).eval().to(self.device)
+#                self.clean_model()
+            else:
+                self.net = PilotNet((200,66,3), 3).to(self.device)
+                self.net.load_state_dict(torch.load(PRETRAINED_MODELS + model, map_location=self.device))
+                self.net.eval()  # inference mode, as in the optimized branch
+        else:
+            print("Brain not loaded")
+
+        client = carla.Client('localhost', 2000)
+        client.set_timeout(10.0) # seconds
+        world = client.get_world()
+
+        time.sleep(5)
+        self.vehicle = world.get_actors().filter('vehicle.*')[0]
+
+    def update_frame(self, frame_id, data):
+        """Update the information to be shown in one of the GUI's frames.
+
+        Arguments:
+            frame_id {str} -- Id of the frame that will represent the data
+            data {*} -- Data to be shown in the frame. Depending on the type of frame (rgbimage, laser, pose3d, etc)
+        """
+        if data.shape[0] != data.shape[1]:
+            if data.shape[0] > data.shape[1]:
+                difference = data.shape[0] - data.shape[1]
+                extra_left, extra_right = int(difference/2), int(difference/2)
+                extra_top, extra_bottom = 0, 0
+            else:
+                difference = data.shape[1] - data.shape[0]
+                extra_left, extra_right = 0, 0
+                extra_top, extra_bottom = int(difference/2), int(difference/2)
+
+
+            data = np.pad(data, ((extra_top, extra_bottom), (extra_left, extra_right), (0, 0)), mode='constant', constant_values=0)
+
+        self.handler.update_frame(frame_id, data)
+
+    def execute(self):
+        """Main loop of the brain.
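+
+        The body below converts the simulator velocity vector to km/h
+        (3.6 * sqrt(vx^2 + vy^2 + vz^2)) and then clamps the network commands
+        at the extremes: zero throttle above 30 km/h, a fixed launch command
+        below 5 km/h. A self-contained sketch of the same rule (the function
+        name is illustrative; thresholds are copied from this file):
+
+            import math
+
+            def clamp_commands(vx, vy, vz, throttle, steer, brake):
+                speed_kmh = 3.6 * math.sqrt(vx**2 + vy**2 + vz**2)
+                if speed_kmh > 30:    # too fast: cut throttle, keep brake
+                    return 0.0, steer, brake
+                if speed_kmh < 5:     # too slow: launch at full throttle
+                    return 1.0, 0.0, 0.0
+                return throttle, steer, brake
+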
This will be called iteratively each TIME_CYCLE (see pilot.py)""" + + self.cont += 1 + + image = self.camera_0.getImage().data + image_1 = self.camera_1.getImage().data + image_2 = self.camera_2.getImage().data + image_3 = self.camera_3.getImage().data + + bird_eye_view_1 = self.bird_eye_view.getImage(self.vehicle) + bird_eye_view_1 = cv2.cvtColor(bird_eye_view_1, cv2.COLOR_BGR2RGB) + + self.update_frame('frame_1', image_1) + self.update_frame('frame_2', image_2) + self.update_frame('frame_3', image_3) + + self.update_frame('frame_0', bird_eye_view_1) + + try: + img = cv2.resize(bird_eye_view_1, (int(66), int(200))) + image = self.transformations(image=img) + image = image['image'] + image = image.unsqueeze(0) + image = FLOAT(image).to(self.device) + + start_time = time.time() + with torch.no_grad(): + prediction = self.net(image).cpu().numpy() if self.gpu_inference else self.net(image).numpy() + self.inference_times.append(time.time() - start_time) + + throttle = prediction[0][0] + steer = prediction[0][1] * (1 - (-1)) + (-1) + break_command = prediction[0][2] + + speed = self.vehicle.get_velocity() + vehicle_speed = 3.6 * math.sqrt(speed.x**2 + speed.y**2 + speed.z**2) + + if vehicle_speed > 30: + self.motors.sendThrottle(0) + self.motors.sendSteer(steer) + self.motors.sendBrake(break_command) + else: + if vehicle_speed < 5: + self.motors.sendThrottle(1.0) + self.motors.sendSteer(0.0) + self.motors.sendBrake(0) + else: + self.motors.sendThrottle(throttle) + self.motors.sendSteer(steer) + self.motors.sendBrake(break_command) + + except Exception as err: + print(err) + + diff --git a/behavior_metrics/brains/CARLA/pytorch/brain_carla_bird_eye_deep_learning_torch_tensorrt_int_8.py b/behavior_metrics/brains/CARLA/pytorch/brain_carla_bird_eye_deep_learning_torch_tensorrt_int_8.py new file mode 100644 index 00000000..4cd278bf --- /dev/null +++ b/behavior_metrics/brains/CARLA/pytorch/brain_carla_bird_eye_deep_learning_torch_tensorrt_int_8.py @@ -0,0 +1,170 @@ +from PIL import Image +from brains.f1.torch_utils.pilotnet import PilotNet +from utils.constants import PRETRAINED_MODELS_DIR, ROOT_PATH +from os import path +from albumentations import ( + Compose, Normalize +) +from albumentations.pytorch.transforms import ToTensorV2 + +import numpy as np + +import torch +import torchvision +import cv2 +import time +import os +import math +import carla + +PRETRAINED_MODELS = ROOT_PATH + '/' + PRETRAINED_MODELS_DIR + 'carla_tf_models/' +FLOAT = torch.FloatTensor + +class Brain: + """Specific brain for the CARLA robot. See header.""" + + def __init__(self, sensors, actuators, model=None, handler=None, config=None): + """Constructor of the class. + + Arguments: + sensors {robot.sensors.Sensors} -- Sensors instance of the robot + actuators {robot.actuators.Actuators} -- Actuators instance of the robot + + Keyword Arguments: + handler {brains.brain_handler.Brains} -- Handler of the current brain. 
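+
+        Torch-TensorRT INT8 post-training quantization normally needs a
+        calibration dataset besides the enabled_precisions flag used below.
+        A hedged sketch of attaching a calibrator (calib_loader and the
+        exact spec are assumptions, not part of this brain):
+
+            import torch
+            import torch_tensorrt
+
+            calibrator = torch_tensorrt.ptq.DataLoaderCalibrator(
+                calib_loader,   # DataLoader yielding representative inputs
+                use_cache=False,
+                algo_type=torch_tensorrt.ptq.CalibrationAlgo.ENTROPY_CALIBRATION_2,
+                device=torch.device('cuda'))
+            compile_spec = {"inputs": [torch_tensorrt.Input([1, 3, 200, 66])],
+                            "enabled_precisions": {torch.int8},
+                            "calibrator": calibrator}
+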
Communication with the controller + (default: {None}) + """ + self.motors = actuators.get_motor('motors_0') + self.camera_0 = sensors.get_camera('camera_0') + self.camera_1 = sensors.get_camera('camera_1') + self.camera_2 = sensors.get_camera('camera_2') + self.camera_3 = sensors.get_camera('camera_3') + self.bird_eye_view = sensors.get_bird_eye_view('bird_eye_view_0') + self.handler = handler + self.cont = 0 + self.inference_times = [] + self.gpu_inference = config['GPU'] + self.device = torch.device('cuda') + self.first_image = None + self.transformations = Compose([ + Normalize(), + ToTensorV2() + ]) + + self.suddenness_distance = [] + self.previous_v = None + self.previous_w = None + + if config: + if 'ImageCrop' in config.keys(): + self.cropImage = config['ImageCrop'] + else: + self.cropImage = True + + if model: + if not path.exists(PRETRAINED_MODELS + model): + print("File " + model + " cannot be found in " + PRETRAINED_MODELS) + + if config['UseOptimized']: + print('Loading optimized model TensorRT INT8...') + print(PRETRAINED_MODELS + model) + + # INT 8 TensorRT + import torch_tensorrt + self.net = torch.jit.load(PRETRAINED_MODELS + model).eval().to(self.device) + compile_spec = {"inputs": [torch_tensorrt.Input([1, 3, 200, 66])], + "enabled_precisions": torch.int8, + } + self.net = torch_tensorrt.compile(self.net, **compile_spec).to(self.device) + else: + self.net = PilotNet((200,66,3), 3).to(self.device) + self.net.load_state_dict(torch.load(PRETRAINED_MODELS + model,map_location=self.device)) + else: + print("Brain not loaded") + + client = carla.Client('localhost', 2000) + client.set_timeout(10.0) # seconds + world = client.get_world() + + time.sleep(5) + self.vehicle = world.get_actors().filter('vehicle.*')[0] + + def update_frame(self, frame_id, data): + """Update the information to be shown in one of the GUI's frames. + + Arguments: + frame_id {str} -- Id of the frame that will represent the data + data {*} -- Data to be shown in the frame. Depending on the type of frame (rgbimage, laser, pose3d, etc) + """ + if data.shape[0] != data.shape[1]: + if data.shape[0] > data.shape[1]: + difference = data.shape[0] - data.shape[1] + extra_left, extra_right = int(difference/2), int(difference/2) + extra_top, extra_bottom = 0, 0 + else: + difference = data.shape[1] - data.shape[0] + extra_left, extra_right = 0, 0 + extra_top, extra_bottom = int(difference/2), int(difference/2) + + + data = np.pad(data, ((extra_top, extra_bottom), (extra_left, extra_right), (0, 0)), mode='constant', constant_values=0) + + self.handler.update_frame(frame_id, data) + + def execute(self): + """Main loop of the brain. 
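+
+        Each tick appends the measured latency to self.inference_times. With
+        TensorRT engines the first calls usually include engine warm-up, so
+        it can help to drop them when summarizing. A small sketch (the
+        warm-up count of 10 is an arbitrary assumption):
+
+            warmup = 10
+            steady = self.inference_times[warmup:]
+            if steady:
+                mean_ms = 1000 * sum(steady) / len(steady)
+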
This will be called iteratively each TIME_CYCLE (see pilot.py)""" + + self.cont += 1 + + image = self.camera_0.getImage().data + image_1 = self.camera_1.getImage().data + image_2 = self.camera_2.getImage().data + image_3 = self.camera_3.getImage().data + + bird_eye_view_1 = self.bird_eye_view.getImage(self.vehicle) + bird_eye_view_1 = cv2.cvtColor(bird_eye_view_1, cv2.COLOR_BGR2RGB) + + self.update_frame('frame_1', image_1) + self.update_frame('frame_2', image_2) + self.update_frame('frame_3', image_3) + + self.update_frame('frame_0', bird_eye_view_1) + + try: + img = cv2.resize(bird_eye_view_1, (int(66), int(200))) + image = self.transformations(image=img) + image = image['image'] + image = image.unsqueeze(0) + image = FLOAT(image).to(self.device) + + start_time = time.time() + with torch.no_grad(): + prediction = self.net(image).cpu().numpy() if self.gpu_inference else self.net(image).numpy() + + self.inference_times.append(time.time() - start_time) + + throttle = prediction[0][0] + steer = prediction[0][1] * (1 - (-1)) + (-1) + break_command = prediction[0][2] + + speed = self.vehicle.get_velocity() + vehicle_speed = 3.6 * math.sqrt(speed.x**2 + speed.y**2 + speed.z**2) + + if vehicle_speed > 30: + self.motors.sendThrottle(0) + self.motors.sendSteer(steer) + self.motors.sendBrake(break_command) + else: + if vehicle_speed < 5: + self.motors.sendThrottle(1.0) + self.motors.sendSteer(0.0) + self.motors.sendBrake(0) + else: + self.motors.sendThrottle(throttle) + self.motors.sendSteer(steer) + self.motors.sendBrake(break_command) + + except Exception as err: + print(err) + + diff --git a/behavior_metrics/brains/CARLA/brain_carla_bird_eye_deep_learning.py b/behavior_metrics/brains/CARLA/tensorflow/brain_carla_bird_eye_deep_learning.py similarity index 96% rename from behavior_metrics/brains/CARLA/brain_carla_bird_eye_deep_learning.py rename to behavior_metrics/brains/CARLA/tensorflow/brain_carla_bird_eye_deep_learning.py index 23d0de1e..fd9de629 100644 --- a/behavior_metrics/brains/CARLA/brain_carla_bird_eye_deep_learning.py +++ b/behavior_metrics/brains/CARLA/tensorflow/brain_carla_bird_eye_deep_learning.py @@ -57,7 +57,10 @@ def __init__(self, sensors, actuators, handler, model, config=None): world = client.get_world() time.sleep(5) - self.vehicle = world.get_actors().filter('vehicle.*')[0] + for vehicle in world.get_actors().filter('vehicle.*'): + if vehicle.attributes.get('role_name') == 'ego_vehicle': + self.vehicle = vehicle + break if model: if not path.exists(PRETRAINED_MODELS + model): @@ -132,7 +135,7 @@ def execute(self): self.update_pose(self.pose.getPose3d()) - image_shape=(50, 150) + image_shape=(66, 200) img_base = cv2.resize(bird_eye_view_1, image_shape) AUGMENTATIONS_TEST = Compose([ diff --git a/behavior_metrics/brains/CARLA/brain_carla_bird_eye_deep_learning_V_MAX_30.py b/behavior_metrics/brains/CARLA/tensorflow/brain_carla_bird_eye_deep_learning_V_MAX_30.py similarity index 100% rename from behavior_metrics/brains/CARLA/brain_carla_bird_eye_deep_learning_V_MAX_30.py rename to behavior_metrics/brains/CARLA/tensorflow/brain_carla_bird_eye_deep_learning_V_MAX_30.py diff --git a/behavior_metrics/brains/CARLA/brain_carla_bird_eye_deep_learning_previous_v.py b/behavior_metrics/brains/CARLA/tensorflow/brain_carla_bird_eye_deep_learning_previous_v.py similarity index 100% rename from behavior_metrics/brains/CARLA/brain_carla_bird_eye_deep_learning_previous_v.py rename to behavior_metrics/brains/CARLA/tensorflow/brain_carla_bird_eye_deep_learning_previous_v.py diff --git 
a/behavior_metrics/brains/CARLA/brain_carla_bird_eye_deep_learning_tensor_rt.py b/behavior_metrics/brains/CARLA/tensorflow/brain_carla_bird_eye_deep_learning_tensor_rt.py similarity index 96% rename from behavior_metrics/brains/CARLA/brain_carla_bird_eye_deep_learning_tensor_rt.py rename to behavior_metrics/brains/CARLA/tensorflow/brain_carla_bird_eye_deep_learning_tensor_rt.py index a597449c..baeeb01c 100644 --- a/behavior_metrics/brains/CARLA/brain_carla_bird_eye_deep_learning_tensor_rt.py +++ b/behavior_metrics/brains/CARLA/tensorflow/brain_carla_bird_eye_deep_learning_tensor_rt.py @@ -19,6 +19,8 @@ from tensorflow.python.framework.errors_impl import NotFoundError from tensorflow.python.framework.errors_impl import UnimplementedError +from tensorflow.python.saved_model import signature_constants +from tensorflow.python.saved_model import tag_constants import tensorflow as tf #import os @@ -64,8 +66,8 @@ def __init__(self, sensors, actuators, handler, model, config=None): logger.info("** Load TF model **") logger.info("Using TensorRT models.....") - self.net = tf.saved_model.load(PRETRAINED_MODELS + model) - self.infer = self.net.signatures['serving_default'] + self.net = tf.saved_model.load(PRETRAINED_MODELS + model, tags=[tag_constants.SERVING]) + self.infer = self.net.signatures[signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY] self.output_tensorname = list(self.infer.structured_outputs.keys())[0] self.inf_func = self.tftrt_inference @@ -150,7 +152,7 @@ def execute(self): self.update_pose(self.pose.getPose3d()) - image_shape=(50, 150) + image_shape=(66, 200) img_base = cv2.resize(bird_eye_view_1, image_shape) AUGMENTATIONS_TEST = Compose([ diff --git a/behavior_metrics/brains/CARLA/brain_carla_bird_eye_deep_learning_tf_lite.py b/behavior_metrics/brains/CARLA/tensorflow/brain_carla_bird_eye_deep_learning_tf_lite.py similarity index 89% rename from behavior_metrics/brains/CARLA/brain_carla_bird_eye_deep_learning_tf_lite.py rename to behavior_metrics/brains/CARLA/tensorflow/brain_carla_bird_eye_deep_learning_tf_lite.py index 80d4a15c..af6517e5 100644 --- a/behavior_metrics/brains/CARLA/brain_carla_bird_eye_deep_learning_tf_lite.py +++ b/behavior_metrics/brains/CARLA/tensorflow/brain_carla_bird_eye_deep_learning_tf_lite.py @@ -24,9 +24,9 @@ import os os.environ['CUDA_VISIBLE_DEVICES'] = '-1' -gpus = tf.config.experimental.list_physical_devices('GPU') -for gpu in gpus: - tf.config.experimental.set_memory_growth(gpu, True) +#gpus = tf.config.experimental.list_physical_devices('GPU') +#for gpu in gpus: +# tf.config.experimental.set_memory_growth(gpu, True) class Brain: @@ -66,7 +66,7 @@ def __init__(self, sensors, actuators, handler, model, config=None): logger.info("Using TF lite models.....") self.net = tf.lite.Interpreter(model_path= PRETRAINED_MODELS + model) self.net.allocate_tensors() - self.input_index = self.net.get_input_details()[0]["index"] + self.input_index = self.net.get_input_details()[0] self.output_index = self.net.get_output_details()[0]["index"] self.inf_func = self.optim_inference @@ -89,12 +89,22 @@ def optim_inference(self, img): output -- prediction from the model """ # Pre-processing - self.net.set_tensor(self.input_index, img) + if self.net.get_input_details()[0]['dtype'] == np.uint8: + input_scale, input_zero_point = self.net.get_input_details()[0]["quantization"] + img = img / input_scale + input_zero_point + img = img.astype(self.net.get_input_details()[0]["dtype"]) + + self.net.set_tensor(self.input_index["index"], img.astype(self.input_index["dtype"])) # 
Run inference. self.net.invoke() # Post-processing output = self.net.get_tensor(self.output_index) + if self.net.get_input_details()[0]['dtype'] == np.uint8: + output_scale, input_zero_point = self.net.get_output_details()[0]["quantization"] + output = output.astype(np.float32) + output = (output - input_zero_point) * output_scale + return output def update_frame(self, frame_id, data): @@ -154,7 +164,7 @@ def execute(self): self.update_pose(self.pose.getPose3d()) - image_shape=(50, 150) + image_shape=(66, 200) img_base = cv2.resize(bird_eye_view_1, image_shape) AUGMENTATIONS_TEST = Compose([ diff --git a/behavior_metrics/brains/CARLA/brain_carla_bird_eye_deep_learning_x3_previous_v_t_t-4_t-9.py b/behavior_metrics/brains/CARLA/tensorflow/brain_carla_bird_eye_deep_learning_x3_previous_v_t_t-4_t-9.py similarity index 100% rename from behavior_metrics/brains/CARLA/brain_carla_bird_eye_deep_learning_x3_previous_v_t_t-4_t-9.py rename to behavior_metrics/brains/CARLA/tensorflow/brain_carla_bird_eye_deep_learning_x3_previous_v_t_t-4_t-9.py diff --git a/behavior_metrics/brains/CARLA/brain_carla_bird_eye_deep_learning_x3_t_t-4_t-9.py b/behavior_metrics/brains/CARLA/tensorflow/brain_carla_bird_eye_deep_learning_x3_t_t-4_t-9.py similarity index 100% rename from behavior_metrics/brains/CARLA/brain_carla_bird_eye_deep_learning_x3_t_t-4_t-9.py rename to behavior_metrics/brains/CARLA/tensorflow/brain_carla_bird_eye_deep_learning_x3_t_t-4_t-9.py diff --git a/behavior_metrics/brains/CARLA/brain_carla_bird_eye_deep_learning_x3_t_t-4_t-9_V_MAX_30.py b/behavior_metrics/brains/CARLA/tensorflow/brain_carla_bird_eye_deep_learning_x3_t_t-4_t-9_V_MAX_30.py similarity index 100% rename from behavior_metrics/brains/CARLA/brain_carla_bird_eye_deep_learning_x3_t_t-4_t-9_V_MAX_30.py rename to behavior_metrics/brains/CARLA/tensorflow/brain_carla_bird_eye_deep_learning_x3_t_t-4_t-9_V_MAX_30.py diff --git a/behavior_metrics/brains/agents/f1/f1_follow_line_qlearn.py b/behavior_metrics/brains/agents/f1/f1_follow_line_qlearn.py index b1e65b70..34ef83e2 100755 --- a/behavior_metrics/brains/agents/f1/f1_follow_line_qlearn.py +++ b/behavior_metrics/brains/agents/f1/f1_follow_line_qlearn.py @@ -105,7 +105,6 @@ def save_model(): highest_reward = cumulated_reward nextState = ''.join(map(str, observation)) -# print(nextState) qlearn.learn(state, action, reward, nextState) @@ -120,8 +119,6 @@ def save_model(): if step > 3000: print("\n\nLAP COMPLETED!!\n\n") - # print("Obser: {} - Rew: {}".format(observation, reward)) - if episode % 100 == 0: plotter.plot(env) if settings.save_model: diff --git a/behavior_metrics/brains/f1/brain_f1_follow_line_q_learn.py b/behavior_metrics/brains/f1/brain_f1_follow_line_qlearn.py similarity index 100% rename from behavior_metrics/brains/f1/brain_f1_follow_line_q_learn.py rename to behavior_metrics/brains/f1/brain_f1_follow_line_qlearn.py diff --git a/behavior_metrics/brains/f1/rl_utils/models/f1_env_camera.py b/behavior_metrics/brains/f1/rl_utils/models/f1_env_camera.py index d3c13061..41b065d9 100644 --- a/behavior_metrics/brains/f1/rl_utils/models/f1_env_camera.py +++ b/behavior_metrics/brains/f1/rl_utils/models/f1_env_camera.py @@ -94,24 +94,9 @@ def step(self, action) -> Tuple: points = self.processed_image(f1_image_camera.data) state = self.calculate_observation(points) - center = float(self.config.center_image - points[0]) / ( - float(self.config.width) // 2 - ) done = False - center = abs(center) - - if center > 0.9: - done = True - if not done: - if 0 <= center <= 0.2: - reward = 10 - elif 
0.2 < center <= 0.4: - reward = 2 - else: - reward = 1 - else: - reward = -100 + reward = 0 return state, reward, done, {} diff --git a/behavior_metrics/brains/f1/rl_utils/models/followline_dqn_tf.py b/behavior_metrics/brains/f1/rl_utils/models/followline_dqn_tf.py index f9e12bdb..75856e3c 100644 --- a/behavior_metrics/brains/f1/rl_utils/models/followline_dqn_tf.py +++ b/behavior_metrics/brains/f1/rl_utils/models/followline_dqn_tf.py @@ -30,7 +30,6 @@ def reset(self): return Reset.reset_f1_state_image(self) else: return Reset.reset_f1_state_sp(self) - return Reset.reset_f1_state_sp(self) def step(self, action, step): from .step import ( diff --git a/behavior_metrics/brains/f1/rl_utils/models/rewards.py b/behavior_metrics/brains/f1/rl_utils/models/rewards.py index 80da0995..6ac8a19d 100644 --- a/behavior_metrics/brains/f1/rl_utils/models/rewards.py +++ b/behavior_metrics/brains/f1/rl_utils/models/rewards.py @@ -75,15 +75,7 @@ def rewards_followline_center(self, center, rewards): original for Following Line """ done = False - if center > 0.9: - done = True - reward = rewards["penal"] - elif 0 <= center <= 0.2: - reward = rewards["from_10"] - elif 0.2 < center <= 0.4: - reward = rewards["from_02"] - else: - reward = rewards["from_01"] + reward = 0 return reward, done diff --git a/behavior_metrics/brains/f1/rl_utils/models/step.py b/behavior_metrics/brains/f1/rl_utils/models/step.py index 102a640f..5e619801 100644 --- a/behavior_metrics/brains/f1/rl_utils/models/step.py +++ b/behavior_metrics/brains/f1/rl_utils/models/step.py @@ -48,13 +48,10 @@ def step_followline_state_image_actions_discretes(self, action, step): return state, reward, done, {} def step_followline_state_sp_actions_discretes(self, action, step): - # self._gazebo_unpause() vel_cmd = Twist() vel_cmd.linear.x = self.actions[action][0] vel_cmd.angular.z = self.actions[action][1] self.vel_pub.publish(vel_cmd) - # time.sleep(0.1) - # self._gazebo_pause() ##==== get image from sensor camera f1_image_camera, _ = self.f1gazeboimages.get_camera_info() diff --git a/behavior_metrics/configs/CARLA_launch_files/CARLA_object_files/main_car.json b/behavior_metrics/configs/CARLA_launch_files/CARLA_object_files/main_car.json new file mode 100644 index 00000000..d687d1c9 --- /dev/null +++ b/behavior_metrics/configs/CARLA_launch_files/CARLA_object_files/main_car.json @@ -0,0 +1,161 @@ +{ + "objects": + [ + { + "type": "sensor.pseudo.traffic_lights", + "id": "traffic_lights" + }, + { + "type": "sensor.pseudo.objects", + "id": "objects" + }, + { + "type": "sensor.pseudo.actor_list", + "id": "actor_list" + }, + { + "type": "sensor.pseudo.markers", + "id": "markers" + }, + { + "type": "sensor.pseudo.opendrive_map", + "id": "map" + }, + { + "type": "vehicle.tesla.model3", + "id": "ego_vehicle", + "sensors": + [ + { + "type": "sensor.camera.rgb", + "id": "rgb_front", + "spawn_point": {"x": 2.5, "y": 0.0, "z": 0.8, "roll": 0.0, "pitch": 0.0, "yaw": 0.0}, + "image_size_x": 640, + "image_size_y": 480 + }, + { + "type": "sensor.camera.rgb", + "id": "rgb_view", + "spawn_point": {"x": -4.5, "y": 0.0, "z": 2.8, "roll": 0.0, "pitch": 20.0, "yaw": 0.0}, + "image_size_x": 800, + "image_size_y": 600, + "fov": 90.0, + "attached_objects": + [ + { + "type": "actor.pseudo.control", + "id": "control" + } + ] + }, + { + "type": "sensor.lidar.ray_cast", + "id": "lidar", + "spawn_point": {"x": 0.0, "y": 0.0, "z": 2.4, "roll": 0.0, "pitch": 0.0, "yaw": 0.0}, + "range": 50, + "channels": 32, + "points_per_second": 320000, + "upper_fov": 2.0, + "lower_fov": -26.8, + 
"rotation_frequency": 20, + "noise_stddev": 0.0 + }, + { + "type": "sensor.lidar.ray_cast_semantic", + "id": "semantic_lidar", + "spawn_point": {"x": 0.0, "y": 0.0, "z": 2.4, "roll": 0.0, "pitch": 0.0, "yaw": 0.0}, + "range": 50, + "channels": 32, + "points_per_second": 320000, + "upper_fov": 2.0, + "lower_fov": -26.8, + "rotation_frequency": 20 + }, + { + "type": "sensor.other.radar", + "id": "radar_front", + "spawn_point": {"x": 2.0, "y": 0.0, "z": 2.0, "roll": 0.0, "pitch": 0.0, "yaw": 0.0}, + "horizontal_fov": 30.0, + "vertical_fov": 10.0, + "points_per_second": 1500, + "range": 100.0 + }, + { + "type": "sensor.camera.semantic_segmentation", + "id": "semantic_segmentation_front", + "spawn_point": {"x": 2.0, "y": 0.0, "z": 2.0, "roll": 0.0, "pitch": 0.0, "yaw": 0.0}, + "fov": 90.0, + "image_size_x": 400, + "image_size_y": 70 + }, + { + "type": "sensor.camera.depth", + "id": "depth_front", + "spawn_point": {"x": 2.0, "y": 0.0, "z": 2.0, "roll": 0.0, "pitch": 0.0, "yaw": 0.0}, + "fov": 90.0, + "image_size_x": 400, + "image_size_y": 70 + }, + { + "type": "sensor.camera.dvs", + "id": "dvs_front", + "spawn_point": {"x": 2.0, "y": 0.0, "z": 2.0, "roll": 0.0, "pitch": 0.0, "yaw": 0.0}, + "fov": 90.0, + "image_size_x": 400, + "image_size_y": 70, + "positive_threshold": 0.3, + "negative_threshold": 0.3, + "sigma_positive_threshold": 0.0, + "sigma_negative_threshold": 0.0, + "use_log": true, + "log_eps": 0.001 + }, + { + "type": "sensor.other.gnss", + "id": "gnss", + "spawn_point": {"x": 1.0, "y": 0.0, "z": 2.0}, + "noise_alt_stddev": 0.0, "noise_lat_stddev": 0.0, "noise_lon_stddev": 0.0, + "noise_alt_bias": 0.0, "noise_lat_bias": 0.0, "noise_lon_bias": 0.0 + }, + { + "type": "sensor.other.imu", + "id": "imu", + "spawn_point": {"x": 2.0, "y": 0.0, "z": 2.0, "roll": 0.0, "pitch": 0.0, "yaw": 0.0}, + "noise_accel_stddev_x": 0.0, "noise_accel_stddev_y": 0.0, "noise_accel_stddev_z": 0.0, + "noise_gyro_stddev_x": 0.0, "noise_gyro_stddev_y": 0.0, "noise_gyro_stddev_z": 0.0, + "noise_gyro_bias_x": 0.0, "noise_gyro_bias_y": 0.0, "noise_gyro_bias_z": 0.0 + }, + { + "type": "sensor.other.collision", + "id": "collision", + "spawn_point": {"x": 0.0, "y": 0.0, "z": 0.0} + }, + { + "type": "sensor.other.lane_invasion", + "id": "lane_invasion", + "spawn_point": {"x": 0.0, "y": 0.0, "z": 0.0} + }, + { + "type": "sensor.pseudo.tf", + "id": "tf" + }, + { + "type": "sensor.pseudo.objects", + "id": "objects" + }, + { + "type": "sensor.pseudo.odom", + "id": "odometry" + }, + { + "type": "sensor.pseudo.speedometer", + "id": "speedometer" + }, + { + "type": "actor.pseudo.control", + "id": "control" + } + ] + } + ] +} diff --git a/behavior_metrics/configs/CARLA_launch_files/CARLA_object_files/parked_bike_car_objects.json b/behavior_metrics/configs/CARLA_launch_files/CARLA_object_files/parked_bike_car_objects.json new file mode 100644 index 00000000..70124e1f --- /dev/null +++ b/behavior_metrics/configs/CARLA_launch_files/CARLA_object_files/parked_bike_car_objects.json @@ -0,0 +1,170 @@ +{ + "objects": + [ + { + "type": "sensor.pseudo.traffic_lights", + "id": "traffic_lights" + }, + { + "type": "sensor.pseudo.objects", + "id": "objects" + }, + { + "type": "sensor.pseudo.actor_list", + "id": "actor_list" + }, + { + "type": "sensor.pseudo.markers", + "id": "markers" + }, + { + "type": "sensor.pseudo.opendrive_map", + "id": "map" + }, + { + "type": "vehicle.tesla.model3", + "id": "ego_vehicle", + "sensors": + [ + { + "type": "sensor.camera.rgb", + "id": "rgb_front", + "spawn_point": {"x": 2.0, "y": 0.0, "z": 2.0, "roll": 
0.0, "pitch": 0.0, "yaw": 0.0}, + "image_size_x": 800, + "image_size_y": 600, + "fov": 90.0 + }, + { + "type": "sensor.camera.rgb", + "id": "rgb_view", + "spawn_point": {"x": -4.5, "y": 0.0, "z": 2.8, "roll": 0.0, "pitch": 20.0, "yaw": 0.0}, + "image_size_x": 800, + "image_size_y": 600, + "fov": 90.0, + "attached_objects": + [ + { + "type": "actor.pseudo.control", + "id": "control" + } + ] + }, + { + "type": "sensor.lidar.ray_cast", + "id": "lidar", + "spawn_point": {"x": 0.0, "y": 0.0, "z": 2.4, "roll": 0.0, "pitch": 0.0, "yaw": 0.0}, + "range": 50, + "channels": 32, + "points_per_second": 320000, + "upper_fov": 2.0, + "lower_fov": -26.8, + "rotation_frequency": 20, + "noise_stddev": 0.0 + }, + { + "type": "sensor.lidar.ray_cast_semantic", + "id": "semantic_lidar", + "spawn_point": {"x": 0.0, "y": 0.0, "z": 2.4, "roll": 0.0, "pitch": 0.0, "yaw": 0.0}, + "range": 50, + "channels": 32, + "points_per_second": 320000, + "upper_fov": 2.0, + "lower_fov": -26.8, + "rotation_frequency": 20 + }, + { + "type": "sensor.other.radar", + "id": "radar_front", + "spawn_point": {"x": 2.0, "y": 0.0, "z": 2.0, "roll": 0.0, "pitch": 0.0, "yaw": 0.0}, + "horizontal_fov": 30.0, + "vertical_fov": 10.0, + "points_per_second": 1500, + "range": 100.0 + }, + { + "type": "sensor.camera.semantic_segmentation", + "id": "semantic_segmentation_front", + "spawn_point": {"x": 2.0, "y": 0.0, "z": 2.0, "roll": 0.0, "pitch": 0.0, "yaw": 0.0}, + "fov": 90.0, + "image_size_x": 400, + "image_size_y": 70 + }, + { + "type": "sensor.camera.depth", + "id": "depth_front", + "spawn_point": {"x": 2.0, "y": 0.0, "z": 2.0, "roll": 0.0, "pitch": 0.0, "yaw": 0.0}, + "fov": 90.0, + "image_size_x": 400, + "image_size_y": 70 + }, + { + "type": "sensor.camera.dvs", + "id": "dvs_front", + "spawn_point": {"x": 2.0, "y": 0.0, "z": 2.0, "roll": 0.0, "pitch": 0.0, "yaw": 0.0}, + "fov": 90.0, + "image_size_x": 400, + "image_size_y": 70, + "positive_threshold": 0.3, + "negative_threshold": 0.3, + "sigma_positive_threshold": 0.0, + "sigma_negative_threshold": 0.0, + "use_log": true, + "log_eps": 0.001 + }, + { + "type": "sensor.other.gnss", + "id": "gnss", + "spawn_point": {"x": 1.0, "y": 0.0, "z": 2.0}, + "noise_alt_stddev": 0.0, "noise_lat_stddev": 0.0, "noise_lon_stddev": 0.0, + "noise_alt_bias": 0.0, "noise_lat_bias": 0.0, "noise_lon_bias": 0.0 + }, + { + "type": "sensor.other.imu", + "id": "imu", + "spawn_point": {"x": 2.0, "y": 0.0, "z": 2.0, "roll": 0.0, "pitch": 0.0, "yaw": 0.0}, + "noise_accel_stddev_x": 0.0, "noise_accel_stddev_y": 0.0, "noise_accel_stddev_z": 0.0, + "noise_gyro_stddev_x": 0.0, "noise_gyro_stddev_y": 0.0, "noise_gyro_stddev_z": 0.0, + "noise_gyro_bias_x": 0.0, "noise_gyro_bias_y": 0.0, "noise_gyro_bias_z": 0.0 + }, + { + "type": "sensor.other.collision", + "id": "collision", + "spawn_point": {"x": 0.0, "y": 0.0, "z": 0.0} + }, + { + "type": "sensor.other.lane_invasion", + "id": "lane_invasion", + "spawn_point": {"x": 0.0, "y": 0.0, "z": 0.0} + }, + { + "type": "sensor.pseudo.tf", + "id": "tf" + }, + { + "type": "sensor.pseudo.objects", + "id": "objects" + }, + { + "type": "sensor.pseudo.odom", + "id": "odometry" + }, + { + "type": "sensor.pseudo.speedometer", + "id": "speedometer" + }, + { + "type": "actor.pseudo.control", + "id": "control" + } + ] + }, + { + "type": "vehicle.yamaha.yzf", + "id": "parked_bike" + }, + { + "type": "vehicle.audi.a2", + "id": "parked_car" + } + ] +} \ No newline at end of file diff --git a/behavior_metrics/configs/CARLA_launch_files/CARLA_object_files/parked_bike_objects.json 
b/behavior_metrics/configs/CARLA_launch_files/CARLA_object_files/parked_bike_objects.json new file mode 100644 index 00000000..816ebda5 --- /dev/null +++ b/behavior_metrics/configs/CARLA_launch_files/CARLA_object_files/parked_bike_objects.json @@ -0,0 +1,166 @@ +{ + "objects": + [ + { + "type": "sensor.pseudo.traffic_lights", + "id": "traffic_lights" + }, + { + "type": "sensor.pseudo.objects", + "id": "objects" + }, + { + "type": "sensor.pseudo.actor_list", + "id": "actor_list" + }, + { + "type": "sensor.pseudo.markers", + "id": "markers" + }, + { + "type": "sensor.pseudo.opendrive_map", + "id": "map" + }, + { + "type": "vehicle.tesla.model3", + "id": "ego_vehicle", + "sensors": + [ + { + "type": "sensor.camera.rgb", + "id": "rgb_front", + "spawn_point": {"x": 2.0, "y": 0.0, "z": 2.0, "roll": 0.0, "pitch": 0.0, "yaw": 0.0}, + "image_size_x": 800, + "image_size_y": 600, + "fov": 90.0 + }, + { + "type": "sensor.camera.rgb", + "id": "rgb_view", + "spawn_point": {"x": -4.5, "y": 0.0, "z": 2.8, "roll": 0.0, "pitch": 20.0, "yaw": 0.0}, + "image_size_x": 800, + "image_size_y": 600, + "fov": 90.0, + "attached_objects": + [ + { + "type": "actor.pseudo.control", + "id": "control" + } + ] + }, + { + "type": "sensor.lidar.ray_cast", + "id": "lidar", + "spawn_point": {"x": 0.0, "y": 0.0, "z": 2.4, "roll": 0.0, "pitch": 0.0, "yaw": 0.0}, + "range": 50, + "channels": 32, + "points_per_second": 320000, + "upper_fov": 2.0, + "lower_fov": -26.8, + "rotation_frequency": 20, + "noise_stddev": 0.0 + }, + { + "type": "sensor.lidar.ray_cast_semantic", + "id": "semantic_lidar", + "spawn_point": {"x": 0.0, "y": 0.0, "z": 2.4, "roll": 0.0, "pitch": 0.0, "yaw": 0.0}, + "range": 50, + "channels": 32, + "points_per_second": 320000, + "upper_fov": 2.0, + "lower_fov": -26.8, + "rotation_frequency": 20 + }, + { + "type": "sensor.other.radar", + "id": "radar_front", + "spawn_point": {"x": 2.0, "y": 0.0, "z": 2.0, "roll": 0.0, "pitch": 0.0, "yaw": 0.0}, + "horizontal_fov": 30.0, + "vertical_fov": 10.0, + "points_per_second": 1500, + "range": 100.0 + }, + { + "type": "sensor.camera.semantic_segmentation", + "id": "semantic_segmentation_front", + "spawn_point": {"x": 2.0, "y": 0.0, "z": 2.0, "roll": 0.0, "pitch": 0.0, "yaw": 0.0}, + "fov": 90.0, + "image_size_x": 400, + "image_size_y": 70 + }, + { + "type": "sensor.camera.depth", + "id": "depth_front", + "spawn_point": {"x": 2.0, "y": 0.0, "z": 2.0, "roll": 0.0, "pitch": 0.0, "yaw": 0.0}, + "fov": 90.0, + "image_size_x": 400, + "image_size_y": 70 + }, + { + "type": "sensor.camera.dvs", + "id": "dvs_front", + "spawn_point": {"x": 2.0, "y": 0.0, "z": 2.0, "roll": 0.0, "pitch": 0.0, "yaw": 0.0}, + "fov": 90.0, + "image_size_x": 400, + "image_size_y": 70, + "positive_threshold": 0.3, + "negative_threshold": 0.3, + "sigma_positive_threshold": 0.0, + "sigma_negative_threshold": 0.0, + "use_log": true, + "log_eps": 0.001 + }, + { + "type": "sensor.other.gnss", + "id": "gnss", + "spawn_point": {"x": 1.0, "y": 0.0, "z": 2.0}, + "noise_alt_stddev": 0.0, "noise_lat_stddev": 0.0, "noise_lon_stddev": 0.0, + "noise_alt_bias": 0.0, "noise_lat_bias": 0.0, "noise_lon_bias": 0.0 + }, + { + "type": "sensor.other.imu", + "id": "imu", + "spawn_point": {"x": 2.0, "y": 0.0, "z": 2.0, "roll": 0.0, "pitch": 0.0, "yaw": 0.0}, + "noise_accel_stddev_x": 0.0, "noise_accel_stddev_y": 0.0, "noise_accel_stddev_z": 0.0, + "noise_gyro_stddev_x": 0.0, "noise_gyro_stddev_y": 0.0, "noise_gyro_stddev_z": 0.0, + "noise_gyro_bias_x": 0.0, "noise_gyro_bias_y": 0.0, "noise_gyro_bias_z": 0.0 + }, + { + "type": 
"sensor.other.collision", + "id": "collision", + "spawn_point": {"x": 0.0, "y": 0.0, "z": 0.0} + }, + { + "type": "sensor.other.lane_invasion", + "id": "lane_invasion", + "spawn_point": {"x": 0.0, "y": 0.0, "z": 0.0} + }, + { + "type": "sensor.pseudo.tf", + "id": "tf" + }, + { + "type": "sensor.pseudo.objects", + "id": "objects" + }, + { + "type": "sensor.pseudo.odom", + "id": "odometry" + }, + { + "type": "sensor.pseudo.speedometer", + "id": "speedometer" + }, + { + "type": "actor.pseudo.control", + "id": "control" + } + ] + }, + { + "type": "vehicle.yamaha.yzf", + "id": "parked_bike" + } + ] +} \ No newline at end of file diff --git a/behavior_metrics/configs/CARLA_launch_files/CARLA_object_files/pedestrian_objects.json b/behavior_metrics/configs/CARLA_launch_files/CARLA_object_files/pedestrian_objects.json new file mode 100644 index 00000000..19e2556f --- /dev/null +++ b/behavior_metrics/configs/CARLA_launch_files/CARLA_object_files/pedestrian_objects.json @@ -0,0 +1,166 @@ +{ + "objects": + [ + { + "type": "sensor.pseudo.traffic_lights", + "id": "traffic_lights" + }, + { + "type": "sensor.pseudo.objects", + "id": "objects" + }, + { + "type": "sensor.pseudo.actor_list", + "id": "actor_list" + }, + { + "type": "sensor.pseudo.markers", + "id": "markers" + }, + { + "type": "sensor.pseudo.opendrive_map", + "id": "map" + }, + { + "type": "vehicle.tesla.model3", + "id": "ego_vehicle", + "sensors": + [ + { + "type": "sensor.camera.rgb", + "id": "rgb_front", + "spawn_point": {"x": 2.0, "y": 0.0, "z": 2.0, "roll": 0.0, "pitch": 0.0, "yaw": 0.0}, + "image_size_x": 800, + "image_size_y": 600, + "fov": 90.0 + }, + { + "type": "sensor.camera.rgb", + "id": "rgb_view", + "spawn_point": {"x": -4.5, "y": 0.0, "z": 2.8, "roll": 0.0, "pitch": 20.0, "yaw": 0.0}, + "image_size_x": 800, + "image_size_y": 600, + "fov": 90.0, + "attached_objects": + [ + { + "type": "actor.pseudo.control", + "id": "control" + } + ] + }, + { + "type": "sensor.lidar.ray_cast", + "id": "lidar", + "spawn_point": {"x": 0.0, "y": 0.0, "z": 2.4, "roll": 0.0, "pitch": 0.0, "yaw": 0.0}, + "range": 50, + "channels": 32, + "points_per_second": 320000, + "upper_fov": 2.0, + "lower_fov": -26.8, + "rotation_frequency": 20, + "noise_stddev": 0.0 + }, + { + "type": "sensor.lidar.ray_cast_semantic", + "id": "semantic_lidar", + "spawn_point": {"x": 0.0, "y": 0.0, "z": 2.4, "roll": 0.0, "pitch": 0.0, "yaw": 0.0}, + "range": 50, + "channels": 32, + "points_per_second": 320000, + "upper_fov": 2.0, + "lower_fov": -26.8, + "rotation_frequency": 20 + }, + { + "type": "sensor.other.radar", + "id": "radar_front", + "spawn_point": {"x": 2.0, "y": 0.0, "z": 2.0, "roll": 0.0, "pitch": 0.0, "yaw": 0.0}, + "horizontal_fov": 30.0, + "vertical_fov": 10.0, + "points_per_second": 1500, + "range": 100.0 + }, + { + "type": "sensor.camera.semantic_segmentation", + "id": "semantic_segmentation_front", + "spawn_point": {"x": 2.0, "y": 0.0, "z": 2.0, "roll": 0.0, "pitch": 0.0, "yaw": 0.0}, + "fov": 90.0, + "image_size_x": 400, + "image_size_y": 70 + }, + { + "type": "sensor.camera.depth", + "id": "depth_front", + "spawn_point": {"x": 2.0, "y": 0.0, "z": 2.0, "roll": 0.0, "pitch": 0.0, "yaw": 0.0}, + "fov": 90.0, + "image_size_x": 400, + "image_size_y": 70 + }, + { + "type": "sensor.camera.dvs", + "id": "dvs_front", + "spawn_point": {"x": 2.0, "y": 0.0, "z": 2.0, "roll": 0.0, "pitch": 0.0, "yaw": 0.0}, + "fov": 90.0, + "image_size_x": 400, + "image_size_y": 70, + "positive_threshold": 0.3, + "negative_threshold": 0.3, + "sigma_positive_threshold": 0.0, + 
"sigma_negative_threshold": 0.0, + "use_log": true, + "log_eps": 0.001 + }, + { + "type": "sensor.other.gnss", + "id": "gnss", + "spawn_point": {"x": 1.0, "y": 0.0, "z": 2.0}, + "noise_alt_stddev": 0.0, "noise_lat_stddev": 0.0, "noise_lon_stddev": 0.0, + "noise_alt_bias": 0.0, "noise_lat_bias": 0.0, "noise_lon_bias": 0.0 + }, + { + "type": "sensor.other.imu", + "id": "imu", + "spawn_point": {"x": 2.0, "y": 0.0, "z": 2.0, "roll": 0.0, "pitch": 0.0, "yaw": 0.0}, + "noise_accel_stddev_x": 0.0, "noise_accel_stddev_y": 0.0, "noise_accel_stddev_z": 0.0, + "noise_gyro_stddev_x": 0.0, "noise_gyro_stddev_y": 0.0, "noise_gyro_stddev_z": 0.0, + "noise_gyro_bias_x": 0.0, "noise_gyro_bias_y": 0.0, "noise_gyro_bias_z": 0.0 + }, + { + "type": "sensor.other.collision", + "id": "collision", + "spawn_point": {"x": 0.0, "y": 0.0, "z": 0.0} + }, + { + "type": "sensor.other.lane_invasion", + "id": "lane_invasion", + "spawn_point": {"x": 0.0, "y": 0.0, "z": 0.0} + }, + { + "type": "sensor.pseudo.tf", + "id": "tf" + }, + { + "type": "sensor.pseudo.objects", + "id": "objects" + }, + { + "type": "sensor.pseudo.odom", + "id": "odometry" + }, + { + "type": "sensor.pseudo.speedometer", + "id": "speedometer" + }, + { + "type": "actor.pseudo.control", + "id": "control" + } + ] + }, + { + "type": "walker.pedestrian.0001", + "id": "pedestrian" + } + ] +} \ No newline at end of file diff --git a/behavior_metrics/configs/CARLA_launch_files/CARLA_object_files/pedestrian_parked_bike_car_objects.json b/behavior_metrics/configs/CARLA_launch_files/CARLA_object_files/pedestrian_parked_bike_car_objects.json new file mode 100644 index 00000000..f4079106 --- /dev/null +++ b/behavior_metrics/configs/CARLA_launch_files/CARLA_object_files/pedestrian_parked_bike_car_objects.json @@ -0,0 +1,174 @@ +{ + "objects": + [ + { + "type": "sensor.pseudo.traffic_lights", + "id": "traffic_lights" + }, + { + "type": "sensor.pseudo.objects", + "id": "objects" + }, + { + "type": "sensor.pseudo.actor_list", + "id": "actor_list" + }, + { + "type": "sensor.pseudo.markers", + "id": "markers" + }, + { + "type": "sensor.pseudo.opendrive_map", + "id": "map" + }, + { + "type": "vehicle.tesla.model3", + "id": "ego_vehicle", + "sensors": + [ + { + "type": "sensor.camera.rgb", + "id": "rgb_front", + "spawn_point": {"x": 2.0, "y": 0.0, "z": 2.0, "roll": 0.0, "pitch": 0.0, "yaw": 0.0}, + "image_size_x": 800, + "image_size_y": 600, + "fov": 90.0 + }, + { + "type": "sensor.camera.rgb", + "id": "rgb_view", + "spawn_point": {"x": -4.5, "y": 0.0, "z": 2.8, "roll": 0.0, "pitch": 20.0, "yaw": 0.0}, + "image_size_x": 800, + "image_size_y": 600, + "fov": 90.0, + "attached_objects": + [ + { + "type": "actor.pseudo.control", + "id": "control" + } + ] + }, + { + "type": "sensor.lidar.ray_cast", + "id": "lidar", + "spawn_point": {"x": 0.0, "y": 0.0, "z": 2.4, "roll": 0.0, "pitch": 0.0, "yaw": 0.0}, + "range": 50, + "channels": 32, + "points_per_second": 320000, + "upper_fov": 2.0, + "lower_fov": -26.8, + "rotation_frequency": 20, + "noise_stddev": 0.0 + }, + { + "type": "sensor.lidar.ray_cast_semantic", + "id": "semantic_lidar", + "spawn_point": {"x": 0.0, "y": 0.0, "z": 2.4, "roll": 0.0, "pitch": 0.0, "yaw": 0.0}, + "range": 50, + "channels": 32, + "points_per_second": 320000, + "upper_fov": 2.0, + "lower_fov": -26.8, + "rotation_frequency": 20 + }, + { + "type": "sensor.other.radar", + "id": "radar_front", + "spawn_point": {"x": 2.0, "y": 0.0, "z": 2.0, "roll": 0.0, "pitch": 0.0, "yaw": 0.0}, + "horizontal_fov": 30.0, + "vertical_fov": 10.0, + "points_per_second": 1500, + 
"range": 100.0 + }, + { + "type": "sensor.camera.semantic_segmentation", + "id": "semantic_segmentation_front", + "spawn_point": {"x": 2.0, "y": 0.0, "z": 2.0, "roll": 0.0, "pitch": 0.0, "yaw": 0.0}, + "fov": 90.0, + "image_size_x": 400, + "image_size_y": 70 + }, + { + "type": "sensor.camera.depth", + "id": "depth_front", + "spawn_point": {"x": 2.0, "y": 0.0, "z": 2.0, "roll": 0.0, "pitch": 0.0, "yaw": 0.0}, + "fov": 90.0, + "image_size_x": 400, + "image_size_y": 70 + }, + { + "type": "sensor.camera.dvs", + "id": "dvs_front", + "spawn_point": {"x": 2.0, "y": 0.0, "z": 2.0, "roll": 0.0, "pitch": 0.0, "yaw": 0.0}, + "fov": 90.0, + "image_size_x": 400, + "image_size_y": 70, + "positive_threshold": 0.3, + "negative_threshold": 0.3, + "sigma_positive_threshold": 0.0, + "sigma_negative_threshold": 0.0, + "use_log": true, + "log_eps": 0.001 + }, + { + "type": "sensor.other.gnss", + "id": "gnss", + "spawn_point": {"x": 1.0, "y": 0.0, "z": 2.0}, + "noise_alt_stddev": 0.0, "noise_lat_stddev": 0.0, "noise_lon_stddev": 0.0, + "noise_alt_bias": 0.0, "noise_lat_bias": 0.0, "noise_lon_bias": 0.0 + }, + { + "type": "sensor.other.imu", + "id": "imu", + "spawn_point": {"x": 2.0, "y": 0.0, "z": 2.0, "roll": 0.0, "pitch": 0.0, "yaw": 0.0}, + "noise_accel_stddev_x": 0.0, "noise_accel_stddev_y": 0.0, "noise_accel_stddev_z": 0.0, + "noise_gyro_stddev_x": 0.0, "noise_gyro_stddev_y": 0.0, "noise_gyro_stddev_z": 0.0, + "noise_gyro_bias_x": 0.0, "noise_gyro_bias_y": 0.0, "noise_gyro_bias_z": 0.0 + }, + { + "type": "sensor.other.collision", + "id": "collision", + "spawn_point": {"x": 0.0, "y": 0.0, "z": 0.0} + }, + { + "type": "sensor.other.lane_invasion", + "id": "lane_invasion", + "spawn_point": {"x": 0.0, "y": 0.0, "z": 0.0} + }, + { + "type": "sensor.pseudo.tf", + "id": "tf" + }, + { + "type": "sensor.pseudo.objects", + "id": "objects" + }, + { + "type": "sensor.pseudo.odom", + "id": "odometry" + }, + { + "type": "sensor.pseudo.speedometer", + "id": "speedometer" + }, + { + "type": "actor.pseudo.control", + "id": "control" + } + ] + }, + { + "type": "vehicle.yamaha.yzf", + "id": "parked_bike" + }, + { + "type": "vehicle.audi.a2", + "id": "parked_car" + }, + { + "type": "walker.pedestrian.0001", + "id": "pedestrian" + } + ] +} \ No newline at end of file diff --git a/behavior_metrics/configs/CARLA_launch_files/CARLA_object_files/single_ad_car.json b/behavior_metrics/configs/CARLA_launch_files/CARLA_object_files/single_ad_car.json new file mode 100644 index 00000000..46758eb4 --- /dev/null +++ b/behavior_metrics/configs/CARLA_launch_files/CARLA_object_files/single_ad_car.json @@ -0,0 +1,178 @@ +{ + "objects": + [ + { + "type": "sensor.pseudo.traffic_lights", + "id": "traffic_lights" + }, + { + "type": "sensor.pseudo.objects", + "id": "objects" + }, + { + "type": "sensor.pseudo.actor_list", + "id": "actor_list" + }, + { + "type": "sensor.pseudo.markers", + "id": "markers" + }, + { + "type": "sensor.pseudo.opendrive_map", + "id": "map" + }, + { + "type": "vehicle.tesla.model3", + "id": "ego_vehicle", + "sensors": + [ + { + "type": "sensor.camera.rgb", + "id": "rgb_front", + "spawn_point": {"x": 2.0, "y": 0.0, "z": 2.0, "roll": 0.0, "pitch": 0.0, "yaw": 0.0}, + "image_size_x": 800, + "image_size_y": 600, + "fov": 90.0 + }, + { + "type": "sensor.camera.rgb", + "id": "rgb_view", + "spawn_point": {"x": -4.5, "y": 0.0, "z": 2.8, "roll": 0.0, "pitch": 20.0, "yaw": 0.0}, + "image_size_x": 800, + "image_size_y": 600, + "fov": 90.0, + "attached_objects": + [ + { + "type": "actor.pseudo.control", + "id": "control" + } + ] + }, 
+ { + "type": "sensor.lidar.ray_cast", + "id": "lidar", + "spawn_point": {"x": 0.0, "y": 0.0, "z": 2.4, "roll": 0.0, "pitch": 0.0, "yaw": 0.0}, + "range": 50, + "channels": 32, + "points_per_second": 320000, + "upper_fov": 2.0, + "lower_fov": -26.8, + "rotation_frequency": 20, + "noise_stddev": 0.0 + }, + { + "type": "sensor.lidar.ray_cast_semantic", + "id": "semantic_lidar", + "spawn_point": {"x": 0.0, "y": 0.0, "z": 2.4, "roll": 0.0, "pitch": 0.0, "yaw": 0.0}, + "range": 50, + "channels": 32, + "points_per_second": 320000, + "upper_fov": 2.0, + "lower_fov": -26.8, + "rotation_frequency": 20 + }, + { + "type": "sensor.other.radar", + "id": "radar_front", + "spawn_point": {"x": 2.0, "y": 0.0, "z": 2.0, "roll": 0.0, "pitch": 0.0, "yaw": 0.0}, + "horizontal_fov": 30.0, + "vertical_fov": 10.0, + "points_per_second": 1500, + "range": 100.0 + }, + { + "type": "sensor.camera.semantic_segmentation", + "id": "semantic_segmentation_front", + "spawn_point": {"x": 2.0, "y": 0.0, "z": 2.0, "roll": 0.0, "pitch": 0.0, "yaw": 0.0}, + "fov": 90.0, + "image_size_x": 400, + "image_size_y": 70 + }, + { + "type": "sensor.camera.depth", + "id": "depth_front", + "spawn_point": {"x": 2.0, "y": 0.0, "z": 2.0, "roll": 0.0, "pitch": 0.0, "yaw": 0.0}, + "fov": 90.0, + "image_size_x": 400, + "image_size_y": 70 + }, + { + "type": "sensor.camera.dvs", + "id": "dvs_front", + "spawn_point": {"x": 2.0, "y": 0.0, "z": 2.0, "roll": 0.0, "pitch": 0.0, "yaw": 0.0}, + "fov": 90.0, + "image_size_x": 400, + "image_size_y": 70, + "positive_threshold": 0.3, + "negative_threshold": 0.3, + "sigma_positive_threshold": 0.0, + "sigma_negative_threshold": 0.0, + "use_log": true, + "log_eps": 0.001 + }, + { + "type": "sensor.other.gnss", + "id": "gnss", + "spawn_point": {"x": 1.0, "y": 0.0, "z": 2.0}, + "noise_alt_stddev": 0.0, "noise_lat_stddev": 0.0, "noise_lon_stddev": 0.0, + "noise_alt_bias": 0.0, "noise_lat_bias": 0.0, "noise_lon_bias": 0.0 + }, + { + "type": "sensor.other.imu", + "id": "imu", + "spawn_point": {"x": 2.0, "y": 0.0, "z": 2.0, "roll": 0.0, "pitch": 0.0, "yaw": 0.0}, + "noise_accel_stddev_x": 0.0, "noise_accel_stddev_y": 0.0, "noise_accel_stddev_z": 0.0, + "noise_gyro_stddev_x": 0.0, "noise_gyro_stddev_y": 0.0, "noise_gyro_stddev_z": 0.0, + "noise_gyro_bias_x": 0.0, "noise_gyro_bias_y": 0.0, "noise_gyro_bias_z": 0.0 + }, + { + "type": "sensor.other.collision", + "id": "collision", + "spawn_point": {"x": 0.0, "y": 0.0, "z": 0.0} + }, + { + "type": "sensor.other.lane_invasion", + "id": "lane_invasion", + "spawn_point": {"x": 0.0, "y": 0.0, "z": 0.0} + }, + { + "type": "sensor.pseudo.tf", + "id": "tf" + }, + { + "type": "sensor.pseudo.objects", + "id": "objects" + }, + { + "type": "sensor.pseudo.odom", + "id": "odometry" + }, + { + "type": "sensor.pseudo.speedometer", + "id": "speedometer" + }, + { + "type": "actor.pseudo.control", + "id": "control" + } + ] + }, + { + "type": "vehicle.audi.a2", + "id": "npc_vehicle_1", + "spawn_point": {"x": 60.0, "y": 2.0, "z": 1.37, "roll": 0.0, "pitch": 0.0, "yaw": 180.0}, + "sensors": + [ + { + "type": "sensor.pseudo.objects", + "id": "objects" + }, + { + "type": "sensor.pseudo.odom", + "id": "odometry" + } + ] + } + ] +} \ No newline at end of file diff --git a/behavior_metrics/configs/CARLA_launch_files/single_ad_npc_waypoint_publisher.launch b/behavior_metrics/configs/CARLA_launch_files/single_ad_npc_waypoint_publisher.launch new file mode 100644 index 00000000..0c67adb0 --- /dev/null +++ b/behavior_metrics/configs/CARLA_launch_files/single_ad_npc_waypoint_publisher.launch @@ -0,0 
+1,13 @@ + [13 lines of roslaunch XML; markup lost in extraction] \ No newline at end of file diff --git a/behavior_metrics/configs/CARLA_launch_files/town_01_anticlockwise.launch b/behavior_metrics/configs/CARLA_launch_files/town_01_anticlockwise.launch index 061dd0db..45db5b41 100644 --- a/behavior_metrics/configs/CARLA_launch_files/town_01_anticlockwise.launch +++ b/behavior_metrics/configs/CARLA_launch_files/town_01_anticlockwise.launch @@ -10,7 +10,7 @@ - + [one launch line changed; XML lost in extraction] diff --git a/behavior_metrics/configs/CARLA_launch_files/town_01_anticlockwise_parked_bike.launch b/behavior_metrics/configs/CARLA_launch_files/town_01_anticlockwise_parked_bike.launch new file mode 100644 index 00000000..739a3b53 --- /dev/null +++ b/behavior_metrics/configs/CARLA_launch_files/town_01_anticlockwise_parked_bike.launch @@ -0,0 +1,59 @@ + [59 lines of roslaunch XML lost in extraction] \ No newline at end of file diff --git a/behavior_metrics/configs/CARLA_launch_files/town_01_anticlockwise_parked_bike_car.launch b/behavior_metrics/configs/CARLA_launch_files/town_01_anticlockwise_parked_bike_car.launch new file mode 100644 index 00000000..e8869fcb --- /dev/null +++ b/behavior_metrics/configs/CARLA_launch_files/town_01_anticlockwise_parked_bike_car.launch @@ -0,0 +1,64 @@ + [64 lines of roslaunch XML lost in extraction] \ No newline at end of file diff --git a/behavior_metrics/configs/CARLA_launch_files/town_01_anticlockwise_pedestrian.launch b/behavior_metrics/configs/CARLA_launch_files/town_01_anticlockwise_pedestrian.launch new file mode 100644 index 00000000..ff07c3f4 --- /dev/null +++ b/behavior_metrics/configs/CARLA_launch_files/town_01_anticlockwise_pedestrian.launch @@ -0,0 +1,61 @@ + [61 lines of roslaunch XML lost in extraction] \ No newline at end of file diff --git a/behavior_metrics/configs/CARLA_launch_files/town_01_anticlockwise_pedestrian_parked_bike_car.launch b/behavior_metrics/configs/CARLA_launch_files/town_01_anticlockwise_pedestrian_parked_bike_car.launch new file mode 100644 index 00000000..62473842 --- /dev/null +++ b/behavior_metrics/configs/CARLA_launch_files/town_01_anticlockwise_pedestrian_parked_bike_car.launch @@ -0,0 +1,69 @@ + [69 lines of roslaunch XML lost in extraction] \ No newline at end of file diff --git a/behavior_metrics/configs/CARLA_launch_files/town_01_anticlockwise_single_ad_npc.launch b/behavior_metrics/configs/CARLA_launch_files/town_01_anticlockwise_single_ad_npc.launch new file mode 100644 index 00000000..df81a82e --- /dev/null +++ b/behavior_metrics/configs/CARLA_launch_files/town_01_anticlockwise_single_ad_npc.launch @@ -0,0 +1,69 @@ + [69 lines of roslaunch XML lost in extraction] \ No newline at end of file diff --git a/behavior_metrics/configs/CARLA_launch_files/town_01_clockwise.launch b/behavior_metrics/configs/CARLA_launch_files/town_01_clockwise.launch index 9a833b83..7375d454 100644 --- a/behavior_metrics/configs/CARLA_launch_files/town_01_clockwise.launch +++ b/behavior_metrics/configs/CARLA_launch_files/town_01_clockwise.launch @@ -10,7 +10,7 @@ - + [one launch line changed; XML lost in extraction]
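The objects-definition JSON files above (e.g. CARLA_object_files/single_ad_car.json) are handed to the carla_ros_bridge spawner (its objects_definition_file argument), which instantiates each entry and its nested sensors, including the sensor.pseudo.* entries that have no native CARLA blueprint. As a rough illustration of how such a spec maps onto the raw CARLA Python API, the sketch below (not Behavior Metrics or bridge code; the local file path, spawn point, and cameras-only filter are assumptions made to keep it short) spawns the ego vehicle and its camera sensors:

    # Minimal sketch: spawn the ego vehicle and its cameras from an
    # objects-definition JSON like CARLA_object_files/single_ad_car.json.
    import json
    import carla

    client = carla.Client('localhost', 2000)
    client.set_timeout(10.0)
    world = client.get_world()
    blueprints = world.get_blueprint_library()

    with open('single_ad_car.json') as f:  # assumed local copy of the spec
        spec = json.load(f)

    ego_spec = next(o for o in spec['objects'] if o['id'] == 'ego_vehicle')
    ego_bp = blueprints.find(ego_spec['type'])  # 'vehicle.tesla.model3'
    # controller_carla.py below searches for this role_name
    ego_bp.set_attribute('role_name', ego_spec['id'])
    ego = world.spawn_actor(ego_bp, world.get_map().get_spawn_points()[0])

    for sensor in ego_spec['sensors']:
        if not sensor['type'].startswith('sensor.camera'):
            continue  # pseudo sensors have no CARLA blueprint; cameras only here
        bp = blueprints.find(sensor['type'])
        bp.set_attribute('image_size_x', str(sensor['image_size_x']))
        bp.set_attribute('image_size_y', str(sensor['image_size_y']))
        bp.set_attribute('fov', str(sensor['fov']))
        sp = sensor['spawn_point']
        transform = carla.Transform(
            carla.Location(x=sp['x'], y=sp['y'], z=sp['z']),
            carla.Rotation(roll=sp.get('roll', 0.0),
                           pitch=sp.get('pitch', 0.0),
                           yaw=sp.get('yaw', 0.0)))
        world.spawn_actor(bp, transform, attach_to=ego)  # sensor rides on the ego vehicle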
diff --git a/behavior_metrics/configs/CARLA_launch_files/town_02_anticlockwise.launch b/behavior_metrics/configs/CARLA_launch_files/town_02_anticlockwise.launch index 4fa3bebe..ef688bd2 100644 --- a/behavior_metrics/configs/CARLA_launch_files/town_02_anticlockwise.launch +++ b/behavior_metrics/configs/CARLA_launch_files/town_02_anticlockwise.launch @@ -18,7 +18,7 @@ - + [one launch line changed; XML lost in extraction] @@ -35,7 +35,7 @@ - + [one launch line changed; XML lost in extraction] diff --git a/behavior_metrics/configs/CARLA_launch_files/town_02_anticlockwise_low.launch b/behavior_metrics/configs/CARLA_launch_files/town_02_anticlockwise_low.launch new file mode 100644 index 00000000..30b69148 --- /dev/null +++ b/behavior_metrics/configs/CARLA_launch_files/town_02_anticlockwise_low.launch @@ -0,0 +1,80 @@ + [80 lines of roslaunch XML lost in extraction; per the file name and the environment.py change below, it sets the quality argument to Low] \ No newline at end of file diff --git a/behavior_metrics/configs/CARLA_launch_files/town_02_anticlockwise_no_gui.launch b/behavior_metrics/configs/CARLA_launch_files/town_02_anticlockwise_no_gui.launch index 867ad933..d0880035 100644 --- a/behavior_metrics/configs/CARLA_launch_files/town_02_anticlockwise_no_gui.launch +++ b/behavior_metrics/configs/CARLA_launch_files/town_02_anticlockwise_no_gui.launch @@ -9,7 +9,8 @@ - + + [one launch line replaced by two; XML lost in extraction] diff --git a/behavior_metrics/configs/CARLA_launch_files/town_02_clockwise.launch b/behavior_metrics/configs/CARLA_launch_files/town_02_clockwise.launch index 5be79bf0..f3f963c4 100644 --- a/behavior_metrics/configs/CARLA_launch_files/town_02_clockwise.launch +++ b/behavior_metrics/configs/CARLA_launch_files/town_02_clockwise.launch @@ -9,7 +9,8 @@ - + + [one launch line replaced by two; XML lost in extraction] diff --git a/behavior_metrics/configs/default-multiple.yml b/behavior_metrics/configs/default-multiple.yml index 226c514b..58da4295 100644 --- a/behavior_metrics/configs/default-multiple.yml +++ b/behavior_metrics/configs/default-multiple.yml @@ -16,10 +16,10 @@ Behaviors: Topic: '/F1ROS/cmd_vel' MaxV: 3 MaxW: 0.3 - BrainPath: ['brains/f1/brain_f1_keras.py', 'brains/f1/brain_f1_keras.py'] + BrainPath: ['brains/f1/brain_f1_follow_line_dqn.py', 'brains/f1/brain_f1_follow_line_qlearn.py'] PilotTimeCycle: 50 Parameters: - Model: ['model_deepest_lstm_cropped_250_norm_max_pooling.h5', 'model_deepest_lstm_cropped_250_norm_test.h5'] +# Model: ['model_deepest_lstm_cropped_250_norm_max_pooling.h5', 'model_deepest_lstm_cropped_250_norm_test.h5'] ImageCropped: True ImageSize: [100,50] ImageNormalized: True @@ -29,8 +29,9 @@ Experiment: Name: "Experiment name" Description: "Experiment description" - Timeout: [30, 30] - Repetitions: 2 + Timeout: [5, 5] + UseWorldTimeouts: [5, 5] + Repetitions: 5 Simulation: World: ['/opt/jderobot/share/jderobot/gazebo/launch/simple_circuit.launch', '/opt/jderobot/share/jderobot/gazebo/launch/many_curves.launch'] RealTimeUpdateRate: 1000 diff --git a/behavior_metrics/configs/default-rl-qlearn.yml b/behavior_metrics/configs/default-rl-qlearn.yml index 3909475b..1f220904 100644 --- a/behavior_metrics/configs/default-rl-qlearn.yml +++ b/behavior_metrics/configs/default-rl-qlearn.yml @@ -17,7 +17,7 @@ MaxV: 3 MaxW: 0.3 - BrainPath: 'brains/f1/brain_f1_follow_line_q_learn.py' + BrainPath: 'brains/f1/brain_f1_follow_line_qlearn.py' PilotTimeCycle: 50 Parameters: ImageTranform: '' diff --git a/behavior_metrics/configs/default_carla.yml b/behavior_metrics/configs/default_carla.yml index bacb14c2..a25024b3 100644 --- a/behavior_metrics/configs/default_carla.yml +++ b/behavior_metrics/configs/default_carla.yml @@ -33,12 +33,13 @@ Behaviors: Topic: '/carla/ego_vehicle/vehicle_control_cmd' MaxV: 3 MaxW: 0.3 - BrainPath: 'brains/CARLA/brain_carla_bird_eye_deep_learning.py' -
PilotTimeCycle: 100 + BrainPath: 'brains/CARLA/tensorflow/brain_carla_bird_eye_deep_learning.py' + PilotTimeCycle: 50 # Turn up to reduce number of control decisions + AsyncMode: True # Set to False to control simulator time Parameters: - Model: '20230126-183457_deepestLSTMTinyPilotNet_CARLA_14_12_dataset_bird_eye_400_epochs_no_flip_3_output_both_directions_all_towns__less_learning_rate_PAPER_cp.h5' + Model: '20230724-125225_pilotnet_CARLA_19_05_dataset_bird_eye_300_epochs_no_flip_3_output_both_directions_all_towns_adam_AFFINE_OPTIMIZATION_PAPER_UPDATED_SHAPE_pretrained_cp.h5' ImageCropped: True - ImageSize: [ 100,50 ] + ImageSize: [ 200,66 ] ImageNormalized: True PredictionsNormalized: True GPU: True @@ -46,7 +47,7 @@ Behaviors: ImageTranform: '' Type: 'CARLA' Simulation: - World: configs/CARLA_launch_files/town_01_anticlockwise.launch + World: configs/CARLA_launch_files/town_02_anticlockwise.launch RandomSpawnPoint: False Dataset: In: '/tmp/my_bag.bag' diff --git a/behavior_metrics/configs/default_carla_autopilot.yml b/behavior_metrics/configs/default_carla_autopilot.yml new file mode 100644 index 00000000..5421f3ff --- /dev/null +++ b/behavior_metrics/configs/default_carla_autopilot.yml @@ -0,0 +1,73 @@ +Behaviors: + Robot: + Sensors: + Cameras: + Camera_0: + Name: 'camera_0' + Topic: '/carla/ego_vehicle/rgb_front/image' + Camera_1: + Name: 'camera_1' + Topic: '/carla/ego_vehicle/rgb_view/image' + Camera_2: + Name: 'camera_2' + Topic: '/carla/ego_vehicle/semantic_segmentation_front/image' + Camera_3: + Name: 'camera_3' + Topic: '/carla/ego_vehicle/dvs_front/image' + Pose3D: + Pose3D_0: + Name: 'pose3d_0' + Topic: '/carla/ego_vehicle/odometry' + BirdEyeView: + BirdEyeView_0: + Name: 'bird_eye_view_0' + Topic: '' + Speedometer: + Speedometer_0: + Name: 'speedometer_0' + Topic: '/carla/ego_vehicle/speedometer' + Actuators: + CARLA_Motors: + Motors_0: + Name: 'motors_0' + Topic: '/carla/ego_vehicle/vehicle_control_cmd' + MaxV: 3 + MaxW: 0.3 + + BrainPath: 'brains/CARLA/brain_carla_autopilot.py' + PilotTimeCycle: 300 + Parameters: + ImageCropped: True + ImageSize: [ 100,50 ] + ImageNormalized: True + PredictionsNormalized: True + GPU: True + UseOptimized: True + ImageTranform: '' + Type: 'CARLA' + RandomSpawnPoint: False + Simulation: + World: configs/CARLA_launch_files/town_01_anticlockwise.launch + Dataset: + In: '/tmp/my_bag.bag' + Out: '' + Stats: + Out: './' + PerfectLap: './perfect_bags/lap-simple-circuit.bag' + Layout: + Frame_0: + Name: frame_0 + Geometry: [1, 1, 1, 1] + Data: rgbimage + Frame_1: + Name: frame_1 + Geometry: [0, 1, 1, 1] + Data: rgbimage + Frame_2: + Name: frame_2 + Geometry: [0, 2, 1, 1] + Data: rgbimage + Frame_3: + Name: frame_3 + Geometry: [1, 2, 1, 1] + Data: rgbimage diff --git a/behavior_metrics/configs/default_carla_multiple.yml b/behavior_metrics/configs/default_carla_multiple.yml index a27cd8be..d1f9a756 100644 --- a/behavior_metrics/configs/default_carla_multiple.yml +++ b/behavior_metrics/configs/default_carla_multiple.yml @@ -35,32 +35,17 @@ Behaviors: MaxW: 0.3 BrainPath: [ - 'brains/CARLA/brain_carla_bird_eye_deep_learning.py', - 'brains/CARLA/brain_carla_bird_eye_deep_learning.py', - - 'brains/CARLA/brain_carla_bird_eye_deep_learning_x3_t_t-4_t-9.py', - 'brains/CARLA/brain_carla_bird_eye_deep_learning_x3_t_t-4_t-9.py', - - 'brains/CARLA/brain_carla_bird_eye_deep_learning_previous_v.py', - 'brains/CARLA/brain_carla_bird_eye_deep_learning_previous_v.py', - - 'brains/CARLA/brain_carla_bird_eye_deep_learning_x3_previous_v_t_t-4_t-9.py', - 
'brains/CARLA/brain_carla_bird_eye_deep_learning_x3_previous_v_t_t-4_t-9.py', + 'brains/CARLA/tensorflow/brain_carla_bird_eye_deep_learning_tensor_rt.py', + 'brains/CARLA/tensorflow/brain_carla_bird_eye_deep_learning_tensor_rt.py', + 'brains/CARLA/tensorflow/brain_carla_bird_eye_deep_learning_tensor_rt.py', ] - PilotTimeCycle: 100 + PilotTimeCycle: 50 + AsyncMode: True Parameters: Model: [ - '20230125-120238_pilotnet_CARLA_14_12_dataset_bird_eye_300_epochs_no_flip_3_output_both_directions_all_towns_adam_AFFINE_PAPER.h5', - '20230126-183457_deepestLSTMTinyPilotNet_CARLA_14_12_dataset_bird_eye_400_epochs_no_flip_3_output_both_directions_all_towns__less_learning_rate_PAPER_cp.h5', - - '20230130-113041_pilotnet_x3_CARLA_14_12_dataset_bird_eye_300_epochs_no_flip_3_output_both_directions_all_towns_sequences_more_more_extreme_t_5_t_10_dataset_AFFINE_PAPER_cp.h5', - '20230127-175740_memDCCP_CARLA_14_12_dataset_bird_eye_300_epochs_no_flip_3_output_both_directions_all_towns_sequences_more_more_extreme_AFFINE_PAPER_cp.h5', - - '20230127-180655_pilotnet_CARLA_17_10_dataset_bird_eye_300_epochs_no_flip_3_output_velocity_all_towns_vel_30_AFFINE.h5', - '20230127-180856_deepestLSTMTinyPilotNet_CARLA_17_10_dataset_bird_eye_300_epochs_no_flip_3_output_velocity_all_towns_vel_30_AFFINE_PAPER.h5', - - '20230130-122642_pilotnet_x3_CARLA_14_12_dataset_bird_eye_300_epochs_no_flip_3_output_velocity_both_directions_all_towns_sequences_more_extreme_previous_V_AFFINE_PAPER_cp.h5', - '20230128-113936_memDCCP_CARLA_14_12_dataset_bird_eye_300_epochs_no_flip_3_output_velocity_both_directions_all_towns_sequences_more_extreme_previous_V_AFFINE_PAPER_cp.h5' + 'optimized_pilotnet_models_tensorflow/pilotnet_tftrt_fp32', + 'optimized_pilotnet_models_tensorflow/pilotnet_tftrt_fp16', + 'optimized_pilotnet_models_tensorflow/pilotnet_tftrt_int8_2', ] ImageCropped: True ImageSize: [ 100,50 ] @@ -74,68 +59,68 @@ Behaviors: Name: "Experiment name" Description: "Experiment description" UseWorldTimeouts: True - Timeout: [10, 10, 10, 10, 10, 10, 10] # for each world! + Timeout: [10] # for each world! 
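The optimized_pilotnet_models_tensorflow/pilotnet_tftrt_* entries listed above are TF-TRT-converted SavedModels. For reference, a conversion along these lines produces such a model (a hedged sketch: the input path and precision mode are illustrative, and INT8 additionally requires a calibration input function, omitted here):

    # Sketch: building a TF-TRT FP16 engine from a PilotNet SavedModel.
    # 'pilotnet_saved_model' is an assumed export of the Keras .h5 model.
    from tensorflow.python.compiler.tensorrt import trt_convert as trt

    params = trt.TrtConversionParams(precision_mode=trt.TrtPrecisionMode.FP16)
    converter = trt.TrtGraphConverterV2(
        input_saved_model_dir='pilotnet_saved_model',
        conversion_params=params)
    converter.convert()  # rewrites supported subgraphs as TensorRT ops
    converter.save('optimized_pilotnet_models_tensorflow/pilotnet_tftrt_fp16')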
Repetitions: 5 Simulation: World: [ - configs/CARLA_launch_files/town_01_anticlockwise_no_gui.launch, + configs/CARLA_launch_files/town_01_anticlockwise_no_gui.launch, configs/CARLA_launch_files/town_02_anticlockwise_no_gui.launch, - configs/CARLA_launch_files/town_03_anticlockwise_no_gui.launch, - configs/CARLA_launch_files/town_04_anticlockwise_no_gui.launch, - configs/CARLA_launch_files/town_05_anticlockwise_no_gui.launch, - configs/CARLA_launch_files/town_06_anticlockwise_no_gui.launch, + configs/CARLA_launch_files/town_03_anticlockwise_no_gui.launch, + configs/CARLA_launch_files/town_04_anticlockwise_no_gui.launch, + configs/CARLA_launch_files/town_05_anticlockwise_no_gui.launch, + configs/CARLA_launch_files/town_06_anticlockwise_no_gui.launch, configs/CARLA_launch_files/town_07_anticlockwise_no_gui.launch ] - RandomSpawnPoint: True + RandomSpawnPoint: False SpawnPoints: [ [ - "10.0, 2.0, 1.37, 0.0, 0.0, 180.0", + "10.0, 2.0, 1.37, 0.0, 0.0, 180.0", "300.0, -330.0, 1.37, 0.0, 0.0, 0.0", "397.0, -50.0, 1.37, 0.0, 0.0, 90.0", "392.0, -50.0, 1.37, 0.0, 0.0, -90.0", "20.0, -327.0, 1.37, 0.0, 0.0, 180.0" - ], - [ - "55.3, -105.6, 1.37, 0.0, 0.0, 180.0", - "-7.0, -270.6, 1.37, 0.0, 0.0, -90.0", - "-3.0, -270.6, 1.37, 0.0, 0.0, 90.0", - "100.0, -303.0, 1.37, 0.0, 0.0, 180.0", - "190.0, -150.0, 1.37, 0.0, 0.0, -90.0" - ], - [ - "246.0, 150.0, 1.37, 0.0, 0.0, 90.0", - "243.0, -100., 1.37, 0.0, 0.0, 90.0", - "-88.0, 170, 1.37, 0.0, 0.0, -90.0", - "232.0, 0.0, 1.37, 0.0, 0.0, -90.0", - "-50.0, 195, 1.37, 0.0, 0.0, 0.0" - ], - [ - "381.5, 60.0, 1.37, 0.0, 0.0, -90.0", - "-16.0, -184.6, 1.37, 0.0, 0.0, -90.0", - "381.5, 60.0, 1.37, 0.0, 0.0, -90.0", - "-16.0, -184.6, 1.37, 0.0, 0.0, -90.0", - "381.5, 60.0, 1.37, 0.0, 0.0, -90.0" - ], - [ - "20, -187.5, 1.37, 0.0, 0.0, 180.0", - "210.1, -87.3, 1.37, 0.0, 0.0, 90.0", - "189, -87.3, 1.37, 0.0, 0.0, -90.0", - "20, -187.5, 1.37, 0.0, 0.0, 180.0", - "210.1, -87.3, 1.37, 0.0, 0.0, 90.0" - ], - [ - "659.0, -70.5, 1.37, 0.0, 0.0, -90.0", - "351.5, 10.5, 1.37, 0.0, 0.0, 0.0", - "351.5, 24.5, 1.37, 0.0, 0.0, 180.0", - "672.5, -70.5, 1.37, 0.0, 0.0, 90.0", - "659.0, -70.5, 1.37, 0.0, 0.0, -90.0" - ], - [ - "-3.0, 243.0, 1.37, 0.0, 0.0, 180.0", - "70.5, 5.0, 1.37, 0.0, 0.0, 60.0", - "-184.5, -107.2, 1.37, 0.0, 0.0, 180.0", - "-3.0, 243.0, 1.37, 0.0, 0.0, 180.0", - "70.5, 5.0, 1.37, 0.0, 0.0, 60.0" + ], + [ + "55.3, -105.6, 1.37, 0.0, 0.0, 180.0", + "-7.0, -270.6, 1.37, 0.0, 0.0, -90.0", + "-3.0, -270.6, 1.37, 0.0, 0.0, 90.0", + "100.0, -303.0, 1.37, 0.0, 0.0, 180.0", + "190.0, -150.0, 1.37, 0.0, 0.0, -90.0" + ], + [ + "246.0, 150.0, 1.37, 0.0, 0.0, 90.0", + "243.0, -100., 1.37, 0.0, 0.0, 90.0", + "-88.0, 170, 1.37, 0.0, 0.0, -90.0", + "232.0, 0.0, 1.37, 0.0, 0.0, -90.0", + "-50.0, 195, 1.37, 0.0, 0.0, 0.0" + ], + [ + "381.5, 60.0, 1.37, 0.0, 0.0, -90.0", + "-16.0, -184.6, 1.37, 0.0, 0.0, -90.0", + "381.5, 60.0, 1.37, 0.0, 0.0, -90.0", + "-16.0, -184.6, 1.37, 0.0, 0.0, -90.0", + "381.5, 60.0, 1.37, 0.0, 0.0, -90.0" + ], + [ + "20, -187.5, 1.37, 0.0, 0.0, 180.0", + "210.1, -87.3, 1.37, 0.0, 0.0, 90.0", + "189, -87.3, 1.37, 0.0, 0.0, -90.0", + "20, -187.5, 1.37, 0.0, 0.0, 180.0", + "210.1, -87.3, 1.37, 0.0, 0.0, 90.0" + ], + [ + "659.0, -70.5, 1.37, 0.0, 0.0, -90.0", + "351.5, 10.5, 1.37, 0.0, 0.0, 0.0", + "351.5, 24.5, 1.37, 0.0, 0.0, 180.0", + "672.5, -70.5, 1.37, 0.0, 0.0, 90.0", + "659.0, -70.5, 1.37, 0.0, 0.0, -90.0" + ], + [ + "-3.0, 243.0, 1.37, 0.0, 0.0, 180.0", + "70.5, 5.0, 1.37, 0.0, 0.0, 60.0", + "-184.5, -107.2, 1.37, 0.0, 0.0, 180.0", + "-3.0, 
243.0, 1.37, 0.0, 0.0, 180.0", + "70.5, 5.0, 1.37, 0.0, 0.0, 60.0" ] ] Dataset: diff --git a/behavior_metrics/configs/default_carla_multiple_simple.yml b/behavior_metrics/configs/default_carla_multiple_simple.yml index 3daed2e5..5b371e25 100644 --- a/behavior_metrics/configs/default_carla_multiple_simple.yml +++ b/behavior_metrics/configs/default_carla_multiple_simple.yml @@ -31,7 +31,7 @@ Behaviors: MaxW: 0.3 BrainPath: [ - 'brains/CARLA/brain_carla_bird_eye_deep_learning.py' + 'brains/CARLA/tensorflow/brain_carla_bird_eye_deep_learning.py' ] PilotTimeCycle: 100 Parameters: diff --git a/behavior_metrics/configs/default_carla_multiple_tensorflow_optimizations.yml b/behavior_metrics/configs/default_carla_multiple_tensorflow_optimizations.yml new file mode 100644 index 00000000..bb014e02 --- /dev/null +++ b/behavior_metrics/configs/default_carla_multiple_tensorflow_optimizations.yml @@ -0,0 +1,120 @@ +Behaviors: + Robot: + Sensors: + Cameras: + Camera_0: + Name: 'camera_0' + Topic: '/carla/ego_vehicle/rgb_front/image' + Camera_1: + Name: 'camera_1' + Topic: '/carla/ego_vehicle/rgb_view/image' + Camera_2: + Name: 'camera_2' + Topic: '/carla/ego_vehicle/semantic_segmentation_front/image' + Camera_3: + Name: 'camera_3' + Topic: '/carla/ego_vehicle/dvs_front/image' + Pose3D: + Pose3D_0: + Name: 'pose3d_0' + Topic: '/carla/ego_vehicle/odometry' + BirdEyeView: + BirdEyeView_0: + Name: 'bird_eye_view_0' + Topic: '' + Speedometer: + Speedometer_0: + Name: 'speedometer_0' + Topic: '/carla/ego_vehicle/speedometer' + Actuators: + CARLA_Motors: + Motors_0: + Name: 'motors_0' + Topic: '/carla/ego_vehicle/vehicle_control_cmd' + MaxV: 3 + MaxW: 0.3 + + BrainPath: [ + 'brains/CARLA/tensorflow/brain_carla_bird_eye_deep_learning.py', + + 'brains/CARLA/tensorflow/brain_carla_bird_eye_deep_learning_tf_lite.py', + 'brains/CARLA/tensorflow/brain_carla_bird_eye_deep_learning_tf_lite.py', + 'brains/CARLA/tensorflow/brain_carla_bird_eye_deep_learning_tf_lite.py', + 'brains/CARLA/tensorflow/brain_carla_bird_eye_deep_learning_tf_lite.py', + 'brains/CARLA/tensorflow/brain_carla_bird_eye_deep_learning_tf_lite.py', + 'brains/CARLA/tensorflow/brain_carla_bird_eye_deep_learning_tf_lite.py', + 'brains/CARLA/tensorflow/brain_carla_bird_eye_deep_learning_tf_lite.py', + 'brains/CARLA/tensorflow/brain_carla_bird_eye_deep_learning_tf_lite.py', + 'brains/CARLA/tensorflow/brain_carla_bird_eye_deep_learning_tf_lite.py', + 'brains/CARLA/tensorflow/brain_carla_bird_eye_deep_learning_tf_lite.py', + 'brains/CARLA/tensorflow/brain_carla_bird_eye_deep_learning_tf_lite.py', + ] + PilotTimeCycle: 50 + AsyncMode: True + Parameters: + Model: [ + 'pilotnet.h5', + + 'optimized_pilotnet_models_tensorflow/pilotnet_model.tflite', + 'optimized_pilotnet_models_tensorflow/pilotnet_dynamic_quant.tflite', + 'optimized_pilotnet_models_tensorflow/pilotnet_int_quant.tflite', + 'optimized_pilotnet_models_tensorflow/pilotnet_intflt_quant.tflite', + 'optimized_pilotnet_models_tensorflow/pilotnet_float16_quant.tflite', + 'optimized_pilotnet_models_tensorflow/pilotnet_quant_aware.tflite', + 'optimized_pilotnet_models_tensorflow/pilotnet_pruned.tflite', + 'optimized_pilotnet_models_tensorflow/pilotnet_pruned_quan.tflite', + 'optimized_pilotnet_models_tensorflow/pilotnet_cqat_model.tflite', + 'optimized_pilotnet_models_tensorflow/pilotnet_pqat_model.tflite', + 'optimized_pilotnet_models_tensorflow/pilotnet_pcqat_model.tflite' + ] + ImageCropped: True + ImageSize: [ 100,50 ] + ImageNormalized: True + PredictionsNormalized: True + GPU: True + UseOptimized: True + 
ImageTranform: '' + Type: 'CARLA' + Experiment: + Name: "Experiment name" + Description: "Experiment description" + UseWorldTimeouts: True + Timeout: [10] # for each world! + Repetitions: 5 + Simulation: + World: [ + configs/CARLA_launch_files/town_02_anticlockwise_no_gui.launch, + ] + RandomSpawnPoint: False + SpawnPoints: [ + [ + "-7.0, -270.6, 1.37, 0.0, 0.0, -90.0", + "-7.0, -270.6, 1.37, 0.0, 0.0, -90.0", + "-7.0, -270.6, 1.37, 0.0, 0.0, -90.0", + "-7.0, -270.6, 1.37, 0.0, 0.0, -90.0", + "-7.0, -270.6, 1.37, 0.0, 0.0, -90.0" + ] + ] + Dataset: + In: '/tmp/my_bag.bag' + Out: '' + Stats: + Out: './' + PerfectLap: './perfect_bags/lap-simple-circuit.bag' + Layout: + Frame_0: + Name: frame_0 + Geometry: [1, 1, 1, 1] + Data: rgbimage + Frame_1: + Name: frame_1 + Geometry: [0, 1, 1, 1] + Data: rgbimage + Frame_2: + Name: frame_2 + Geometry: [0, 2, 1, 1] + Data: rgbimage + Frame_3: + Name: frame_3 + Geometry: [1, 2, 1, 1] + Data: rgbimage diff --git a/behavior_metrics/configs/default_carla_parked_bike.yml b/behavior_metrics/configs/default_carla_parked_bike.yml new file mode 100644 index 00000000..340db817 --- /dev/null +++ b/behavior_metrics/configs/default_carla_parked_bike.yml @@ -0,0 +1,74 @@ +Behaviors: + Robot: + Sensors: + Cameras: + Camera_0: + Name: 'camera_0' + Topic: '/carla/ego_vehicle/rgb_front/image' + Camera_1: + Name: 'camera_1' + Topic: '/carla/ego_vehicle/rgb_view/image' + Camera_2: + Name: 'camera_2' + Topic: '/carla/ego_vehicle/semantic_segmentation_front/image' + Camera_3: + Name: 'camera_3' + Topic: '/carla/ego_vehicle/dvs_front/image' + Pose3D: + Pose3D_0: + Name: 'pose3d_0' + Topic: '/carla/ego_vehicle/odometry' + BirdEyeView: + BirdEyeView_0: + Name: 'bird_eye_view_0' + Topic: '' + Speedometer: + Speedometer_0: + Name: 'speedometer_0' + Topic: '/carla/ego_vehicle/speedometer' + Actuators: + CARLA_Motors: + Motors_0: + Name: 'motors_0' + Topic: '/carla/ego_vehicle/vehicle_control_cmd' + MaxV: 3 + MaxW: 0.3 + BrainPath: 'brains/CARLA/tensorflow/brain_carla_bird_eye_deep_learning.py' + PilotTimeCycle: 100 + AsyncMode: False + Parameters: + Model: '20230126-183457_deepestLSTMTinyPilotNet_CARLA_14_12_dataset_bird_eye_400_epochs_no_flip_3_output_both_directions_all_towns__less_learning_rate_PAPER_cp.h5' + ImageCropped: True + ImageSize: [ 100,50 ] + ImageNormalized: True + PredictionsNormalized: True + GPU: True + UseOptimized: True + ImageTranform: '' + Type: 'CARLA' + Simulation: + World: configs/CARLA_launch_files/town_01_anticlockwise_parked_bike.launch + RandomSpawnPoint: False + Dataset: + In: '/tmp/my_bag.bag' + Out: '' + Stats: + Out: './' + PerfectLap: './perfect_bags/lap-simple-circuit.bag' + Layout: + Frame_0: + Name: frame_0 + Geometry: [1, 1, 1, 1] + Data: rgbimage + Frame_1: + Name: frame_1 + Geometry: [0, 1, 1, 1] + Data: rgbimage + Frame_2: + Name: frame_2 + Geometry: [0, 2, 1, 1] + Data: rgbimage + Frame_3: + Name: frame_3 + Geometry: [1, 2, 1, 1] + Data: rgbimage diff --git a/behavior_metrics/configs/default_carla_parked_bike_car.yml b/behavior_metrics/configs/default_carla_parked_bike_car.yml new file mode 100644 index 00000000..7761704d --- /dev/null +++ b/behavior_metrics/configs/default_carla_parked_bike_car.yml @@ -0,0 +1,74 @@ +Behaviors: + Robot: + Sensors: + Cameras: + Camera_0: + Name: 'camera_0' + Topic: '/carla/ego_vehicle/rgb_front/image' + Camera_1: + Name: 'camera_1' + Topic: '/carla/ego_vehicle/rgb_view/image' + Camera_2: + Name: 'camera_2' + Topic: '/carla/ego_vehicle/semantic_segmentation_front/image' + Camera_3: + Name: 'camera_3' + 
Topic: '/carla/ego_vehicle/dvs_front/image' + Pose3D: + Pose3D_0: + Name: 'pose3d_0' + Topic: '/carla/ego_vehicle/odometry' + BirdEyeView: + BirdEyeView_0: + Name: 'bird_eye_view_0' + Topic: '' + Speedometer: + Speedometer_0: + Name: 'speedometer_0' + Topic: '/carla/ego_vehicle/speedometer' + Actuators: + CARLA_Motors: + Motors_0: + Name: 'motors_0' + Topic: '/carla/ego_vehicle/vehicle_control_cmd' + MaxV: 3 + MaxW: 0.3 + BrainPath: 'brains/CARLA/tensorflow/brain_carla_bird_eye_deep_learning.py' + PilotTimeCycle: 100 + AsyncMode: False + Parameters: + Model: '20230126-183457_deepestLSTMTinyPilotNet_CARLA_14_12_dataset_bird_eye_400_epochs_no_flip_3_output_both_directions_all_towns__less_learning_rate_PAPER_cp.h5' + ImageCropped: True + ImageSize: [ 100,50 ] + ImageNormalized: True + PredictionsNormalized: True + GPU: True + UseOptimized: True + ImageTranform: '' + Type: 'CARLA' + Simulation: + World: configs/CARLA_launch_files/town_01_anticlockwise_parked_bike_car.launch + RandomSpawnPoint: False + Dataset: + In: '/tmp/my_bag.bag' + Out: '' + Stats: + Out: './' + PerfectLap: './perfect_bags/lap-simple-circuit.bag' + Layout: + Frame_0: + Name: frame_0 + Geometry: [1, 1, 1, 1] + Data: rgbimage + Frame_1: + Name: frame_1 + Geometry: [0, 1, 1, 1] + Data: rgbimage + Frame_2: + Name: frame_2 + Geometry: [0, 2, 1, 1] + Data: rgbimage + Frame_3: + Name: frame_3 + Geometry: [1, 2, 1, 1] + Data: rgbimage diff --git a/behavior_metrics/configs/default_carla_parked_vehicle.yml b/behavior_metrics/configs/default_carla_parked_vehicle.yml index 0a71b604..e99f3656 100644 --- a/behavior_metrics/configs/default_carla_parked_vehicle.yml +++ b/behavior_metrics/configs/default_carla_parked_vehicle.yml @@ -33,8 +33,9 @@ Behaviors: Topic: '/carla/ego_vehicle/vehicle_control_cmd' MaxV: 3 MaxW: 0.3 - BrainPath: 'brains/CARLA/brain_carla_bird_eye_deep_learning.py' + BrainPath: 'brains/CARLA/tensorflow/brain_carla_bird_eye_deep_learning.py' PilotTimeCycle: 100 + AsyncMode: False Parameters: Model: '20230126-183457_deepestLSTMTinyPilotNet_CARLA_14_12_dataset_bird_eye_400_epochs_no_flip_3_output_both_directions_all_towns__less_learning_rate_PAPER_cp.h5' ImageCropped: True diff --git a/behavior_metrics/configs/default_carla_pedestrian.yml b/behavior_metrics/configs/default_carla_pedestrian.yml new file mode 100644 index 00000000..92d463ae --- /dev/null +++ b/behavior_metrics/configs/default_carla_pedestrian.yml @@ -0,0 +1,74 @@ +Behaviors: + Robot: + Sensors: + Cameras: + Camera_0: + Name: 'camera_0' + Topic: '/carla/ego_vehicle/rgb_front/image' + Camera_1: + Name: 'camera_1' + Topic: '/carla/ego_vehicle/rgb_view/image' + Camera_2: + Name: 'camera_2' + Topic: '/carla/ego_vehicle/semantic_segmentation_front/image' + Camera_3: + Name: 'camera_3' + Topic: '/carla/ego_vehicle/dvs_front/image' + Pose3D: + Pose3D_0: + Name: 'pose3d_0' + Topic: '/carla/ego_vehicle/odometry' + BirdEyeView: + BirdEyeView_0: + Name: 'bird_eye_view_0' + Topic: '' + Speedometer: + Speedometer_0: + Name: 'speedometer_0' + Topic: '/carla/ego_vehicle/speedometer' + Actuators: + CARLA_Motors: + Motors_0: + Name: 'motors_0' + Topic: '/carla/ego_vehicle/vehicle_control_cmd' + MaxV: 3 + MaxW: 0.3 + BrainPath: 'brains/CARLA/tensorflow/brain_carla_bird_eye_deep_learning.py' + PilotTimeCycle: 100 + AsyncMode: False + Parameters: + Model: '20230126-183457_deepestLSTMTinyPilotNet_CARLA_14_12_dataset_bird_eye_400_epochs_no_flip_3_output_both_directions_all_towns__less_learning_rate_PAPER_cp.h5' + ImageCropped: True + ImageSize: [ 100,50 ] + ImageNormalized: True 
+ PredictionsNormalized: True + GPU: True + UseOptimized: True + ImageTranform: '' + Type: 'CARLA' + Simulation: + World: configs/CARLA_launch_files/town_01_anticlockwise_pedestrian.launch + RandomSpawnPoint: False + Dataset: + In: '/tmp/my_bag.bag' + Out: '' + Stats: + Out: './' + PerfectLap: './perfect_bags/lap-simple-circuit.bag' + Layout: + Frame_0: + Name: frame_0 + Geometry: [1, 1, 1, 1] + Data: rgbimage + Frame_1: + Name: frame_1 + Geometry: [0, 1, 1, 1] + Data: rgbimage + Frame_2: + Name: frame_2 + Geometry: [0, 2, 1, 1] + Data: rgbimage + Frame_3: + Name: frame_3 + Geometry: [1, 2, 1, 1] + Data: rgbimage diff --git a/behavior_metrics/configs/default_carla_pedestrian_parked_bike_car.yml b/behavior_metrics/configs/default_carla_pedestrian_parked_bike_car.yml new file mode 100644 index 00000000..d3d36a8f --- /dev/null +++ b/behavior_metrics/configs/default_carla_pedestrian_parked_bike_car.yml @@ -0,0 +1,74 @@ +Behaviors: + Robot: + Sensors: + Cameras: + Camera_0: + Name: 'camera_0' + Topic: '/carla/ego_vehicle/rgb_front/image' + Camera_1: + Name: 'camera_1' + Topic: '/carla/ego_vehicle/rgb_view/image' + Camera_2: + Name: 'camera_2' + Topic: '/carla/ego_vehicle/semantic_segmentation_front/image' + Camera_3: + Name: 'camera_3' + Topic: '/carla/ego_vehicle/dvs_front/image' + Pose3D: + Pose3D_0: + Name: 'pose3d_0' + Topic: '/carla/ego_vehicle/odometry' + BirdEyeView: + BirdEyeView_0: + Name: 'bird_eye_view_0' + Topic: '' + Speedometer: + Speedometer_0: + Name: 'speedometer_0' + Topic: '/carla/ego_vehicle/speedometer' + Actuators: + CARLA_Motors: + Motors_0: + Name: 'motors_0' + Topic: '/carla/ego_vehicle/vehicle_control_cmd' + MaxV: 3 + MaxW: 0.3 + BrainPath: 'brains/CARLA/tensorflow/brain_carla_bird_eye_deep_learning.py' + PilotTimeCycle: 100 + AsyncMode: False + Parameters: + Model: '20230126-183457_deepestLSTMTinyPilotNet_CARLA_14_12_dataset_bird_eye_400_epochs_no_flip_3_output_both_directions_all_towns__less_learning_rate_PAPER_cp.h5' + ImageCropped: True + ImageSize: [ 100,50 ] + ImageNormalized: True + PredictionsNormalized: True + GPU: True + UseOptimized: True + ImageTranform: '' + Type: 'CARLA' + Simulation: + World: configs/CARLA_launch_files/town_01_anticlockwise_pedestrian_parked_bike_car.launch + RandomSpawnPoint: False + Dataset: + In: '/tmp/my_bag.bag' + Out: '' + Stats: + Out: './' + PerfectLap: './perfect_bags/lap-simple-circuit.bag' + Layout: + Frame_0: + Name: frame_0 + Geometry: [1, 1, 1, 1] + Data: rgbimage + Frame_1: + Name: frame_1 + Geometry: [0, 1, 1, 1] + Data: rgbimage + Frame_2: + Name: frame_2 + Geometry: [0, 2, 1, 1] + Data: rgbimage + Frame_3: + Name: frame_3 + Geometry: [1, 2, 1, 1] + Data: rgbimage diff --git a/behavior_metrics/configs/default_carla_single_ad_npc.yml b/behavior_metrics/configs/default_carla_single_ad_npc.yml new file mode 100644 index 00000000..6b4eb063 --- /dev/null +++ b/behavior_metrics/configs/default_carla_single_ad_npc.yml @@ -0,0 +1,75 @@ +Behaviors: + Robot: + Sensors: + Cameras: + Camera_0: + Name: 'camera_0' + Topic: '/carla/ego_vehicle/rgb_front/image' + Camera_1: + Name: 'camera_1' + Topic: '/carla/ego_vehicle/rgb_view/image' + Camera_2: + Name: 'camera_2' + Topic: '/carla/ego_vehicle/semantic_segmentation_front/image' + Camera_3: + Name: 'camera_3' + Topic: '/carla/ego_vehicle/dvs_front/image' + Pose3D: + Pose3D_0: + Name: 'pose3d_0' + Topic: '/carla/ego_vehicle/odometry' + BirdEyeView: + BirdEyeView_0: + Name: 'bird_eye_view_0' + Topic: '' + Speedometer: + Speedometer_0: + Name: 'speedometer_0' + Topic: 
'/carla/ego_vehicle/speedometer' + Actuators: + CARLA_Motors: + Motors_0: + Name: 'motors_0' + Topic: '/carla/ego_vehicle/vehicle_control_cmd' + MaxV: 3 + MaxW: 0.3 + BrainPath: 'brains/CARLA/tensorflow/brain_carla_bird_eye_deep_learning.py' + PilotTimeCycle: 50 + AsyncMode: False + Parameters: + Model: '20230126-183457_deepestLSTMTinyPilotNet_CARLA_14_12_dataset_bird_eye_400_epochs_no_flip_3_output_both_directions_all_towns__less_learning_rate_PAPER_cp.h5' + ImageCropped: True + ImageSize: [ 100,50 ] + ImageNormalized: True + PredictionsNormalized: True + GPU: True + UseOptimized: True + ImageTranform: '' + Type: 'CARLA' + Simulation: + World: configs/CARLA_launch_files/town_01_anticlockwise_single_ad_npc.launch + WaypointPublisher: configs/CARLA_launch_files/single_ad_npc_waypoint_publisher.launch + RandomSpawnPoint: False + Dataset: + In: '/tmp/my_bag.bag' + Out: '' + Stats: + Out: './' + PerfectLap: './perfect_bags/lap-simple-circuit.bag' + Layout: + Frame_0: + Name: frame_0 + Geometry: [1, 1, 1, 1] + Data: rgbimage + Frame_1: + Name: frame_1 + Geometry: [0, 1, 1, 1] + Data: rgbimage + Frame_2: + Name: frame_2 + Geometry: [0, 2, 1, 1] + Data: rgbimage + Frame_3: + Name: frame_3 + Geometry: [1, 2, 1, 1] + Data: rgbimage diff --git a/behavior_metrics/configs/default_carla_subjective_vision.yml b/behavior_metrics/configs/default_carla_subjective_vision.yml new file mode 100644 index 00000000..6215ddeb --- /dev/null +++ b/behavior_metrics/configs/default_carla_subjective_vision.yml @@ -0,0 +1,74 @@ +Behaviors: + Robot: + Sensors: + Cameras: + Camera_0: + Name: 'camera_0' + Topic: '/carla/ego_vehicle/rgb_front/image' + Camera_1: + Name: 'camera_1' + Topic: '/carla/ego_vehicle/rgb_view/image' + Camera_2: + Name: 'camera_2' + Topic: '/carla/ego_vehicle/semantic_segmentation_front/image' + Camera_3: + Name: 'camera_3' + Topic: '/carla/ego_vehicle/dvs_front/image' + Pose3D: + Pose3D_0: + Name: 'pose3d_0' + Topic: '/carla/ego_vehicle/odometry' + BirdEyeView: + BirdEyeView_0: + Name: 'bird_eye_view_0' + Topic: '' + Speedometer: + Speedometer_0: + Name: 'speedometer_0' + Topic: '/carla/ego_vehicle/speedometer' + Actuators: + CARLA_Motors: + Motors_0: + Name: 'motors_0' + Topic: '/carla/ego_vehicle/vehicle_control_cmd' + MaxV: 3 + MaxW: 0.3 + BrainPath: 'brains/CARLA/brain_carla_subjective_vision_deep_learning_previous_v.py' + PilotTimeCycle: 100 + AsyncMode: False + Parameters: + Model: '20230428-103315_pilotnet_model_3_151_cp.h5' + ImageCropped: True + ImageSize: [ 200,66 ] + ImageNormalized: True + PredictionsNormalized: True + GPU: True + UseOptimized: True + ImageTranform: '' + Type: 'CARLA' + Simulation: + World: configs/CARLA_launch_files/town_02_anticlockwise_low.launch + RandomSpawnPoint: False + Dataset: + In: '/tmp/my_bag.bag' + Out: '' + Stats: + Out: './' + PerfectLap: './perfect_bags/lap-simple-circuit.bag' + Layout: + Frame_0: + Name: frame_0 + Geometry: [1, 1, 1, 1] + Data: rgbimage + Frame_1: + Name: frame_1 + Geometry: [0, 1, 1, 1] + Data: rgbimage + Frame_2: + Name: frame_2 + Geometry: [0, 2, 1, 1] + Data: rgbimage + Frame_3: + Name: frame_3 + Geometry: [1, 2, 1, 1] + Data: rgbimage diff --git a/behavior_metrics/configs/default_carla_tensor_rt.yml b/behavior_metrics/configs/default_carla_tensor_rt.yml index ae173095..6034475d 100644 --- a/behavior_metrics/configs/default_carla_tensor_rt.yml +++ b/behavior_metrics/configs/default_carla_tensor_rt.yml @@ -33,12 +33,13 @@ Behaviors: Topic: '/carla/ego_vehicle/vehicle_control_cmd' MaxV: 3 MaxW: 0.3 - BrainPath: 
'brains/CARLA/brain_carla_bird_eye_deep_learning_tensor_rt.py' + BrainPath: 'brains/CARLA/tensorflow/brain_carla_bird_eye_deep_learning_tensor_rt.py' PilotTimeCycle: 1 + AsyncMode: False Parameters: - Model: 'pilotnet_tftrt_int8' + Model: 'optimized_pilotnet_models_tensorflow/pilotnet_tftrt_int8_2' ImageCropped: True - ImageSize: [ 100,50 ] + ImageSize: [ 200,66 ] ImageNormalized: True PredictionsNormalized: True GPU: True @@ -46,7 +47,7 @@ Behaviors: ImageTranform: '' Type: 'CARLA' Simulation: - World: configs/CARLA_launch_files/town_01_anticlockwise.launch + World: configs/CARLA_launch_files/town_02_anticlockwise.launch RandomSpawnPoint: False Dataset: In: '/tmp/my_bag.bag' diff --git a/behavior_metrics/configs/default_carla_tf_lite.yml b/behavior_metrics/configs/default_carla_tf_lite.yml index f3f1b38e..e69de29b 100644 --- a/behavior_metrics/configs/default_carla_tf_lite.yml +++ b/behavior_metrics/configs/default_carla_tf_lite.yml @@ -1,73 +0,0 @@ -Behaviors: - Robot: - Sensors: - Cameras: - Camera_0: - Name: 'camera_0' - Topic: '/carla/ego_vehicle/rgb_front/image' - Camera_1: - Name: 'camera_1' - Topic: '/carla/ego_vehicle/rgb_view/image' - Camera_2: - Name: 'camera_2' - Topic: '/carla/ego_vehicle/semantic_segmentation_front/image' - Camera_3: - Name: 'camera_3' - Topic: '/carla/ego_vehicle/dvs_front/image' - Pose3D: - Pose3D_0: - Name: 'pose3d_0' - Topic: '/carla/ego_vehicle/odometry' - BirdEyeView: - BirdEyeView_0: - Name: 'bird_eye_view_0' - Topic: '' - Speedometer: - Speedometer_0: - Name: 'speedometer_0' - Topic: '/carla/ego_vehicle/speedometer' - Actuators: - CARLA_Motors: - Motors_0: - Name: 'motors_0' - Topic: '/carla/ego_vehicle/vehicle_control_cmd' - MaxV: 3 - MaxW: 0.3 - BrainPath: 'brains/CARLA/brain_carla_bird_eye_deep_learning_tf_lite.py' - PilotTimeCycle: 1 - Parameters: - Model: 'optimized_pilotnet_models/pilotnet_dynamic_quant.tflite' - ImageCropped: True - ImageSize: [ 100,50 ] - ImageNormalized: True - PredictionsNormalized: True - GPU: True - UseOptimized: True - ImageTranform: '' - Type: 'CARLA' - Simulation: - World: configs/CARLA_launch_files/town_01_anticlockwise.launch - RandomSpawnPoint: False - Dataset: - In: '/tmp/my_bag.bag' - Out: '' - Stats: - Out: './' - PerfectLap: './perfect_bags/lap-simple-circuit.bag' - Layout: - Frame_0: - Name: frame_0 - Geometry: [1, 1, 1, 1] - Data: rgbimage - Frame_1: - Name: frame_1 - Geometry: [0, 1, 1, 1] - Data: rgbimage - Frame_2: - Name: frame_2 - Geometry: [0, 2, 1, 1] - Data: rgbimage - Frame_3: - Name: frame_3 - Geometry: [1, 2, 1, 1] - Data: rgbimage diff --git a/behavior_metrics/configs/default_carla_torch.yml b/behavior_metrics/configs/default_carla_torch.yml index cdfdee4d..4479ddad 100644 --- a/behavior_metrics/configs/default_carla_torch.yml +++ b/behavior_metrics/configs/default_carla_torch.yml @@ -33,10 +33,11 @@ Behaviors: Topic: '/carla/ego_vehicle/vehicle_control_cmd' MaxV: 3 MaxW: 0.3 - BrainPath: 'brains/CARLA/brain_carla_bird_eye_deep_learning_torch.py' - PilotTimeCycle: 100 + BrainPath: 'brains/CARLA/pytorch/brain_carla_bird_eye_deep_learning_torch.py' + PilotTimeCycle: 50 + AsyncMode: True Parameters: - Model: 'pilot_net_model_best_123.pth' + Model: 'pilotnet_model.pth' ImageCropped: True ImageSize: [ 100,50 ] ImageNormalized: True @@ -46,7 +47,7 @@ Behaviors: ImageTranform: '' Type: 'CARLA' Simulation: - World: configs/CARLA_launch_files/town_01_anticlockwise.launch + World: configs/CARLA_launch_files/town_02_anticlockwise.launch RandomSpawnPoint: False Dataset: In: '/tmp/my_bag.bag' diff --git 
a/behavior_metrics/models/rl_models/dqn/DQN_sp_16x16_LAPCOMPLETED_Max569_Epoch1455_inTime20230413-103523.model/fingerprint.pb b/behavior_metrics/models/rl_models/dqn/DQN_sp_16x16_LAPCOMPLETED_Max569_Epoch1455_inTime20230413-103523.model/fingerprint.pb new file mode 100644 index 00000000..902cdbea Binary files /dev/null and b/behavior_metrics/models/rl_models/dqn/DQN_sp_16x16_LAPCOMPLETED_Max569_Epoch1455_inTime20230413-103523.model/fingerprint.pb differ diff --git a/behavior_metrics/models/rl_models/dqn/DQN_sp_16x16_LAPCOMPLETED_Max569_Epoch1455_inTime20230413-103523.model/keras_metadata.pb b/behavior_metrics/models/rl_models/dqn/DQN_sp_16x16_LAPCOMPLETED_Max569_Epoch1455_inTime20230413-103523.model/keras_metadata.pb new file mode 100644 index 00000000..34cd4e36 --- /dev/null +++ b/behavior_metrics/models/rl_models/dqn/DQN_sp_16x16_LAPCOMPLETED_Max569_Epoch1455_inTime20230413-103523.model/keras_metadata.pb @@ -0,0 +1,7 @@ + +ý)root"_tf_keras_network*Û){"name": "model", "trainable": true, "expects_training_arg": true, "dtype": "float32", "batch_input_shape": null, "must_restore_from_config": false, "preserve_input_structure_in_config": false, "autocast": false, "class_name": "Functional", "config": {"name": "model", "layers": [{"class_name": "InputLayer", "config": {"batch_input_shape": {"class_name": "__tuple__", "items": [null, 1]}, "dtype": "float32", "sparse": false, "ragged": false, "name": "input_1"}, "name": "input_1", "inbound_nodes": []}, {"class_name": "Dense", "config": {"name": "dense", "trainable": true, "dtype": "float32", "units": 16, "activation": "relu", "use_bias": true, "kernel_initializer": {"class_name": "GlorotUniform", "config": {"seed": null}}, "bias_initializer": {"class_name": "Zeros", "config": {}}, "kernel_regularizer": null, "bias_regularizer": null, "activity_regularizer": null, "kernel_constraint": null, "bias_constraint": null}, "name": "dense", "inbound_nodes": [[["input_1", 0, 0, {}]]]}, {"class_name": "Dense", "config": {"name": "dense_1", "trainable": true, "dtype": "float32", "units": 16, "activation": "relu", "use_bias": true, "kernel_initializer": {"class_name": "GlorotUniform", "config": {"seed": null}}, "bias_initializer": {"class_name": "Zeros", "config": {}}, "kernel_regularizer": null, "bias_regularizer": null, "activity_regularizer": null, "kernel_constraint": null, "bias_constraint": null}, "name": "dense_1", "inbound_nodes": [[["dense", 0, 0, {}]]]}, {"class_name": "Dense", "config": {"name": "dense_2", "trainable": true, "dtype": "float32", "units": 3, "activation": "linear", "use_bias": true, "kernel_initializer": {"class_name": "GlorotUniform", "config": {"seed": null}}, "bias_initializer": {"class_name": "Zeros", "config": {}}, "kernel_regularizer": null, "bias_regularizer": null, "activity_regularizer": null, "kernel_constraint": null, "bias_constraint": null}, "name": "dense_2", "inbound_nodes": [[["dense_1", 0, 0, {}]]]}], "input_layers": [["input_1", 0, 0]], "output_layers": [["dense_2", 0, 0]]}, "shared_object_id": 10, "input_spec": [{"class_name": "InputSpec", "config": {"dtype": null, "shape": {"class_name": "__tuple__", "items": [null, 1]}, "ndim": 2, "max_ndim": null, "min_ndim": null, "axes": {}}}], "build_input_shape": {"class_name": "TensorShape", "items": [null, 1]}, "is_graph_network": true, "full_save_spec": {"class_name": "__tuple__", "items": [[{"class_name": "TypeSpec", "type_spec": "tf.TensorSpec", "serialized": [{"class_name": "TensorShape", "items": [null, 1]}, "float32", "input_1"]}], {}]}, "save_spec": {"class_name": 
"TypeSpec", "type_spec": "tf.TensorSpec", "serialized": [{"class_name": "TensorShape", "items": [null, 1]}, "float32", "input_1"]}, "keras_version": "2.11.0", "backend": "tensorflow", "model_config": {"class_name": "Functional", "config": {"name": "model", "layers": [{"class_name": "InputLayer", "config": {"batch_input_shape": {"class_name": "__tuple__", "items": [null, 1]}, "dtype": "float32", "sparse": false, "ragged": false, "name": "input_1"}, "name": "input_1", "inbound_nodes": [], "shared_object_id": 0}, {"class_name": "Dense", "config": {"name": "dense", "trainable": true, "dtype": "float32", "units": 16, "activation": "relu", "use_bias": true, "kernel_initializer": {"class_name": "GlorotUniform", "config": {"seed": null}, "shared_object_id": 1}, "bias_initializer": {"class_name": "Zeros", "config": {}, "shared_object_id": 2}, "kernel_regularizer": null, "bias_regularizer": null, "activity_regularizer": null, "kernel_constraint": null, "bias_constraint": null}, "name": "dense", "inbound_nodes": [[["input_1", 0, 0, {}]]], "shared_object_id": 3}, {"class_name": "Dense", "config": {"name": "dense_1", "trainable": true, "dtype": "float32", "units": 16, "activation": "relu", "use_bias": true, "kernel_initializer": {"class_name": "GlorotUniform", "config": {"seed": null}, "shared_object_id": 4}, "bias_initializer": {"class_name": "Zeros", "config": {}, "shared_object_id": 5}, "kernel_regularizer": null, "bias_regularizer": null, "activity_regularizer": null, "kernel_constraint": null, "bias_constraint": null}, "name": "dense_1", "inbound_nodes": [[["dense", 0, 0, {}]]], "shared_object_id": 6}, {"class_name": "Dense", "config": {"name": "dense_2", "trainable": true, "dtype": "float32", "units": 3, "activation": "linear", "use_bias": true, "kernel_initializer": {"class_name": "GlorotUniform", "config": {"seed": null}, "shared_object_id": 7}, "bias_initializer": {"class_name": "Zeros", "config": {}, "shared_object_id": 8}, "kernel_regularizer": null, "bias_regularizer": null, "activity_regularizer": null, "kernel_constraint": null, "bias_constraint": null}, "name": "dense_2", "inbound_nodes": [[["dense_1", 0, 0, {}]]], "shared_object_id": 9}], "input_layers": [["input_1", 0, 0]], "output_layers": [["dense_2", 0, 0]]}}, "training_config": {"loss": "mse", "metrics": null, "weighted_metrics": null, "loss_weights": null, "optimizer_config": {"class_name": "Custom>Adam", "config": {"name": "Adam", "weight_decay": null, "clipnorm": null, "global_clipnorm": null, "clipvalue": null, "use_ema": false, "ema_momentum": 0.99, "ema_overwrite_frequency": null, "jit_compile": true, "is_legacy_optimizer": false, "learning_rate": 0.004999999888241291, "beta_1": 0.9, "beta_2": 0.999, "epsilon": 1e-07, "amsgrad": false}}}}2 +ö root.layer-0"_tf_keras_input_layer*Æ{"class_name": "InputLayer", "name": "input_1", "dtype": "float32", "sparse": false, "ragged": false, "batch_input_shape": {"class_name": "__tuple__", "items": [null, 1]}, "config": {"batch_input_shape": {"class_name": "__tuple__", "items": [null, 1]}, "dtype": "float32", "sparse": false, "ragged": false, "name": "input_1"}}2 +ªroot.layer_with_weights-0"_tf_keras_layer*ó{"name": "dense", "trainable": true, "expects_training_arg": false, "dtype": "float32", "batch_input_shape": null, "stateful": false, "must_restore_from_config": false, "preserve_input_structure_in_config": false, "autocast": true, "class_name": "Dense", "config": {"name": "dense", "trainable": true, "dtype": "float32", "units": 16, "activation": "relu", "use_bias": true, 
"kernel_initializer": {"class_name": "GlorotUniform", "config": {"seed": null}, "shared_object_id": 1}, "bias_initializer": {"class_name": "Zeros", "config": {}, "shared_object_id": 2}, "kernel_regularizer": null, "bias_regularizer": null, "activity_regularizer": null, "kernel_constraint": null, "bias_constraint": null}, "inbound_nodes": [[["input_1", 0, 0, {}]]], "shared_object_id": 3, "input_spec": {"class_name": "InputSpec", "config": {"dtype": null, "shape": null, "ndim": null, "max_ndim": null, "min_ndim": 2, "axes": {"-1": 1}}, "shared_object_id": 12}, "build_input_shape": {"class_name": "TensorShape", "items": [null, 1]}}2 +®root.layer_with_weights-1"_tf_keras_layer*÷{"name": "dense_1", "trainable": true, "expects_training_arg": false, "dtype": "float32", "batch_input_shape": null, "stateful": false, "must_restore_from_config": false, "preserve_input_structure_in_config": false, "autocast": true, "class_name": "Dense", "config": {"name": "dense_1", "trainable": true, "dtype": "float32", "units": 16, "activation": "relu", "use_bias": true, "kernel_initializer": {"class_name": "GlorotUniform", "config": {"seed": null}, "shared_object_id": 4}, "bias_initializer": {"class_name": "Zeros", "config": {}, "shared_object_id": 5}, "kernel_regularizer": null, "bias_regularizer": null, "activity_regularizer": null, "kernel_constraint": null, "bias_constraint": null}, "inbound_nodes": [[["dense", 0, 0, {}]]], "shared_object_id": 6, "input_spec": {"class_name": "InputSpec", "config": {"dtype": null, "shape": null, "ndim": null, "max_ndim": null, "min_ndim": 2, "axes": {"-1": 16}}, "shared_object_id": 13}, "build_input_shape": {"class_name": "TensorShape", "items": [null, 16]}}2 +±root.layer_with_weights-2"_tf_keras_layer*ú{"name": "dense_2", "trainable": true, "expects_training_arg": false, "dtype": "float32", "batch_input_shape": null, "stateful": false, "must_restore_from_config": false, "preserve_input_structure_in_config": false, "autocast": true, "class_name": "Dense", "config": {"name": "dense_2", "trainable": true, "dtype": "float32", "units": 3, "activation": "linear", "use_bias": true, "kernel_initializer": {"class_name": "GlorotUniform", "config": {"seed": null}, "shared_object_id": 7}, "bias_initializer": {"class_name": "Zeros", "config": {}, "shared_object_id": 8}, "kernel_regularizer": null, "bias_regularizer": null, "activity_regularizer": null, "kernel_constraint": null, "bias_constraint": null}, "inbound_nodes": [[["dense_1", 0, 0, {}]]], "shared_object_id": 9, "input_spec": {"class_name": "InputSpec", "config": {"dtype": null, "shape": null, "ndim": null, "max_ndim": null, "min_ndim": 2, "axes": {"-1": 16}}, "shared_object_id": 14}, "build_input_shape": {"class_name": "TensorShape", "items": [null, 16]}}2 +¹Proot.keras_api.metrics.0"_tf_keras_metric*‚{"class_name": "Mean", "name": "loss", "dtype": "float32", "config": {"name": "loss", "dtype": "float32"}, "shared_object_id": 15}2 \ No newline at end of file diff --git a/behavior_metrics/models/rl_models/dqn/DQN_sp_16x16_LAPCOMPLETED_Max569_Epoch1455_inTime20230413-103523.model/saved_model.pb b/behavior_metrics/models/rl_models/dqn/DQN_sp_16x16_LAPCOMPLETED_Max569_Epoch1455_inTime20230413-103523.model/saved_model.pb new file mode 100644 index 00000000..412f461c Binary files /dev/null and b/behavior_metrics/models/rl_models/dqn/DQN_sp_16x16_LAPCOMPLETED_Max569_Epoch1455_inTime20230413-103523.model/saved_model.pb differ diff --git 
a/behavior_metrics/models/rl_models/dqn/DQN_sp_16x16_LAPCOMPLETED_Max569_Epoch1455_inTime20230413-103523.model/variables/variables.data-00000-of-00001 b/behavior_metrics/models/rl_models/dqn/DQN_sp_16x16_LAPCOMPLETED_Max569_Epoch1455_inTime20230413-103523.model/variables/variables.data-00000-of-00001 new file mode 100644 index 00000000..7cfad826 Binary files /dev/null and b/behavior_metrics/models/rl_models/dqn/DQN_sp_16x16_LAPCOMPLETED_Max569_Epoch1455_inTime20230413-103523.model/variables/variables.data-00000-of-00001 differ diff --git a/behavior_metrics/models/rl_models/dqn/DQN_sp_16x16_LAPCOMPLETED_Max569_Epoch1455_inTime20230413-103523.model/variables/variables.index b/behavior_metrics/models/rl_models/dqn/DQN_sp_16x16_LAPCOMPLETED_Max569_Epoch1455_inTime20230413-103523.model/variables/variables.index new file mode 100644 index 00000000..b14faf61 Binary files /dev/null and b/behavior_metrics/models/rl_models/dqn/DQN_sp_16x16_LAPCOMPLETED_Max569_Epoch1455_inTime20230413-103523.model/variables/variables.index differ diff --git a/behavior_metrics/pilot_carla.py b/behavior_metrics/pilot_carla.py index 5b587bcb..15df0bf6 100644 --- a/behavior_metrics/pilot_carla.py +++ b/behavior_metrics/pilot_carla.py @@ -23,7 +23,7 @@ from robot.actuators import Actuators from robot.sensors import Sensors from utils.logger import logger -from utils.constants import MIN_EXPERIMENT_PERCENTAGE_COMPLETED +from utils.constants import MIN_EXPERIMENT_PERCENTAGE_COMPLETED, ROOT_PATH from rosgraph_msgs.msg import Clock from carla_msgs.msg import CarlaControl @@ -88,6 +88,8 @@ def __init__(self, configuration, controller, brain_path, experiment_model=None) self.real_time_update_rate = 1000 self.pilot_start_time = 0 self.time_cycle = self.configuration.pilot_time_cycle + self.async_mode = self.configuration.async_mode + self.waypoint_publisher_path = self.configuration.waypoint_publisher_path def __wait_carla(self): """Wait for simulator to be initialized""" @@ -125,14 +127,20 @@ def run(self): control_pub = rospy.Publisher('/carla/control', CarlaControl, queue_size=1) control_command = CarlaControl() - control_command.command = 1 + control_command.command = 1 # PAUSE control_pub.publish(control_command) + self.waypoint_publisher = None while not self.kill_event.is_set(): if not self.stop_event.is_set(): + if self.waypoint_publisher is None and self.waypoint_publisher_path is not None: + self.waypoint_publisher = subprocess.Popen(["roslaunch", ROOT_PATH + '/' + self.waypoint_publisher_path]) control_pub = rospy.Publisher('/carla/control', CarlaControl, queue_size=1) control_command = CarlaControl() - control_command.command = 2 + if self.async_mode: + control_command.command = 2 # STEP_ONCE + else: + control_command.command = 0 # PLAY control_pub.publish(control_command) start_time = datetime.now() diff --git a/behavior_metrics/utils/configuration.py b/behavior_metrics/utils/configuration.py index fea6b62a..5a4bbafc 100644 --- a/behavior_metrics/utils/configuration.py +++ b/behavior_metrics/utils/configuration.py @@ -95,10 +95,16 @@ def initialize_configuration(self, config_data): self.robot_type = robot['Type'] self.pilot_time_cycle = robot['PilotTimeCycle'] self.current_world = config_data['Behaviors']['Simulation']['World'] + if 'WaypointPublisher' in config_data['Behaviors']['Simulation']: + self.waypoint_publisher_path = config_data['Behaviors']['Simulation']['WaypointPublisher'] + else: + self.waypoint_publisher_path = None if 'RealTimeUpdateRate' in config_data['Behaviors']['Simulation']: 
diff --git a/behavior_metrics/utils/configuration.py b/behavior_metrics/utils/configuration.py
index fea6b62a..5a4bbafc 100644
--- a/behavior_metrics/utils/configuration.py
+++ b/behavior_metrics/utils/configuration.py
@@ -95,10 +95,16 @@ def initialize_configuration(self, config_data):
         self.robot_type = robot['Type']
         self.pilot_time_cycle = robot['PilotTimeCycle']
         self.current_world = config_data['Behaviors']['Simulation']['World']
+        if 'WaypointPublisher' in config_data['Behaviors']['Simulation']:
+            self.waypoint_publisher_path = config_data['Behaviors']['Simulation']['WaypointPublisher']
+        else:
+            self.waypoint_publisher_path = None
         if 'RealTimeUpdateRate' in config_data['Behaviors']['Simulation']:
             self.real_time_update_rate = config_data['Behaviors']['Simulation']['RealTimeUpdateRate']
         else:
             self.real_time_update_rate = 1000
+        if 'AsyncMode' in robot:
+            self.async_mode = robot['AsyncMode']

         self.actuators = robot['Actuators']
         self.sensors = robot['Sensors']
diff --git a/behavior_metrics/utils/controller_carla.py b/behavior_metrics/utils/controller_carla.py
index 8390831d..053b4527 100644
--- a/behavior_metrics/utils/controller_carla.py
+++ b/behavior_metrics/utils/controller_carla.py
@@ -73,14 +73,23 @@ def __init__(self):
         self.cvbridge = CvBridge()

         client = carla.Client('localhost', 2000)
-        client.set_timeout(10.0)  # seconds
+        client.set_timeout(100.0)  # seconds
         self.world = client.get_world()
         time.sleep(5)
         self.carla_map = self.world.get_map()
         while len(self.world.get_actors().filter('vehicle.*')) == 0:
             logger.info("Waiting for vehicles!")
             time.sleep(1)
-        self.ego_vehicle = self.world.get_actors().filter('vehicle.*')[0]
+        ego_vehicle_role_name = "ego_vehicle"
+        self.ego_vehicle = None
+        while self.ego_vehicle is None:
+            for vehicle in self.world.get_actors().filter('vehicle.*'):
+                if vehicle.attributes.get('role_name') == ego_vehicle_role_name:
+                    self.ego_vehicle = vehicle
+                    break
+            if self.ego_vehicle is None:
+                logger.info("Waiting for vehicle with role_name 'ego_vehicle'")
+                time.sleep(1)  # sleep for 1 second before checking again
         self.map_waypoints = self.carla_map.generate_waypoints(0.5)
         self.weather = self.world.get_weather()
@@ -238,6 +247,7 @@ def record_metrics(self, metrics_record_dir_path, world_counter=None, brain_coun
             'carla_map': self.carla_map.name,
             'ego_vehicle': self.ego_vehicle.type_id,
             'vehicles_number': len(self.world.get_actors().filter('vehicle.*')),
+            'async_mode': self.pilot.configuration.async_mode,
             'weather': {
                 'cloudiness': self.weather.cloudiness,
                 'precipitation': self.weather.precipitation,
@@ -301,7 +311,7 @@ def stop_recording_metrics(self):
         target_brain_iterations_real_time = 1 / (self.pilot.time_cycle / 1000)

-        if self.pilot.brains.active_brain.cameras_first_images != []:
+        if hasattr(self.pilot.brains.active_brain, 'cameras_first_images') and self.pilot.brains.active_brain.cameras_first_images != []:
             first_images = self.pilot.brains.active_brain.cameras_first_images
             last_images = self.pilot.brains.active_brain.cameras_last_images
         else:
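The role_name lookup above only terminates if some vehicle was spawned with that attribute set. A minimal sketch of the spawning side through the plain CARLA Python API (the ROS bridge normally sets role_name from its own launch configuration; the blueprint id here is illustrative):

import carla

client = carla.Client('localhost', 2000)
client.set_timeout(10.0)
world = client.get_world()

# Tag the blueprint so controllers can single this actor out later.
blueprint = world.get_blueprint_library().filter('vehicle.tesla.model3')[0]
blueprint.set_attribute('role_name', 'ego_vehicle')

spawn_point = world.get_map().get_spawn_points()[0]
ego = world.spawn_actor(blueprint, spawn_point)
assert ego.attributes.get('role_name') == 'ego_vehicle'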
logger.info("SimulatorEnv: launching simulator server.") time.sleep(5) @@ -78,7 +84,7 @@ def launch_env(launch_file, random_spawn_point=False, carla_simulator=False, con time.sleep(5) -def close_ros_and_simulators(): +def close_ros_and_simulators(close_ros_resources=True): """Kill all the simulators and ROS processes.""" try: ps_output = subprocess.check_output(["ps", "-Af"]).decode('utf-8').strip("\n") @@ -114,7 +120,7 @@ def close_ros_and_simulators(): except subprocess.CalledProcessError as ce: logger.error("SimulatorEnv: exception raised executing killall command for CarlaUE4-Linux-Shipping {}".format(ce)) - if ps_output.count('rosout') > 0: + if ps_output.count('rosout') > 0 and close_ros_resources: try: import rosnode for node in rosnode.get_node_names(): @@ -124,7 +130,7 @@ def close_ros_and_simulators(): logger.debug("SimulatorEnv:rosout killed.") except subprocess.CalledProcessError as ce: logger.error("SimulatorEnv: exception raised executing killall command for rosout {}".format(ce)) - + if ps_output.count('bridge.py') > 0: try: os.system("ps -ef | grep 'bridge.py' | awk '{print $2}' | xargs kill -9") @@ -134,14 +140,14 @@ def close_ros_and_simulators(): except FileNotFoundError as ce: logger.error("SimulatorEnv: exception raised executing killall command for bridge.py {}".format(ce)) - if ps_output.count('rosmaster') > 0: + if ps_output.count('rosmaster') > 0 and close_ros_resources: try: subprocess.check_call(["killall", "-9", "rosmaster"]) logger.debug("SimulatorEnv: rosmaster killed.") except subprocess.CalledProcessError as ce: logger.error("SimulatorEnv: exception raised executing killall command for rosmaster {}".format(ce)) - if ps_output.count('roscore') > 0: + if ps_output.count('roscore') > 0 and close_ros_resources: try: subprocess.check_call(["killall", "-9", "roscore"]) logger.debug("SimulatorEnv: roscore killed.") @@ -155,14 +161,14 @@ def close_ros_and_simulators(): except subprocess.CalledProcessError as ce: logger.error("SimulatorEnv: exception raised executing killall command for px4 {}".format(ce)) - if ps_output.count('roslaunch') > 0: + if ps_output.count('roslaunch') > 0 and close_ros_resources: try: subprocess.check_call(["killall", "-9", "roslaunch"]) logger.debug("SimulatorEnv: roslaunch killed.") except subprocess.CalledProcessError as ce: logger.error("SimulatorEnv: exception raised executing killall command for roslaunch {}".format(ce)) - if ps_output.count('rosout') > 0: + if ps_output.count('rosout') > 0 and close_ros_resources: try: subprocess.check_call(["killall", "-9", "rosout"]) logger.debug("SimulatorEnv:rosout killed.") diff --git a/behavior_metrics/utils/metrics_carla.py b/behavior_metrics/utils/metrics_carla.py index 3fa2e086..549f02bc 100644 --- a/behavior_metrics/utils/metrics_carla.py +++ b/behavior_metrics/utils/metrics_carla.py @@ -584,7 +584,9 @@ def get_per_model_aggregated_metrics(result, experiments_starting_time_str, expe fig.tight_layout() plt.xticks(rotation=90) plt.legend(handles=color_handles) - plt.savefig(experiments_starting_time_str + '/' + unique_experiment_model + '_ ' + experiment_metric_and_title['metric'] + '.png') + if len(unique_experiment_model.split('/')) > 1: + unique_experiment_model = unique_experiment_model.split('/')[-1] + plt.savefig(experiments_starting_time_str + '/' + unique_experiment_model + '_' + experiment_metric_and_title['metric'] + '.png') plt.close() def get_all_experiments_aggregated_metrics_boxplot(result, experiments_starting_time_str, experiments_metrics_and_titles): diff --git 
diff --git a/behavior_metrics/utils/script_manager_gazebo.py b/behavior_metrics/utils/script_manager_gazebo.py
index 32d0ab4a..361266cb 100644
--- a/behavior_metrics/utils/script_manager_gazebo.py
+++ b/behavior_metrics/utils/script_manager_gazebo.py
@@ -22,7 +22,7 @@
 import rospy
 import random
 import sys
-
+import matplotlib.pyplot as plt
 import numpy as np

 from utils import metrics_gazebo
@@ -30,18 +30,44 @@
 from utils.constants import MIN_EXPERIMENT_PERCENTAGE_COMPLETED, CIRCUITS_TIMEOUTS
 from pilot_gazebo import PilotGazebo
 from utils.tmp_world_generator import tmp_world_generator
-from rosgraph_msgs.msg import Clock
-

 def run_brains_worlds(app_configuration, controller, randomize=False):
+    worlds = enumerate(app_configuration.current_world)
+    worlds_list = list(worlds)
+    length_worlds = len(worlds_list)
+
+    # In case any other metric is needed. The available metrics can be found in metrics_gazebo.py > get_metrics
+    aggregated_metrics = {
+        "average_speed": "meters/s",
+        "percentage_completed": "%",
+        "position_deviation_mae": "meters",
+    }
+    metrics_len = len(aggregated_metrics)
+
+    fig, axs = plt.subplots(length_worlds, metrics_len, figsize=(10, 5))
+
     # Start Behavior Metrics app
     for world_counter, world in enumerate(app_configuration.current_world):
+        parts = world.split('/')[-1]
+        # Get the world name (e.g. "simple_circuit") from the file path
+        world_name = parts.split('.')[0]
+        brain_names = []
+
+        brains_metrics = {}
         for brain_counter, brain in enumerate(app_configuration.brain_path):
+            # Split the string by "/"
+            parts = brain.split('/')[-1]
+            # Get the keyword (e.g. "dqn") from the file name
+            brain_name = parts.split('_')[-1].split('.')[0]
+            brain_names.append(brain_name)
+            brains_metrics[brain_name] = []
             repetition_counter = 0
             while repetition_counter < app_configuration.experiment_repetitions:
+                logger.info(f"repetition {repetition_counter+1} of {app_configuration.experiment_repetitions}"
+                            f" for brain {brain} in world {world}")
                 tmp_world_generator(world, app_configuration.stats_perfect_lap[world_counter],
-                                    app_configuration.real_time_update_rate, randomize=randomize, gui=False,
-                                    launch=True)
+                                    app_configuration.real_time_update_rate, randomize=randomize, gui=False,
+                                    launch=True, close_ros_resources=False)
                 pilot = PilotGazebo(app_configuration, controller, app_configuration.brain_path[brain_counter])
                 pilot.daemon = True
                 pilot.real_time_update_rate = app_configuration.real_time_update_rate
@@ -104,11 +130,46 @@
                 logger.info('* Model ---> ' + app_configuration.experiment_model[brain_counter])
                 if not pitch_error:
                     logger.info('* Metrics ---> ' + str(controller.experiment_metrics))
-                repetition_counter += 1
+                    brains_metrics[brain_name].append(controller.experiment_metrics)
+                os.remove('tmp_circuit.launch')
                 os.remove('tmp_world.launch')
                 while not controller.pilot.execution_completed:
                     time.sleep(1)
+
+                repetition_counter += 1
+
+        positions = list(range(1, len(brain_names) + 1))
+        key_counter = 0
+        for key in aggregated_metrics:
+            brains_metrics_names = []
+            brains_metrics_data = []
+            for brain_key in brains_metrics:
+                brain_metric_data = []
+                for repetition_metrics in brains_metrics[brain_key]:
+                    brain_metric_data.append(repetition_metrics[key])
+                brains_metrics_names.append(brain_key)
+                brains_metrics_data.append(brain_metric_data)
+
+            if length_worlds > 1:
+                # Create a boxplot for all metrics in the same axis
+                axs[world_counter, key_counter].boxplot(brains_metrics_data)
+                axs[world_counter, key_counter].set_xticks(positions)
+                axs[world_counter, key_counter].set_xticklabels(brains_metrics_names, fontsize=8)
+                axs[world_counter, key_counter].set_title(f"{key} in {world_name}")
+                axs[world_counter, key_counter].set_ylabel(aggregated_metrics[key])
+                key_counter += 1
+            else:
+                # Create a boxplot for all metrics in the same axis
+                axs[key_counter].boxplot(brains_metrics_data, positions=positions)
+                axs[key_counter].set_xticks(positions)
+                axs[key_counter].set_xticklabels(brains_metrics_names, fontsize=8)
+                axs[key_counter].set_title(f"{key} in {world_name}")
+                axs[key_counter].set_ylabel(aggregated_metrics[key])
+                key_counter += 1
+
+    # Display the chart
+    plt.show()

     controller.stop_pilot()
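The plotting logic above builds one boxplot per brain for each metric and world. A self-contained sketch of the same idea with invented numbers (brain names, values, and titles are illustrative only):

import matplotlib.pyplot as plt

# Three repetitions of "average_speed" for two hypothetical brains.
brains_metrics_data = [[7.2, 7.5, 7.1], [6.4, 6.9, 6.6]]
brains_metrics_names = ['dqn', 'qlearn']
positions = list(range(1, len(brains_metrics_names) + 1))

fig, ax = plt.subplots(figsize=(5, 4))
ax.boxplot(brains_metrics_data, positions=positions)
ax.set_xticks(positions)
ax.set_xticklabels(brains_metrics_names, fontsize=8)
ax.set_title('average_speed in simple_circuit')
ax.set_ylabel('meters/s')
plt.show()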
diff --git a/behavior_metrics/utils/tmp_world_generator.py b/behavior_metrics/utils/tmp_world_generator.py
index e2e395ec..69bf70c8 100644
--- a/behavior_metrics/utils/tmp_world_generator.py
+++ b/behavior_metrics/utils/tmp_world_generator.py
@@ -34,8 +34,8 @@

 def tmp_world_generator(current_world, stats_perfect_lap, real_time_update_rate, randomize=False, gui=False,
-                        launch=False):
-    environment.close_ros_and_simulators()
+                        launch=False, close_ros_resources=True):
+    environment.close_ros_and_simulators(close_ros_resources)
     tree = ET.parse(current_world)
     root = tree.getroot()
     for child in root[0]:
diff --git a/requirements.txt b/requirements.txt
index 229196c2..bb7e3ef0 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -1,6 +1,6 @@
 PyQt5==5.15.0
 pyQt5-sip==12.8.1
-scipy==1.4.1
+scipy==1.10.0
 jupyterlab==2.2.10
 PyQt3D==5.15.0
 npyscreen==4.10.5
@@ -19,14 +19,14 @@
 netifaces==0.10.9
 Pillow==9.3.0
 pyglet==1.5.0
 gym==0.17.3
-requests==2.21.0
+requests==2.31.0
 six>=1.14.0
 empy==3.3.2
 vcstool==0.2.14
 scikit-image==0.19.2
 bagpy==0.4.10
 pycryptodomex==3.9.9
-torch==1.11.0
-torchvision==0.12.0
+torch==1.13.1
+torchvision==0.14.1
 tensorboard==2.11.0
 tensorboard-data-server==0.6.1
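Taken together, the tmp_world_generator and close_ros_and_simulators changes let a batch of repetitions reuse one ROS session instead of tearing it down each time. A minimal usage sketch under the module layout above (the world and perfect-lap paths are hypothetical):

from utils import environment
from utils.tmp_world_generator import tmp_world_generator

# Relaunch the world between repetitions but keep rosmaster/roscore/rosout
# alive so consecutive experiments share the same ROS session.
for repetition in range(3):
    tmp_world_generator('worlds/simple_circuit.launch', 'perfect_lap.pkl',
                        1000, randomize=False, gui=False,
                        launch=True, close_ros_resources=False)
    # ... run the pilot and collect metrics here ...

# Full teardown once the whole batch is done.
environment.close_ros_and_simulators(close_ros_resources=True)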