From a2f37af2cb57fb3035c757b4d8b879afd7cb5897 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 17 Jan 2023 14:29:07 +0000 Subject: [PATCH 01/36] Bump torch from 1.11.0 to 1.13.1 Bumps [torch](https://github.com/pytorch/pytorch) from 1.11.0 to 1.13.1. - [Release notes](https://github.com/pytorch/pytorch/releases) - [Changelog](https://github.com/pytorch/pytorch/blob/master/RELEASE.md) - [Commits](https://github.com/pytorch/pytorch/compare/v1.11.0...v1.13.1) --- updated-dependencies: - dependency-name: torch dependency-type: direct:production ... Signed-off-by: dependabot[bot] --- requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index c4b705ae..a7d09544 100644 --- a/requirements.txt +++ b/requirements.txt @@ -26,7 +26,7 @@ vcstool==0.2.14 scikit-image==0.19.2 bagpy==0.4.10 pycryptodomex==3.9.9 -torch==1.11.0 +torch==1.13.1 torchvision==0.12.0 tensorboard==2.11.0 tensorboard-data-server==0.6.1 From 07b7f8ee8a7e3534fdc20b5ef610131b3d59c20a Mon Sep 17 00:00:00 2001 From: enrique Date: Tue, 21 Feb 2023 18:12:32 +0100 Subject: [PATCH 02/36] new brain for the subjective vision task on carla simulator --- ...jective_vision_deep_learning_previous_v.py | 238 ++++++++++++++++++ .../configs/carla_subjective_vision.yml | 73 ++++++ 2 files changed, 311 insertions(+) create mode 100644 behavior_metrics/brains/CARLA/brain_carla_subjective_vision_deep_learning_previous_v.py create mode 100644 behavior_metrics/configs/carla_subjective_vision.yml diff --git a/behavior_metrics/brains/CARLA/brain_carla_subjective_vision_deep_learning_previous_v.py b/behavior_metrics/brains/CARLA/brain_carla_subjective_vision_deep_learning_previous_v.py new file mode 100644 index 00000000..61eccc54 --- /dev/null +++ b/behavior_metrics/brains/CARLA/brain_carla_subjective_vision_deep_learning_previous_v.py @@ -0,0 +1,238 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +import csv +import cv2 +import math +import numpy as np +import threading +import time +import carla +from PIL import Image +from os import path +from albumentations import ( + Compose, Normalize, RandomRain, RandomBrightness, RandomShadow, RandomSnow, RandomFog, RandomSunFlare +) +from utils.constants import PRETRAINED_MODELS_DIR, ROOT_PATH +from utils.logger import logger +from traceback import print_exc + +PRETRAINED_MODELS = ROOT_PATH + '/' + PRETRAINED_MODELS_DIR + 'carla_tf_models/' + +from tensorflow.python.framework.errors_impl import NotFoundError +from tensorflow.python.framework.errors_impl import UnimplementedError +import tensorflow as tf + +import os +os.environ['CUDA_VISIBLE_DEVICES'] = '-1' + +"""gpus = tf.config.experimental.list_physical_devices('GPU') +for gpu in gpus: + tf.config.experimental.set_memory_growth(gpu, True)""" + + +class Brain: + + def __init__(self, sensors, actuators, handler, model, config=None): + self.camera_0 = sensors.get_camera('camera_0') + self.camera_1 = sensors.get_camera('camera_1') + self.camera_2 = sensors.get_camera('camera_2') + self.camera_3 = sensors.get_camera('camera_3') + + self.cameras_first_images = [] + + self.pose = sensors.get_pose3d('pose3d_0') + + self.bird_eye_view = sensors.get_bird_eye_view('bird_eye_view_0') + + self.motors = actuators.get_motor('motors_0') + self.handler = handler + self.config = config + self.inference_times = [] + self.gpu_inference = True if tf.test.gpu_device_name() else False + + self.threshold_image = np.zeros((640, 360, 3), np.uint8) + 
self.color_image = np.zeros((640, 360, 3), np.uint8) + ''' + self.lock = threading.Lock() + self.threshold_image_lock = threading.Lock() + self.color_image_lock = threading.Lock() + ''' + self.cont = 0 + self.iteration = 0 + + # self.previous_timestamp = 0 + # self.previous_image = 0 + + self.previous_commanded_throttle = None + self.previous_commanded_steer = None + self.previous_commanded_brake = None + self.suddenness_distance = [] + self.suddenness_distance_throttle = [] + self.suddenness_distance_steer = [] + self.suddenness_distance_break_command = [] + + client = carla.Client('localhost', 2000) + client.set_timeout(10.0) # seconds + world = client.get_world() + + time.sleep(5) + self.vehicle = world.get_actors().filter('vehicle.*')[0] + + if model: + if not path.exists(PRETRAINED_MODELS + model): + logger.info("File " + model + " cannot be found in " + PRETRAINED_MODELS) + logger.info("** Load TF model **") + self.net = tf.keras.models.load_model(PRETRAINED_MODELS + model) + logger.info("** Loaded TF model **") + else: + logger.info("** Brain not loaded **") + logger.info("- Models path: " + PRETRAINED_MODELS) + logger.info("- Model: " + str(model)) + + self.previous_speed = 0 + + + def update_frame(self, frame_id, data): + """Update the information to be shown in one of the GUI's frames. + + Arguments: + frame_id {str} -- Id of the frame that will represent the data + data {*} -- Data to be shown in the frame. Depending on the type of frame (rgbimage, laser, pose3d, etc) + """ + if data.shape[0] != data.shape[1]: + if data.shape[0] > data.shape[1]: + difference = data.shape[0] - data.shape[1] + extra_left, extra_right = int(difference/2), int(difference/2) + extra_top, extra_bottom = 0, 0 + else: + difference = data.shape[1] - data.shape[0] + extra_left, extra_right = 0, 0 + extra_top, extra_bottom = int(difference/2), int(difference/2) + + + data = np.pad(data, ((extra_top, extra_bottom), (extra_left, extra_right), (0, 0)), mode='constant', constant_values=0) + + self.handler.update_frame(frame_id, data) + + def update_pose(self, pose_data): + self.handler.update_pose3d(pose_data) + + def execute(self): + image = self.camera_0.getImage().data + image_1 = self.camera_1.getImage().data + image_2 = self.camera_2.getImage().data + image_3 = self.camera_3.getImage().data + + bird_eye_view_1 = self.bird_eye_view.getImage(self.vehicle) + bird_eye_view_1 = cv2.cvtColor(bird_eye_view_1, cv2.COLOR_BGR2RGB) + + if self.cameras_first_images == []: + self.cameras_first_images.append(image) + self.cameras_first_images.append(image_1) + self.cameras_first_images.append(image_2) + self.cameras_first_images.append(image_3) + self.cameras_first_images.append(bird_eye_view_1) + + self.cameras_last_images = [ + image, + image_1, + image_2, + image_3, + bird_eye_view_1 + ] + + self.update_frame('frame_1', image) + self.update_frame('frame_2', image_2) + self.update_frame('frame_3', image_3) + + self.update_frame('frame_0', bird_eye_view_1) + + self.update_pose(self.pose.getPose3d()) + + image_shape=(200, 66) + img = cv2.resize(image, image_shape)/255.0 + + """AUGMENTATIONS_TEST = Compose([ + Normalize() + ]) + image = AUGMENTATIONS_TEST(image=img_base) + img = image["image"]""" + + #velocity_dim = np.full((150, 50), 0.5) + velocity_normalize = np.interp(self.previous_speed, (0, 100), (0, 1)) + velocity_dim = np.full((66, 200), velocity_normalize) + new_img_vel = np.dstack((img, velocity_dim)) + img = new_img_vel + + img = np.expand_dims(img, axis=0) + start_time = time.time() + try: + print(img.shape) 
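+                # The network outputs two values in [0, 1]:
+                # prediction[0][0] is a combined throttle/brake command (mapped below
+                # to [-1, 1]: positive means throttle, negative means brake) and
+                # prediction[0][1] is the steering command (also mapped to [-1, 1]).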
+ prediction = self.net.predict(img) + self.inference_times.append(time.time() - start_time) + throttle_brake_val = np.interp(prediction[0][0], (0, 1), (-1, 1)) + steer = np.interp(prediction[0][1], (0, 1), (-1, 1)) + if throttle_brake_val >= 0: # throttle + throttle = throttle_brake_val + break_command = 0 + else: # brake + throttle = 0 + break_command = -1*throttle_brake_val + + speed = self.vehicle.get_velocity() + vehicle_speed = 3.6 * math.sqrt(speed.x**2 + speed.y**2 + speed.z**2) + self.previous_speed = vehicle_speed + + if vehicle_speed < 10: + self.motors.sendThrottle(1.0) + self.motors.sendSteer(0.0) + self.motors.sendBrake(0) + else: + self.motors.sendThrottle(throttle) + self.motors.sendSteer(steer) + self.motors.sendBrake(break_command) + + if self.previous_commanded_throttle != None: + a = np.array((throttle, steer, break_command)) + b = np.array((self.previous_commanded_throttle, self.previous_commanded_steer, self.previous_commanded_brake)) + distance = np.linalg.norm(a - b) + self.suddenness_distance.append(distance) + + a = np.array((throttle)) + b = np.array((self.previous_commanded_throttle)) + distance_throttle = np.linalg.norm(a - b) + self.suddenness_distance_throttle.append(distance_throttle) + + a = np.array((steer)) + b = np.array((self.previous_commanded_steer)) + distance_steer = np.linalg.norm(a - b) + self.suddenness_distance_steer.append(distance_steer) + + a = np.array((break_command)) + b = np.array((self.previous_commanded_brake)) + distance_break_command = np.linalg.norm(a - b) + self.suddenness_distance_break_command.append(distance_break_command) + + self.previous_commanded_throttle = throttle + self.previous_commanded_steer = steer + self.previous_commanded_brake = break_command + except NotFoundError as ex: + logger.info('Error inside brain: NotFoundError!') + logger.warning(type(ex).__name__) + print_exc() + raise Exception(ex) + except UnimplementedError as ex: + logger.info('Error inside brain: UnimplementedError!') + logger.warning(type(ex).__name__) + print_exc() + raise Exception(ex) + except Exception as ex: + logger.info('Error inside brain: Exception!') + logger.warning(type(ex).__name__) + print_exc() + raise Exception(ex) + + + + + diff --git a/behavior_metrics/configs/carla_subjective_vision.yml b/behavior_metrics/configs/carla_subjective_vision.yml new file mode 100644 index 00000000..277add65 --- /dev/null +++ b/behavior_metrics/configs/carla_subjective_vision.yml @@ -0,0 +1,73 @@ +Behaviors: + Robot: + Sensors: + Cameras: + Camera_0: + Name: 'camera_0' + Topic: '/carla/ego_vehicle/rgb_front/image' + Camera_1: + Name: 'camera_1' + Topic: '/carla/ego_vehicle/rgb_view/image' + Camera_2: + Name: 'camera_2' + Topic: '/carla/ego_vehicle/semantic_segmentation_front/image' + Camera_3: + Name: 'camera_3' + Topic: '/carla/ego_vehicle/dvs_front/image' + Pose3D: + Pose3D_0: + Name: 'pose3d_0' + Topic: '/carla/ego_vehicle/odometry' + BirdEyeView: + BirdEyeView_0: + Name: 'bird_eye_view_0' + Topic: '' + Speedometer: + Speedometer_0: + Name: 'speedometer_0' + Topic: '/carla/ego_vehicle/speedometer' + Actuators: + CARLA_Motors: + Motors_0: + Name: 'motors_0' + Topic: '/carla/ego_vehicle/vehicle_control_cmd' + MaxV: 3 + MaxW: 0.3 + BrainPath: 'brains/CARLA/brain_carla_subjective_vision_deep_learning_previous_v.py' + PilotTimeCycle: 100 + Parameters: + Model: '20230107-205705_rgb_brakingsimple_71_58k.h5' + ImageCropped: True + ImageSize: [ 200,66 ] + ImageNormalized: True + PredictionsNormalized: True + GPU: True + UseOptimized: True + ImageTranform: '' 
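+  # ImageSize is (width, height) and mirrors the cv2.resize(image, (200, 66))
+  # that the brain applies before inference; 'Type' below marks this profile
+  # as a CARLA simulator configuration.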
+ Type: 'CARLA'
+ RandomSpawnPoint: False
+ Simulation:
+ World: configs/CARLA_launch_files/town_02_anticlockwise.launch
+ Dataset:
+ In: '/tmp/my_bag.bag'
+ Out: ''
+ Stats:
+ Out: './'
+ PerfectLap: './perfect_bags/lap-simple-circuit.bag'
+ Layout:
+ Frame_0:
+ Name: frame_0
+ Geometry: [1, 1, 1, 1]
+ Data: rgbimage
+ Frame_1:
+ Name: frame_1
+ Geometry: [0, 1, 1, 1]
+ Data: rgbimage
+ Frame_2:
+ Name: frame_2
+ Geometry: [0, 2, 1, 1]
+ Data: rgbimage
+ Frame_3:
+ Name: frame_3
+ Geometry: [1, 2, 1, 1]
+ Data: rgbimage

From 3bf0a14a5208e8e1a84cdca3c60e9a5439cec4fe Mon Sep 17 00:00:00 2001
From: enrique
Date: Tue, 21 Feb 2023 18:13:22 +0100
Subject: [PATCH 03/36] new brain for the subjective vision task on carla simulator
---
 ..._subjective_vision.yml => default_carla_subjective_vision.yml} | 0
 1 file changed, 0 insertions(+), 0 deletions(-)
 rename behavior_metrics/configs/{carla_subjective_vision.yml => default_carla_subjective_vision.yml} (100%)

diff --git a/behavior_metrics/configs/carla_subjective_vision.yml b/behavior_metrics/configs/default_carla_subjective_vision.yml
similarity index 100%
rename from behavior_metrics/configs/carla_subjective_vision.yml
rename to behavior_metrics/configs/default_carla_subjective_vision.yml

From a90a357d6fe51327317e42794fbcea47f927a30d Mon Sep 17 00:00:00 2001
From: enrique
Date: Wed, 22 Feb 2023 12:27:01 +0100
Subject: [PATCH 04/36] added the cropping of the image for the model to work correctly
---
 .../brain_carla_subjective_vision_deep_learning_previous_v.py | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)

diff --git a/behavior_metrics/brains/CARLA/brain_carla_subjective_vision_deep_learning_previous_v.py b/behavior_metrics/brains/CARLA/brain_carla_subjective_vision_deep_learning_previous_v.py
index 61eccc54..ddcc7665 100644
--- a/behavior_metrics/brains/CARLA/brain_carla_subjective_vision_deep_learning_previous_v.py
+++ b/behavior_metrics/brains/CARLA/brain_carla_subjective_vision_deep_learning_previous_v.py
@@ -122,6 +122,8 @@ def execute(self):
 image_1 = self.camera_1.getImage().data
 image_2 = self.camera_2.getImage().data
 image_3 = self.camera_3.getImage().data
+
+ cropped = image[230:-1,:]
 bird_eye_view_1 = self.bird_eye_view.getImage(self.vehicle)
 bird_eye_view_1 = cv2.cvtColor(bird_eye_view_1, cv2.COLOR_BGR2RGB)
@@ -150,7 +152,7 @@ def execute(self):
 self.update_pose(self.pose.getPose3d())
 image_shape=(200, 66)
- img = cv2.resize(image, image_shape)/255.0
+ img = cv2.resize(cropped, image_shape)/255.0
 """AUGMENTATIONS_TEST = Compose([
 Normalize()
 ])

From 6d32983ed37d0502cbe65b9da3d9ad6635e58ee0 Mon Sep 17 00:00:00 2001
From: enrique
Date: Mon, 27 Feb 2023 22:45:37 +0100
Subject: [PATCH 05/36] added the _Opt and deletion of some buildings to alleviate the resources consumption
---
 ...in_carla_subjective_vision_deep_learning_previous_v.py | 8 ++++----
 .../CARLA_launch_files/town_02_anticlockwise.launch | 6 +++---
 2 files changed, 7 insertions(+), 7 deletions(-)

diff --git a/behavior_metrics/brains/CARLA/brain_carla_subjective_vision_deep_learning_previous_v.py b/behavior_metrics/brains/CARLA/brain_carla_subjective_vision_deep_learning_previous_v.py
index ddcc7665..bd19510a 100644
--- a/behavior_metrics/brains/CARLA/brain_carla_subjective_vision_deep_learning_previous_v.py
+++ b/behavior_metrics/brains/CARLA/brain_carla_subjective_vision_deep_learning_previous_v.py
@@ -23,11 +23,11 @@
 import tensorflow as tf
 import os
-os.environ['CUDA_VISIBLE_DEVICES'] = '-1'
+# os.environ['CUDA_VISIBLE_DEVICES'] = '-1'
-"""gpus = 
tf.config.experimental.list_physical_devices('GPU')
for gpu in gpus:
 tf.config.experimental.set_memory_growth(gpu, True)"""
+gpus = tf.config.experimental.list_physical_devices('GPU')
for gpu in gpus:
 tf.config.experimental.set_memory_growth(gpu, True)

 class Brain:
@@ -74,6 +74,7 @@ def __init__(self, sensors, actuators, handler, model, config=None):
 client = carla.Client('localhost', 2000)
 client.set_timeout(10.0) # seconds
 world = client.get_world()
+ world.unload_map_layer(carla.MapLayer.Buildings)
 time.sleep(5)
 self.vehicle = world.get_actors().filter('vehicle.*')[0]
@@ -169,7 +170,6 @@ def execute(self):
 img = np.expand_dims(img, axis=0)
 start_time = time.time()
 try:
- print(img.shape)
 prediction = self.net.predict(img)
 self.inference_times.append(time.time() - start_time)
 throttle_brake_val = np.interp(prediction[0][0], (0, 1), (-1, 1))
diff --git a/behavior_metrics/configs/CARLA_launch_files/town_02_anticlockwise.launch b/behavior_metrics/configs/CARLA_launch_files/town_02_anticlockwise.launch
index 4fa3bebe..6389fcf6 100644
--- a/behavior_metrics/configs/CARLA_launch_files/town_02_anticlockwise.launch
+++ b/behavior_metrics/configs/CARLA_launch_files/town_02_anticlockwise.launch
@@ -20,7 +20,7 @@ - + @@ -35,7 +35,7 @@ - + @@ -55,7 +55,7 @@ - +

From 5ca428df1dc55b314ebcef67de45dd2345b1f7e2 Mon Sep 17 00:00:00 2001
From: ruben
Date: Wed, 15 Mar 2023 22:08:49 +0100
Subject: [PATCH 06/36] adapted script to automatically run experiments in gazebo

first iteration fixing the script_manager_gazebo. It is now working and plotting the indicated metrics at the beginning of the script.

Still room for improvement for future script iterations
---
 .../brains/agents/f1/f1_follow_line_qlearn.py | 3 -
 ...earn.py => brain_f1_follow_line_qlearn.py} | 0
 .../f1/rl_utils/models/f1_env_camera.py | 17 +----
 .../f1/rl_utils/models/followline_dqn_tf.py | 1 -
 .../brains/f1/rl_utils/models/rewards.py | 10 +--
 .../brains/f1/rl_utils/models/step.py | 3 -
 behavior_metrics/configs/default-multiple.yml | 9 ++-
 .../configs/default-rl-qlearn.yml | 2 +-
 behavior_metrics/utils/environment.py | 48 ++++++------
 .../utils/script_manager_gazebo.py | 73 +++++++++++++++++--
 10 files changed, 99 insertions(+), 67 deletions(-)
 rename behavior_metrics/brains/f1/{brain_f1_follow_line_q_learn.py => brain_f1_follow_line_qlearn.py} (100%)

diff --git a/behavior_metrics/brains/agents/f1/f1_follow_line_qlearn.py b/behavior_metrics/brains/agents/f1/f1_follow_line_qlearn.py
index b1e65b70..34ef83e2 100755
--- a/behavior_metrics/brains/agents/f1/f1_follow_line_qlearn.py
+++ b/behavior_metrics/brains/agents/f1/f1_follow_line_qlearn.py
@@ -105,7 +105,6 @@ def save_model():
 highest_reward = cumulated_reward
 nextState = ''.join(map(str, observation))
-# print(nextState)
 qlearn.learn(state, action, reward, nextState)
@@ -120,8 +119,6 @@ def save_model():
 if step > 3000:
 print("\n\nLAP COMPLETED!!\n\n")
- # print("Obser: {} - Rew: {}".format(observation, reward))
-
 if episode % 100 == 0:
 plotter.plot(env)
 if settings.save_model:
diff --git a/behavior_metrics/brains/f1/brain_f1_follow_line_q_learn.py b/behavior_metrics/brains/f1/brain_f1_follow_line_qlearn.py
similarity index 100%
rename from behavior_metrics/brains/f1/brain_f1_follow_line_q_learn.py
rename to behavior_metrics/brains/f1/brain_f1_follow_line_qlearn.py
diff --git a/behavior_metrics/brains/f1/rl_utils/models/f1_env_camera.py b/behavior_metrics/brains/f1/rl_utils/models/f1_env_camera.py
index d3c13061..41b065d9 100644
--- a/behavior_metrics/brains/f1/rl_utils/models/f1_env_camera.py
+++ 
b/behavior_metrics/brains/f1/rl_utils/models/f1_env_camera.py @@ -94,24 +94,9 @@ def step(self, action) -> Tuple: points = self.processed_image(f1_image_camera.data) state = self.calculate_observation(points) - center = float(self.config.center_image - points[0]) / ( - float(self.config.width) // 2 - ) done = False - center = abs(center) - - if center > 0.9: - done = True - if not done: - if 0 <= center <= 0.2: - reward = 10 - elif 0.2 < center <= 0.4: - reward = 2 - else: - reward = 1 - else: - reward = -100 + reward = 0 return state, reward, done, {} diff --git a/behavior_metrics/brains/f1/rl_utils/models/followline_dqn_tf.py b/behavior_metrics/brains/f1/rl_utils/models/followline_dqn_tf.py index f9e12bdb..75856e3c 100644 --- a/behavior_metrics/brains/f1/rl_utils/models/followline_dqn_tf.py +++ b/behavior_metrics/brains/f1/rl_utils/models/followline_dqn_tf.py @@ -30,7 +30,6 @@ def reset(self): return Reset.reset_f1_state_image(self) else: return Reset.reset_f1_state_sp(self) - return Reset.reset_f1_state_sp(self) def step(self, action, step): from .step import ( diff --git a/behavior_metrics/brains/f1/rl_utils/models/rewards.py b/behavior_metrics/brains/f1/rl_utils/models/rewards.py index 80da0995..6ac8a19d 100644 --- a/behavior_metrics/brains/f1/rl_utils/models/rewards.py +++ b/behavior_metrics/brains/f1/rl_utils/models/rewards.py @@ -75,15 +75,7 @@ def rewards_followline_center(self, center, rewards): original for Following Line """ done = False - if center > 0.9: - done = True - reward = rewards["penal"] - elif 0 <= center <= 0.2: - reward = rewards["from_10"] - elif 0.2 < center <= 0.4: - reward = rewards["from_02"] - else: - reward = rewards["from_01"] + reward = 0 return reward, done diff --git a/behavior_metrics/brains/f1/rl_utils/models/step.py b/behavior_metrics/brains/f1/rl_utils/models/step.py index 102a640f..5e619801 100644 --- a/behavior_metrics/brains/f1/rl_utils/models/step.py +++ b/behavior_metrics/brains/f1/rl_utils/models/step.py @@ -48,13 +48,10 @@ def step_followline_state_image_actions_discretes(self, action, step): return state, reward, done, {} def step_followline_state_sp_actions_discretes(self, action, step): - # self._gazebo_unpause() vel_cmd = Twist() vel_cmd.linear.x = self.actions[action][0] vel_cmd.angular.z = self.actions[action][1] self.vel_pub.publish(vel_cmd) - # time.sleep(0.1) - # self._gazebo_pause() ##==== get image from sensor camera f1_image_camera, _ = self.f1gazeboimages.get_camera_info() diff --git a/behavior_metrics/configs/default-multiple.yml b/behavior_metrics/configs/default-multiple.yml index 226c514b..6bf41372 100644 --- a/behavior_metrics/configs/default-multiple.yml +++ b/behavior_metrics/configs/default-multiple.yml @@ -16,10 +16,10 @@ Behaviors: Topic: '/F1ROS/cmd_vel' MaxV: 3 MaxW: 0.3 - BrainPath: ['brains/f1/brain_f1_keras.py', 'brains/f1/brain_f1_keras.py'] + BrainPath: ['brains/f1/brain_f1_follow_line_dqn.py', 'brains/f1/brain_f1_follow_line_qlearn.py'] PilotTimeCycle: 50 Parameters: - Model: ['model_deepest_lstm_cropped_250_norm_max_pooling.h5', 'model_deepest_lstm_cropped_250_norm_test.h5'] +# Model: ['model_deepest_lstm_cropped_250_norm_max_pooling.h5', 'model_deepest_lstm_cropped_250_norm_test.h5'] ImageCropped: True ImageSize: [100,50] ImageNormalized: True @@ -29,8 +29,9 @@ Behaviors: Experiment: Name: "Experiment name" Description: "Experiment description" - Timeout: [30, 30] - Repetitions: 2 + Timeout: [400, 400] + UseWorldTimeouts: [400, 400] + Repetitions: 5 Simulation: World: 
['/opt/jderobot/share/jderobot/gazebo/launch/simple_circuit.launch', '/opt/jderobot/share/jderobot/gazebo/launch/many_curves.launch'] RealTimeUpdateRate: 1000 diff --git a/behavior_metrics/configs/default-rl-qlearn.yml b/behavior_metrics/configs/default-rl-qlearn.yml index 3909475b..1f220904 100644 --- a/behavior_metrics/configs/default-rl-qlearn.yml +++ b/behavior_metrics/configs/default-rl-qlearn.yml @@ -17,7 +17,7 @@ Behaviors: MaxV: 3 MaxW: 0.3 - BrainPath: 'brains/f1/brain_f1_follow_line_q_learn.py' + BrainPath: 'brains/f1/brain_f1_follow_line_qlearn.py' PilotTimeCycle: 50 Parameters: ImageTranform: '' diff --git a/behavior_metrics/utils/environment.py b/behavior_metrics/utils/environment.py index 31a27728..4652b20c 100644 --- a/behavior_metrics/utils/environment.py +++ b/behavior_metrics/utils/environment.py @@ -114,17 +114,17 @@ def close_ros_and_simulators(): except subprocess.CalledProcessError as ce: logger.error("SimulatorEnv: exception raised executing killall command for CarlaUE4-Linux-Shipping {}".format(ce)) - if ps_output.count('rosout') > 0: - try: - import rosnode - for node in rosnode.get_node_names(): - if node != '/carla_ros_bridge': - subprocess.check_call(["rosnode", "kill", node]) - - logger.debug("SimulatorEnv:rosout killed.") - except subprocess.CalledProcessError as ce: - logger.error("SimulatorEnv: exception raised executing killall command for rosout {}".format(ce)) - + # if ps_output.count('rosout') > 0: + # try: + # import rosnode + # for node in rosnode.get_node_names(): + # if node != '/carla_ros_bridge': + # subprocess.check_call(["rosnode", "kill", node]) + # + # logger.debug("SimulatorEnv:rosout killed.") + # except subprocess.CalledProcessError as ce: + # logger.error("SimulatorEnv: exception raised executing killall command for rosout {}".format(ce)) + # if ps_output.count('bridge.py') > 0: try: os.system("ps -ef | grep 'bridge.py' | awk '{print $2}' | xargs kill -9") @@ -134,19 +134,19 @@ def close_ros_and_simulators(): except FileNotFoundError as ce: logger.error("SimulatorEnv: exception raised executing killall command for bridge.py {}".format(ce)) - if ps_output.count('rosmaster') > 0: - try: - subprocess.check_call(["killall", "-9", "rosmaster"]) - logger.debug("SimulatorEnv: rosmaster killed.") - except subprocess.CalledProcessError as ce: - logger.error("SimulatorEnv: exception raised executing killall command for rosmaster {}".format(ce)) - - if ps_output.count('roscore') > 0: - try: - subprocess.check_call(["killall", "-9", "roscore"]) - logger.debug("SimulatorEnv: roscore killed.") - except subprocess.CalledProcessError as ce: - logger.error("SimulatorEnv: exception raised executing killall command for roscore {}".format(ce)) + # if ps_output.count('rosmaster') > 0: + # try: + # subprocess.check_call(["killall", "-9", "rosmaster"]) + # logger.debug("SimulatorEnv: rosmaster killed.") + # except subprocess.CalledProcessError as ce: + # logger.error("SimulatorEnv: exception raised executing killall command for rosmaster {}".format(ce)) + # + # if ps_output.count('roscore') > 0: + # try: + # subprocess.check_call(["killall", "-9", "roscore"]) + # logger.debug("SimulatorEnv: roscore killed.") + # except subprocess.CalledProcessError as ce: + # logger.error("SimulatorEnv: exception raised executing killall command for roscore {}".format(ce)) if ps_output.count('px4') > 0: try: diff --git a/behavior_metrics/utils/script_manager_gazebo.py b/behavior_metrics/utils/script_manager_gazebo.py index 32d0ab4a..5317f32b 100644 --- 
a/behavior_metrics/utils/script_manager_gazebo.py
+++ b/behavior_metrics/utils/script_manager_gazebo.py
@@ -22,7 +22,7 @@
 import rospy
 import random
 import sys
-
+import matplotlib.pyplot as plt
 import numpy as np
 from utils import metrics_gazebo
@@ -30,18 +30,44 @@
 from utils.constants import MIN_EXPERIMENT_PERCENTAGE_COMPLETED, CIRCUITS_TIMEOUTS
 from pilot_gazebo import PilotGazebo
 from utils.tmp_world_generator import tmp_world_generator
-from rosgraph_msgs.msg import Clock
-
 def run_brains_worlds(app_configuration, controller, randomize=False):
+ worlds = enumerate(app_configuration.current_world)
+ worlds_list = list(worlds)
+ length_worlds = len(worlds_list)
+
+ # Add any other metric here if needed; the available metrics can be found in metrics_gazebo.py > get_metrics
+ aggregated_metrics = {
+ "average_speed": "meters/s",
+ "percentage_completed": "%",
+ "position_deviation_mae": "meters",
+ }
+ metrics_len = len(aggregated_metrics)
+
+ fig, axs = plt.subplots(length_worlds, metrics_len, figsize=(10, 5))
+
 # Start Behavior Metrics app
 for world_counter, world in enumerate(app_configuration.current_world):
+ parts = world.split('/')[-1]
+ # Strip the file extension to get the world name
+ world_name = parts.split('.')[0]
+ brain_names = []
+
+ brains_metrics = {}
 for brain_counter, brain in enumerate(app_configuration.brain_path):
+ # Split the string by "/"
+ parts = brain.split('/')[-1]
+ # Get the brain keyword (e.g. "dqn") from the file name
+ brain_name = parts.split('_')[-1].split('.')[0]
+ brain_names.append(brain_name)
+ brains_metrics[brain_name] = []
 repetition_counter = 0
 while repetition_counter < app_configuration.experiment_repetitions:
+ logger.info(f"repetition {repetition_counter+1} of {app_configuration.experiment_repetitions}" + f" for brain {brain} in world {world}")
 tmp_world_generator(world, app_configuration.stats_perfect_lap[world_counter],
- app_configuration.real_time_update_rate, randomize=randomize, gui=False,
- launch=True)
+ app_configuration.real_time_update_rate, randomize=randomize, gui=False,
+ launch=True)
 pilot = PilotGazebo(app_configuration, controller, app_configuration.brain_path[brain_counter])
 pilot.daemon = True
 pilot.real_time_update_rate = app_configuration.real_time_update_rate
@@ -104,11 +130,46 @@ def run_brains_worlds(app_configuration, controller, randomize=False):
 logger.info('* Model ---> ' + app_configuration.experiment_model[brain_counter])
 if not pitch_error:
 logger.info('* Metrics ---> ' + str(controller.experiment_metrics))
- repetition_counter += 1
+ brains_metrics[brain_name].append(controller.experiment_metrics)
+
 os.remove('tmp_circuit.launch')
 os.remove('tmp_world.launch')
 while not controller.pilot.execution_completed:
 time.sleep(1)
+
+ repetition_counter += 1
+
+ positions = list(range(1, len(brain_names) + 1))
+ key_counter = 0
+ for key in aggregated_metrics:
+ brains_metrics_names = []
+ brains_metrics_data = []
+ for brain_key in brains_metrics:
+ brain_metric_data = []
+ for repetition_metrics in brains_metrics[brain_key]:
+ brain_metric_data.append(repetition_metrics[key])
+ brains_metrics_names.append(brain_key)
+ brains_metrics_data.append(brain_metric_data)
+
+ if length_worlds > 1:
+ # Create a boxplot for all metrics in the same axis
+ axs[world_counter, key_counter].boxplot(brains_metrics_data)
+ axs[world_counter, key_counter].set_xticks(positions)
+ axs[world_counter, key_counter].set_xticklabels(brains_metrics_names, fontsize=8)
+ axs[world_counter, key_counter].set_title(f"{key} in {world_name}")
+ axs[world_counter, 
key_counter].set_ylabel(aggregated_metrics[key]) + key_counter += 1 + else: + # Create a boxplot for all metrics in the same axis + axs[key_counter].boxplot(brains_metrics_data, positions=positions) + axs[key_counter].set_xticks(positions) + axs[key_counter].set_xticklabels(brains_metrics_names, fontsize=8) + axs[key_counter].set_title(f"{key} in {world_name}") + axs[key_counter].set_ylabel(aggregated_metrics[key]) + key_counter += 1 + + # Display the chart + plt.show() controller.stop_pilot() From 73c3b0d1a958cfedbe15595380bcc44beda3def3 Mon Sep 17 00:00:00 2001 From: Dhruv Patel Date: Sun, 9 Apr 2023 01:10:09 +0530 Subject: [PATCH 07/36] Launching CARLA town01 with pedestrian, bike and cars --- .../parked_bike_car_objects.json | 170 +++++++++++++++++ .../parked_bike_objects.json | 166 +++++++++++++++++ .../pedestrian_objects.json | 166 +++++++++++++++++ .../pedestrian_parked_bike_car_objects.json | 174 ++++++++++++++++++ .../town_01_anticlockwise_parked_bike.launch | 59 ++++++ ...wn_01_anticlockwise_parked_bike_car.launch | 64 +++++++ .../town_01_anticlockwise_pedestrian.launch | 63 +++++++ ...lockwise_pedestrian_parked_bike_car.launch | 69 +++++++ .../configs/default_carla_parked_bike.yml | 73 ++++++++ .../configs/default_carla_parked_bike_car.yml | 73 ++++++++ .../configs/default_carla_pedestrian.yml | 73 ++++++++ ...fault_carla_pedestrian_parked_bike_car.yml | 73 ++++++++ 12 files changed, 1223 insertions(+) create mode 100644 behavior_metrics/configs/CARLA_launch_files/CARLA_object_files/parked_bike_car_objects.json create mode 100644 behavior_metrics/configs/CARLA_launch_files/CARLA_object_files/parked_bike_objects.json create mode 100644 behavior_metrics/configs/CARLA_launch_files/CARLA_object_files/pedestrian_objects.json create mode 100644 behavior_metrics/configs/CARLA_launch_files/CARLA_object_files/pedestrian_parked_bike_car_objects.json create mode 100644 behavior_metrics/configs/CARLA_launch_files/town_01_anticlockwise_parked_bike.launch create mode 100644 behavior_metrics/configs/CARLA_launch_files/town_01_anticlockwise_parked_bike_car.launch create mode 100644 behavior_metrics/configs/CARLA_launch_files/town_01_anticlockwise_pedestrian.launch create mode 100644 behavior_metrics/configs/CARLA_launch_files/town_01_anticlockwise_pedestrian_parked_bike_car.launch create mode 100644 behavior_metrics/configs/default_carla_parked_bike.yml create mode 100644 behavior_metrics/configs/default_carla_parked_bike_car.yml create mode 100644 behavior_metrics/configs/default_carla_pedestrian.yml create mode 100644 behavior_metrics/configs/default_carla_pedestrian_parked_bike_car.yml diff --git a/behavior_metrics/configs/CARLA_launch_files/CARLA_object_files/parked_bike_car_objects.json b/behavior_metrics/configs/CARLA_launch_files/CARLA_object_files/parked_bike_car_objects.json new file mode 100644 index 00000000..70124e1f --- /dev/null +++ b/behavior_metrics/configs/CARLA_launch_files/CARLA_object_files/parked_bike_car_objects.json @@ -0,0 +1,170 @@ +{ + "objects": + [ + { + "type": "sensor.pseudo.traffic_lights", + "id": "traffic_lights" + }, + { + "type": "sensor.pseudo.objects", + "id": "objects" + }, + { + "type": "sensor.pseudo.actor_list", + "id": "actor_list" + }, + { + "type": "sensor.pseudo.markers", + "id": "markers" + }, + { + "type": "sensor.pseudo.opendrive_map", + "id": "map" + }, + { + "type": "vehicle.tesla.model3", + "id": "ego_vehicle", + "sensors": + [ + { + "type": "sensor.camera.rgb", + "id": "rgb_front", + "spawn_point": {"x": 2.0, "y": 0.0, "z": 2.0, "roll": 0.0, 
"pitch": 0.0, "yaw": 0.0}, + "image_size_x": 800, + "image_size_y": 600, + "fov": 90.0 + }, + { + "type": "sensor.camera.rgb", + "id": "rgb_view", + "spawn_point": {"x": -4.5, "y": 0.0, "z": 2.8, "roll": 0.0, "pitch": 20.0, "yaw": 0.0}, + "image_size_x": 800, + "image_size_y": 600, + "fov": 90.0, + "attached_objects": + [ + { + "type": "actor.pseudo.control", + "id": "control" + } + ] + }, + { + "type": "sensor.lidar.ray_cast", + "id": "lidar", + "spawn_point": {"x": 0.0, "y": 0.0, "z": 2.4, "roll": 0.0, "pitch": 0.0, "yaw": 0.0}, + "range": 50, + "channels": 32, + "points_per_second": 320000, + "upper_fov": 2.0, + "lower_fov": -26.8, + "rotation_frequency": 20, + "noise_stddev": 0.0 + }, + { + "type": "sensor.lidar.ray_cast_semantic", + "id": "semantic_lidar", + "spawn_point": {"x": 0.0, "y": 0.0, "z": 2.4, "roll": 0.0, "pitch": 0.0, "yaw": 0.0}, + "range": 50, + "channels": 32, + "points_per_second": 320000, + "upper_fov": 2.0, + "lower_fov": -26.8, + "rotation_frequency": 20 + }, + { + "type": "sensor.other.radar", + "id": "radar_front", + "spawn_point": {"x": 2.0, "y": 0.0, "z": 2.0, "roll": 0.0, "pitch": 0.0, "yaw": 0.0}, + "horizontal_fov": 30.0, + "vertical_fov": 10.0, + "points_per_second": 1500, + "range": 100.0 + }, + { + "type": "sensor.camera.semantic_segmentation", + "id": "semantic_segmentation_front", + "spawn_point": {"x": 2.0, "y": 0.0, "z": 2.0, "roll": 0.0, "pitch": 0.0, "yaw": 0.0}, + "fov": 90.0, + "image_size_x": 400, + "image_size_y": 70 + }, + { + "type": "sensor.camera.depth", + "id": "depth_front", + "spawn_point": {"x": 2.0, "y": 0.0, "z": 2.0, "roll": 0.0, "pitch": 0.0, "yaw": 0.0}, + "fov": 90.0, + "image_size_x": 400, + "image_size_y": 70 + }, + { + "type": "sensor.camera.dvs", + "id": "dvs_front", + "spawn_point": {"x": 2.0, "y": 0.0, "z": 2.0, "roll": 0.0, "pitch": 0.0, "yaw": 0.0}, + "fov": 90.0, + "image_size_x": 400, + "image_size_y": 70, + "positive_threshold": 0.3, + "negative_threshold": 0.3, + "sigma_positive_threshold": 0.0, + "sigma_negative_threshold": 0.0, + "use_log": true, + "log_eps": 0.001 + }, + { + "type": "sensor.other.gnss", + "id": "gnss", + "spawn_point": {"x": 1.0, "y": 0.0, "z": 2.0}, + "noise_alt_stddev": 0.0, "noise_lat_stddev": 0.0, "noise_lon_stddev": 0.0, + "noise_alt_bias": 0.0, "noise_lat_bias": 0.0, "noise_lon_bias": 0.0 + }, + { + "type": "sensor.other.imu", + "id": "imu", + "spawn_point": {"x": 2.0, "y": 0.0, "z": 2.0, "roll": 0.0, "pitch": 0.0, "yaw": 0.0}, + "noise_accel_stddev_x": 0.0, "noise_accel_stddev_y": 0.0, "noise_accel_stddev_z": 0.0, + "noise_gyro_stddev_x": 0.0, "noise_gyro_stddev_y": 0.0, "noise_gyro_stddev_z": 0.0, + "noise_gyro_bias_x": 0.0, "noise_gyro_bias_y": 0.0, "noise_gyro_bias_z": 0.0 + }, + { + "type": "sensor.other.collision", + "id": "collision", + "spawn_point": {"x": 0.0, "y": 0.0, "z": 0.0} + }, + { + "type": "sensor.other.lane_invasion", + "id": "lane_invasion", + "spawn_point": {"x": 0.0, "y": 0.0, "z": 0.0} + }, + { + "type": "sensor.pseudo.tf", + "id": "tf" + }, + { + "type": "sensor.pseudo.objects", + "id": "objects" + }, + { + "type": "sensor.pseudo.odom", + "id": "odometry" + }, + { + "type": "sensor.pseudo.speedometer", + "id": "speedometer" + }, + { + "type": "actor.pseudo.control", + "id": "control" + } + ] + }, + { + "type": "vehicle.yamaha.yzf", + "id": "parked_bike" + }, + { + "type": "vehicle.audi.a2", + "id": "parked_car" + } + ] +} \ No newline at end of file diff --git a/behavior_metrics/configs/CARLA_launch_files/CARLA_object_files/parked_bike_objects.json 
b/behavior_metrics/configs/CARLA_launch_files/CARLA_object_files/parked_bike_objects.json new file mode 100644 index 00000000..816ebda5 --- /dev/null +++ b/behavior_metrics/configs/CARLA_launch_files/CARLA_object_files/parked_bike_objects.json @@ -0,0 +1,166 @@ +{ + "objects": + [ + { + "type": "sensor.pseudo.traffic_lights", + "id": "traffic_lights" + }, + { + "type": "sensor.pseudo.objects", + "id": "objects" + }, + { + "type": "sensor.pseudo.actor_list", + "id": "actor_list" + }, + { + "type": "sensor.pseudo.markers", + "id": "markers" + }, + { + "type": "sensor.pseudo.opendrive_map", + "id": "map" + }, + { + "type": "vehicle.tesla.model3", + "id": "ego_vehicle", + "sensors": + [ + { + "type": "sensor.camera.rgb", + "id": "rgb_front", + "spawn_point": {"x": 2.0, "y": 0.0, "z": 2.0, "roll": 0.0, "pitch": 0.0, "yaw": 0.0}, + "image_size_x": 800, + "image_size_y": 600, + "fov": 90.0 + }, + { + "type": "sensor.camera.rgb", + "id": "rgb_view", + "spawn_point": {"x": -4.5, "y": 0.0, "z": 2.8, "roll": 0.0, "pitch": 20.0, "yaw": 0.0}, + "image_size_x": 800, + "image_size_y": 600, + "fov": 90.0, + "attached_objects": + [ + { + "type": "actor.pseudo.control", + "id": "control" + } + ] + }, + { + "type": "sensor.lidar.ray_cast", + "id": "lidar", + "spawn_point": {"x": 0.0, "y": 0.0, "z": 2.4, "roll": 0.0, "pitch": 0.0, "yaw": 0.0}, + "range": 50, + "channels": 32, + "points_per_second": 320000, + "upper_fov": 2.0, + "lower_fov": -26.8, + "rotation_frequency": 20, + "noise_stddev": 0.0 + }, + { + "type": "sensor.lidar.ray_cast_semantic", + "id": "semantic_lidar", + "spawn_point": {"x": 0.0, "y": 0.0, "z": 2.4, "roll": 0.0, "pitch": 0.0, "yaw": 0.0}, + "range": 50, + "channels": 32, + "points_per_second": 320000, + "upper_fov": 2.0, + "lower_fov": -26.8, + "rotation_frequency": 20 + }, + { + "type": "sensor.other.radar", + "id": "radar_front", + "spawn_point": {"x": 2.0, "y": 0.0, "z": 2.0, "roll": 0.0, "pitch": 0.0, "yaw": 0.0}, + "horizontal_fov": 30.0, + "vertical_fov": 10.0, + "points_per_second": 1500, + "range": 100.0 + }, + { + "type": "sensor.camera.semantic_segmentation", + "id": "semantic_segmentation_front", + "spawn_point": {"x": 2.0, "y": 0.0, "z": 2.0, "roll": 0.0, "pitch": 0.0, "yaw": 0.0}, + "fov": 90.0, + "image_size_x": 400, + "image_size_y": 70 + }, + { + "type": "sensor.camera.depth", + "id": "depth_front", + "spawn_point": {"x": 2.0, "y": 0.0, "z": 2.0, "roll": 0.0, "pitch": 0.0, "yaw": 0.0}, + "fov": 90.0, + "image_size_x": 400, + "image_size_y": 70 + }, + { + "type": "sensor.camera.dvs", + "id": "dvs_front", + "spawn_point": {"x": 2.0, "y": 0.0, "z": 2.0, "roll": 0.0, "pitch": 0.0, "yaw": 0.0}, + "fov": 90.0, + "image_size_x": 400, + "image_size_y": 70, + "positive_threshold": 0.3, + "negative_threshold": 0.3, + "sigma_positive_threshold": 0.0, + "sigma_negative_threshold": 0.0, + "use_log": true, + "log_eps": 0.001 + }, + { + "type": "sensor.other.gnss", + "id": "gnss", + "spawn_point": {"x": 1.0, "y": 0.0, "z": 2.0}, + "noise_alt_stddev": 0.0, "noise_lat_stddev": 0.0, "noise_lon_stddev": 0.0, + "noise_alt_bias": 0.0, "noise_lat_bias": 0.0, "noise_lon_bias": 0.0 + }, + { + "type": "sensor.other.imu", + "id": "imu", + "spawn_point": {"x": 2.0, "y": 0.0, "z": 2.0, "roll": 0.0, "pitch": 0.0, "yaw": 0.0}, + "noise_accel_stddev_x": 0.0, "noise_accel_stddev_y": 0.0, "noise_accel_stddev_z": 0.0, + "noise_gyro_stddev_x": 0.0, "noise_gyro_stddev_y": 0.0, "noise_gyro_stddev_z": 0.0, + "noise_gyro_bias_x": 0.0, "noise_gyro_bias_y": 0.0, "noise_gyro_bias_z": 0.0 + }, + { + "type": 
"sensor.other.collision", + "id": "collision", + "spawn_point": {"x": 0.0, "y": 0.0, "z": 0.0} + }, + { + "type": "sensor.other.lane_invasion", + "id": "lane_invasion", + "spawn_point": {"x": 0.0, "y": 0.0, "z": 0.0} + }, + { + "type": "sensor.pseudo.tf", + "id": "tf" + }, + { + "type": "sensor.pseudo.objects", + "id": "objects" + }, + { + "type": "sensor.pseudo.odom", + "id": "odometry" + }, + { + "type": "sensor.pseudo.speedometer", + "id": "speedometer" + }, + { + "type": "actor.pseudo.control", + "id": "control" + } + ] + }, + { + "type": "vehicle.yamaha.yzf", + "id": "parked_bike" + } + ] +} \ No newline at end of file diff --git a/behavior_metrics/configs/CARLA_launch_files/CARLA_object_files/pedestrian_objects.json b/behavior_metrics/configs/CARLA_launch_files/CARLA_object_files/pedestrian_objects.json new file mode 100644 index 00000000..19e2556f --- /dev/null +++ b/behavior_metrics/configs/CARLA_launch_files/CARLA_object_files/pedestrian_objects.json @@ -0,0 +1,166 @@ +{ + "objects": + [ + { + "type": "sensor.pseudo.traffic_lights", + "id": "traffic_lights" + }, + { + "type": "sensor.pseudo.objects", + "id": "objects" + }, + { + "type": "sensor.pseudo.actor_list", + "id": "actor_list" + }, + { + "type": "sensor.pseudo.markers", + "id": "markers" + }, + { + "type": "sensor.pseudo.opendrive_map", + "id": "map" + }, + { + "type": "vehicle.tesla.model3", + "id": "ego_vehicle", + "sensors": + [ + { + "type": "sensor.camera.rgb", + "id": "rgb_front", + "spawn_point": {"x": 2.0, "y": 0.0, "z": 2.0, "roll": 0.0, "pitch": 0.0, "yaw": 0.0}, + "image_size_x": 800, + "image_size_y": 600, + "fov": 90.0 + }, + { + "type": "sensor.camera.rgb", + "id": "rgb_view", + "spawn_point": {"x": -4.5, "y": 0.0, "z": 2.8, "roll": 0.0, "pitch": 20.0, "yaw": 0.0}, + "image_size_x": 800, + "image_size_y": 600, + "fov": 90.0, + "attached_objects": + [ + { + "type": "actor.pseudo.control", + "id": "control" + } + ] + }, + { + "type": "sensor.lidar.ray_cast", + "id": "lidar", + "spawn_point": {"x": 0.0, "y": 0.0, "z": 2.4, "roll": 0.0, "pitch": 0.0, "yaw": 0.0}, + "range": 50, + "channels": 32, + "points_per_second": 320000, + "upper_fov": 2.0, + "lower_fov": -26.8, + "rotation_frequency": 20, + "noise_stddev": 0.0 + }, + { + "type": "sensor.lidar.ray_cast_semantic", + "id": "semantic_lidar", + "spawn_point": {"x": 0.0, "y": 0.0, "z": 2.4, "roll": 0.0, "pitch": 0.0, "yaw": 0.0}, + "range": 50, + "channels": 32, + "points_per_second": 320000, + "upper_fov": 2.0, + "lower_fov": -26.8, + "rotation_frequency": 20 + }, + { + "type": "sensor.other.radar", + "id": "radar_front", + "spawn_point": {"x": 2.0, "y": 0.0, "z": 2.0, "roll": 0.0, "pitch": 0.0, "yaw": 0.0}, + "horizontal_fov": 30.0, + "vertical_fov": 10.0, + "points_per_second": 1500, + "range": 100.0 + }, + { + "type": "sensor.camera.semantic_segmentation", + "id": "semantic_segmentation_front", + "spawn_point": {"x": 2.0, "y": 0.0, "z": 2.0, "roll": 0.0, "pitch": 0.0, "yaw": 0.0}, + "fov": 90.0, + "image_size_x": 400, + "image_size_y": 70 + }, + { + "type": "sensor.camera.depth", + "id": "depth_front", + "spawn_point": {"x": 2.0, "y": 0.0, "z": 2.0, "roll": 0.0, "pitch": 0.0, "yaw": 0.0}, + "fov": 90.0, + "image_size_x": 400, + "image_size_y": 70 + }, + { + "type": "sensor.camera.dvs", + "id": "dvs_front", + "spawn_point": {"x": 2.0, "y": 0.0, "z": 2.0, "roll": 0.0, "pitch": 0.0, "yaw": 0.0}, + "fov": 90.0, + "image_size_x": 400, + "image_size_y": 70, + "positive_threshold": 0.3, + "negative_threshold": 0.3, + "sigma_positive_threshold": 0.0, + 
"sigma_negative_threshold": 0.0, + "use_log": true, + "log_eps": 0.001 + }, + { + "type": "sensor.other.gnss", + "id": "gnss", + "spawn_point": {"x": 1.0, "y": 0.0, "z": 2.0}, + "noise_alt_stddev": 0.0, "noise_lat_stddev": 0.0, "noise_lon_stddev": 0.0, + "noise_alt_bias": 0.0, "noise_lat_bias": 0.0, "noise_lon_bias": 0.0 + }, + { + "type": "sensor.other.imu", + "id": "imu", + "spawn_point": {"x": 2.0, "y": 0.0, "z": 2.0, "roll": 0.0, "pitch": 0.0, "yaw": 0.0}, + "noise_accel_stddev_x": 0.0, "noise_accel_stddev_y": 0.0, "noise_accel_stddev_z": 0.0, + "noise_gyro_stddev_x": 0.0, "noise_gyro_stddev_y": 0.0, "noise_gyro_stddev_z": 0.0, + "noise_gyro_bias_x": 0.0, "noise_gyro_bias_y": 0.0, "noise_gyro_bias_z": 0.0 + }, + { + "type": "sensor.other.collision", + "id": "collision", + "spawn_point": {"x": 0.0, "y": 0.0, "z": 0.0} + }, + { + "type": "sensor.other.lane_invasion", + "id": "lane_invasion", + "spawn_point": {"x": 0.0, "y": 0.0, "z": 0.0} + }, + { + "type": "sensor.pseudo.tf", + "id": "tf" + }, + { + "type": "sensor.pseudo.objects", + "id": "objects" + }, + { + "type": "sensor.pseudo.odom", + "id": "odometry" + }, + { + "type": "sensor.pseudo.speedometer", + "id": "speedometer" + }, + { + "type": "actor.pseudo.control", + "id": "control" + } + ] + }, + { + "type": "walker.pedestrian.0001", + "id": "pedestrian" + } + ] +} \ No newline at end of file diff --git a/behavior_metrics/configs/CARLA_launch_files/CARLA_object_files/pedestrian_parked_bike_car_objects.json b/behavior_metrics/configs/CARLA_launch_files/CARLA_object_files/pedestrian_parked_bike_car_objects.json new file mode 100644 index 00000000..f4079106 --- /dev/null +++ b/behavior_metrics/configs/CARLA_launch_files/CARLA_object_files/pedestrian_parked_bike_car_objects.json @@ -0,0 +1,174 @@ +{ + "objects": + [ + { + "type": "sensor.pseudo.traffic_lights", + "id": "traffic_lights" + }, + { + "type": "sensor.pseudo.objects", + "id": "objects" + }, + { + "type": "sensor.pseudo.actor_list", + "id": "actor_list" + }, + { + "type": "sensor.pseudo.markers", + "id": "markers" + }, + { + "type": "sensor.pseudo.opendrive_map", + "id": "map" + }, + { + "type": "vehicle.tesla.model3", + "id": "ego_vehicle", + "sensors": + [ + { + "type": "sensor.camera.rgb", + "id": "rgb_front", + "spawn_point": {"x": 2.0, "y": 0.0, "z": 2.0, "roll": 0.0, "pitch": 0.0, "yaw": 0.0}, + "image_size_x": 800, + "image_size_y": 600, + "fov": 90.0 + }, + { + "type": "sensor.camera.rgb", + "id": "rgb_view", + "spawn_point": {"x": -4.5, "y": 0.0, "z": 2.8, "roll": 0.0, "pitch": 20.0, "yaw": 0.0}, + "image_size_x": 800, + "image_size_y": 600, + "fov": 90.0, + "attached_objects": + [ + { + "type": "actor.pseudo.control", + "id": "control" + } + ] + }, + { + "type": "sensor.lidar.ray_cast", + "id": "lidar", + "spawn_point": {"x": 0.0, "y": 0.0, "z": 2.4, "roll": 0.0, "pitch": 0.0, "yaw": 0.0}, + "range": 50, + "channels": 32, + "points_per_second": 320000, + "upper_fov": 2.0, + "lower_fov": -26.8, + "rotation_frequency": 20, + "noise_stddev": 0.0 + }, + { + "type": "sensor.lidar.ray_cast_semantic", + "id": "semantic_lidar", + "spawn_point": {"x": 0.0, "y": 0.0, "z": 2.4, "roll": 0.0, "pitch": 0.0, "yaw": 0.0}, + "range": 50, + "channels": 32, + "points_per_second": 320000, + "upper_fov": 2.0, + "lower_fov": -26.8, + "rotation_frequency": 20 + }, + { + "type": "sensor.other.radar", + "id": "radar_front", + "spawn_point": {"x": 2.0, "y": 0.0, "z": 2.0, "roll": 0.0, "pitch": 0.0, "yaw": 0.0}, + "horizontal_fov": 30.0, + "vertical_fov": 10.0, + "points_per_second": 1500, + 
"range": 100.0 + }, + { + "type": "sensor.camera.semantic_segmentation", + "id": "semantic_segmentation_front", + "spawn_point": {"x": 2.0, "y": 0.0, "z": 2.0, "roll": 0.0, "pitch": 0.0, "yaw": 0.0}, + "fov": 90.0, + "image_size_x": 400, + "image_size_y": 70 + }, + { + "type": "sensor.camera.depth", + "id": "depth_front", + "spawn_point": {"x": 2.0, "y": 0.0, "z": 2.0, "roll": 0.0, "pitch": 0.0, "yaw": 0.0}, + "fov": 90.0, + "image_size_x": 400, + "image_size_y": 70 + }, + { + "type": "sensor.camera.dvs", + "id": "dvs_front", + "spawn_point": {"x": 2.0, "y": 0.0, "z": 2.0, "roll": 0.0, "pitch": 0.0, "yaw": 0.0}, + "fov": 90.0, + "image_size_x": 400, + "image_size_y": 70, + "positive_threshold": 0.3, + "negative_threshold": 0.3, + "sigma_positive_threshold": 0.0, + "sigma_negative_threshold": 0.0, + "use_log": true, + "log_eps": 0.001 + }, + { + "type": "sensor.other.gnss", + "id": "gnss", + "spawn_point": {"x": 1.0, "y": 0.0, "z": 2.0}, + "noise_alt_stddev": 0.0, "noise_lat_stddev": 0.0, "noise_lon_stddev": 0.0, + "noise_alt_bias": 0.0, "noise_lat_bias": 0.0, "noise_lon_bias": 0.0 + }, + { + "type": "sensor.other.imu", + "id": "imu", + "spawn_point": {"x": 2.0, "y": 0.0, "z": 2.0, "roll": 0.0, "pitch": 0.0, "yaw": 0.0}, + "noise_accel_stddev_x": 0.0, "noise_accel_stddev_y": 0.0, "noise_accel_stddev_z": 0.0, + "noise_gyro_stddev_x": 0.0, "noise_gyro_stddev_y": 0.0, "noise_gyro_stddev_z": 0.0, + "noise_gyro_bias_x": 0.0, "noise_gyro_bias_y": 0.0, "noise_gyro_bias_z": 0.0 + }, + { + "type": "sensor.other.collision", + "id": "collision", + "spawn_point": {"x": 0.0, "y": 0.0, "z": 0.0} + }, + { + "type": "sensor.other.lane_invasion", + "id": "lane_invasion", + "spawn_point": {"x": 0.0, "y": 0.0, "z": 0.0} + }, + { + "type": "sensor.pseudo.tf", + "id": "tf" + }, + { + "type": "sensor.pseudo.objects", + "id": "objects" + }, + { + "type": "sensor.pseudo.odom", + "id": "odometry" + }, + { + "type": "sensor.pseudo.speedometer", + "id": "speedometer" + }, + { + "type": "actor.pseudo.control", + "id": "control" + } + ] + }, + { + "type": "vehicle.yamaha.yzf", + "id": "parked_bike" + }, + { + "type": "vehicle.audi.a2", + "id": "parked_car" + }, + { + "type": "walker.pedestrian.0001", + "id": "pedestrian" + } + ] +} \ No newline at end of file diff --git a/behavior_metrics/configs/CARLA_launch_files/town_01_anticlockwise_parked_bike.launch b/behavior_metrics/configs/CARLA_launch_files/town_01_anticlockwise_parked_bike.launch new file mode 100644 index 00000000..739a3b53 --- /dev/null +++ b/behavior_metrics/configs/CARLA_launch_files/town_01_anticlockwise_parked_bike.launch @@ -0,0 +1,59 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/behavior_metrics/configs/CARLA_launch_files/town_01_anticlockwise_parked_bike_car.launch b/behavior_metrics/configs/CARLA_launch_files/town_01_anticlockwise_parked_bike_car.launch new file mode 100644 index 00000000..e8869fcb --- /dev/null +++ b/behavior_metrics/configs/CARLA_launch_files/town_01_anticlockwise_parked_bike_car.launch @@ -0,0 +1,64 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +' + + + + + + + + + + + + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/behavior_metrics/configs/CARLA_launch_files/town_01_anticlockwise_pedestrian.launch b/behavior_metrics/configs/CARLA_launch_files/town_01_anticlockwise_pedestrian.launch new file mode 100644 index 00000000..1b24f8db --- /dev/null +++ 
b/behavior_metrics/configs/CARLA_launch_files/town_01_anticlockwise_pedestrian.launch @@ -0,0 +1,63 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +' + + + + + + + + + + + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/behavior_metrics/configs/CARLA_launch_files/town_01_anticlockwise_pedestrian_parked_bike_car.launch b/behavior_metrics/configs/CARLA_launch_files/town_01_anticlockwise_pedestrian_parked_bike_car.launch new file mode 100644 index 00000000..62473842 --- /dev/null +++ b/behavior_metrics/configs/CARLA_launch_files/town_01_anticlockwise_pedestrian_parked_bike_car.launch @@ -0,0 +1,69 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +' + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/behavior_metrics/configs/default_carla_parked_bike.yml b/behavior_metrics/configs/default_carla_parked_bike.yml new file mode 100644 index 00000000..bd8efae1 --- /dev/null +++ b/behavior_metrics/configs/default_carla_parked_bike.yml @@ -0,0 +1,73 @@ +Behaviors: + Robot: + Sensors: + Cameras: + Camera_0: + Name: 'camera_0' + Topic: '/carla/ego_vehicle/rgb_front/image' + Camera_1: + Name: 'camera_1' + Topic: '/carla/ego_vehicle/rgb_view/image' + Camera_2: + Name: 'camera_2' + Topic: '/carla/ego_vehicle/semantic_segmentation_front/image' + Camera_3: + Name: 'camera_3' + Topic: '/carla/ego_vehicle/dvs_front/image' + Pose3D: + Pose3D_0: + Name: 'pose3d_0' + Topic: '/carla/ego_vehicle/odometry' + BirdEyeView: + BirdEyeView_0: + Name: 'bird_eye_view_0' + Topic: '' + Speedometer: + Speedometer_0: + Name: 'speedometer_0' + Topic: '/carla/ego_vehicle/speedometer' + Actuators: + CARLA_Motors: + Motors_0: + Name: 'motors_0' + Topic: '/carla/ego_vehicle/vehicle_control_cmd' + MaxV: 3 + MaxW: 0.3 + BrainPath: 'brains/CARLA/brain_carla_bird_eye_deep_learning.py' + PilotTimeCycle: 100 + Parameters: + Model: '20230126-183457_deepestLSTMTinyPilotNet_CARLA_14_12_dataset_bird_eye_400_epochs_no_flip_3_output_both_directions_all_towns__less_learning_rate_PAPER_cp.h5' + ImageCropped: True + ImageSize: [ 100,50 ] + ImageNormalized: True + PredictionsNormalized: True + GPU: True + UseOptimized: True + ImageTranform: '' + Type: 'CARLA' + Simulation: + World: configs/CARLA_launch_files/town_01_anticlockwise_parked_bike.launch + RandomSpawnPoint: False + Dataset: + In: '/tmp/my_bag.bag' + Out: '' + Stats: + Out: './' + PerfectLap: './perfect_bags/lap-simple-circuit.bag' + Layout: + Frame_0: + Name: frame_0 + Geometry: [1, 1, 1, 1] + Data: rgbimage + Frame_1: + Name: frame_1 + Geometry: [0, 1, 1, 1] + Data: rgbimage + Frame_2: + Name: frame_2 + Geometry: [0, 2, 1, 1] + Data: rgbimage + Frame_3: + Name: frame_3 + Geometry: [1, 2, 1, 1] + Data: rgbimage diff --git a/behavior_metrics/configs/default_carla_parked_bike_car.yml b/behavior_metrics/configs/default_carla_parked_bike_car.yml new file mode 100644 index 00000000..6942aaa4 --- /dev/null +++ b/behavior_metrics/configs/default_carla_parked_bike_car.yml @@ -0,0 +1,73 @@ +Behaviors: + Robot: + Sensors: + Cameras: + Camera_0: + Name: 'camera_0' + Topic: '/carla/ego_vehicle/rgb_front/image' + Camera_1: + Name: 'camera_1' + Topic: '/carla/ego_vehicle/rgb_view/image' + Camera_2: + Name: 'camera_2' + Topic: '/carla/ego_vehicle/semantic_segmentation_front/image' + Camera_3: + Name: 'camera_3' + Topic: '/carla/ego_vehicle/dvs_front/image' + Pose3D: + Pose3D_0: + Name: 'pose3d_0' + Topic: '/carla/ego_vehicle/odometry' + BirdEyeView: + BirdEyeView_0: + 
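+          # Topic is left empty: the bird's-eye view is generated in-process from
+          # the ego vehicle (see bird_eye_view.getImage(self.vehicle) in the brain)
+          # rather than subscribed from a carla_ros_bridge topic.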
Name: 'bird_eye_view_0' + Topic: '' + Speedometer: + Speedometer_0: + Name: 'speedometer_0' + Topic: '/carla/ego_vehicle/speedometer' + Actuators: + CARLA_Motors: + Motors_0: + Name: 'motors_0' + Topic: '/carla/ego_vehicle/vehicle_control_cmd' + MaxV: 3 + MaxW: 0.3 + BrainPath: 'brains/CARLA/brain_carla_bird_eye_deep_learning.py' + PilotTimeCycle: 100 + Parameters: + Model: '20230126-183457_deepestLSTMTinyPilotNet_CARLA_14_12_dataset_bird_eye_400_epochs_no_flip_3_output_both_directions_all_towns__less_learning_rate_PAPER_cp.h5' + ImageCropped: True + ImageSize: [ 100,50 ] + ImageNormalized: True + PredictionsNormalized: True + GPU: True + UseOptimized: True + ImageTranform: '' + Type: 'CARLA' + Simulation: + World: configs/CARLA_launch_files/town_01_anticlockwise_parked_bike_car.launch + RandomSpawnPoint: False + Dataset: + In: '/tmp/my_bag.bag' + Out: '' + Stats: + Out: './' + PerfectLap: './perfect_bags/lap-simple-circuit.bag' + Layout: + Frame_0: + Name: frame_0 + Geometry: [1, 1, 1, 1] + Data: rgbimage + Frame_1: + Name: frame_1 + Geometry: [0, 1, 1, 1] + Data: rgbimage + Frame_2: + Name: frame_2 + Geometry: [0, 2, 1, 1] + Data: rgbimage + Frame_3: + Name: frame_3 + Geometry: [1, 2, 1, 1] + Data: rgbimage diff --git a/behavior_metrics/configs/default_carla_pedestrian.yml b/behavior_metrics/configs/default_carla_pedestrian.yml new file mode 100644 index 00000000..2b72b1ba --- /dev/null +++ b/behavior_metrics/configs/default_carla_pedestrian.yml @@ -0,0 +1,73 @@ +Behaviors: + Robot: + Sensors: + Cameras: + Camera_0: + Name: 'camera_0' + Topic: '/carla/ego_vehicle/rgb_front/image' + Camera_1: + Name: 'camera_1' + Topic: '/carla/ego_vehicle/rgb_view/image' + Camera_2: + Name: 'camera_2' + Topic: '/carla/ego_vehicle/semantic_segmentation_front/image' + Camera_3: + Name: 'camera_3' + Topic: '/carla/ego_vehicle/dvs_front/image' + Pose3D: + Pose3D_0: + Name: 'pose3d_0' + Topic: '/carla/ego_vehicle/odometry' + BirdEyeView: + BirdEyeView_0: + Name: 'bird_eye_view_0' + Topic: '' + Speedometer: + Speedometer_0: + Name: 'speedometer_0' + Topic: '/carla/ego_vehicle/speedometer' + Actuators: + CARLA_Motors: + Motors_0: + Name: 'motors_0' + Topic: '/carla/ego_vehicle/vehicle_control_cmd' + MaxV: 3 + MaxW: 0.3 + BrainPath: 'brains/CARLA/brain_carla_bird_eye_deep_learning.py' + PilotTimeCycle: 100 + Parameters: + Model: '20230126-183457_deepestLSTMTinyPilotNet_CARLA_14_12_dataset_bird_eye_400_epochs_no_flip_3_output_both_directions_all_towns__less_learning_rate_PAPER_cp.h5' + ImageCropped: True + ImageSize: [ 100,50 ] + ImageNormalized: True + PredictionsNormalized: True + GPU: True + UseOptimized: True + ImageTranform: '' + Type: 'CARLA' + Simulation: + World: configs/CARLA_launch_files/town_01_anticlockwise_pedestrian.launch + RandomSpawnPoint: False + Dataset: + In: '/tmp/my_bag.bag' + Out: '' + Stats: + Out: './' + PerfectLap: './perfect_bags/lap-simple-circuit.bag' + Layout: + Frame_0: + Name: frame_0 + Geometry: [1, 1, 1, 1] + Data: rgbimage + Frame_1: + Name: frame_1 + Geometry: [0, 1, 1, 1] + Data: rgbimage + Frame_2: + Name: frame_2 + Geometry: [0, 2, 1, 1] + Data: rgbimage + Frame_3: + Name: frame_3 + Geometry: [1, 2, 1, 1] + Data: rgbimage diff --git a/behavior_metrics/configs/default_carla_pedestrian_parked_bike_car.yml b/behavior_metrics/configs/default_carla_pedestrian_parked_bike_car.yml new file mode 100644 index 00000000..e18b3a27 --- /dev/null +++ b/behavior_metrics/configs/default_carla_pedestrian_parked_bike_car.yml @@ -0,0 +1,73 @@ +Behaviors: + Robot: + Sensors: + Cameras: + 
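+        # The camera topics below are published by carla_ros_bridge for the sensors
+        # declared in CARLA_object_files/pedestrian_parked_bike_car_objects.json
+        # (rgb_front, rgb_view, semantic_segmentation_front, dvs_front).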
Camera_0: + Name: 'camera_0' + Topic: '/carla/ego_vehicle/rgb_front/image' + Camera_1: + Name: 'camera_1' + Topic: '/carla/ego_vehicle/rgb_view/image' + Camera_2: + Name: 'camera_2' + Topic: '/carla/ego_vehicle/semantic_segmentation_front/image' + Camera_3: + Name: 'camera_3' + Topic: '/carla/ego_vehicle/dvs_front/image' + Pose3D: + Pose3D_0: + Name: 'pose3d_0' + Topic: '/carla/ego_vehicle/odometry' + BirdEyeView: + BirdEyeView_0: + Name: 'bird_eye_view_0' + Topic: '' + Speedometer: + Speedometer_0: + Name: 'speedometer_0' + Topic: '/carla/ego_vehicle/speedometer' + Actuators: + CARLA_Motors: + Motors_0: + Name: 'motors_0' + Topic: '/carla/ego_vehicle/vehicle_control_cmd' + MaxV: 3 + MaxW: 0.3 + BrainPath: 'brains/CARLA/brain_carla_bird_eye_deep_learning.py' + PilotTimeCycle: 100 + Parameters: + Model: '20230126-183457_deepestLSTMTinyPilotNet_CARLA_14_12_dataset_bird_eye_400_epochs_no_flip_3_output_both_directions_all_towns__less_learning_rate_PAPER_cp.h5' + ImageCropped: True + ImageSize: [ 100,50 ] + ImageNormalized: True + PredictionsNormalized: True + GPU: True + UseOptimized: True + ImageTranform: '' + Type: 'CARLA' + Simulation: + World: configs/CARLA_launch_files/town_01_anticlockwise_pedestrian_parked_bike_car.launch + RandomSpawnPoint: False + Dataset: + In: '/tmp/my_bag.bag' + Out: '' + Stats: + Out: './' + PerfectLap: './perfect_bags/lap-simple-circuit.bag' + Layout: + Frame_0: + Name: frame_0 + Geometry: [1, 1, 1, 1] + Data: rgbimage + Frame_1: + Name: frame_1 + Geometry: [0, 1, 1, 1] + Data: rgbimage + Frame_2: + Name: frame_2 + Geometry: [0, 2, 1, 1] + Data: rgbimage + Frame_3: + Name: frame_3 + Geometry: [1, 2, 1, 1] + Data: rgbimage From 22b2add582813eb3d6b081883d8ca2a9965ed996 Mon Sep 17 00:00:00 2001 From: enrique Date: Wed, 12 Apr 2023 17:16:53 +0200 Subject: [PATCH 08/36] minor changes on the launch file --- ...carla_subjective_vision_deep_learning_previous_v.py | 10 ++++++---- .../CARLA_launch_files/town_02_anticlockwise.launch | 6 +++--- 2 files changed, 9 insertions(+), 7 deletions(-) diff --git a/behavior_metrics/brains/CARLA/brain_carla_subjective_vision_deep_learning_previous_v.py b/behavior_metrics/brains/CARLA/brain_carla_subjective_vision_deep_learning_previous_v.py index bd19510a..f088eccd 100644 --- a/behavior_metrics/brains/CARLA/brain_carla_subjective_vision_deep_learning_previous_v.py +++ b/behavior_metrics/brains/CARLA/brain_carla_subjective_vision_deep_learning_previous_v.py @@ -73,11 +73,11 @@ def __init__(self, sensors, actuators, handler, model, config=None): client = carla.Client('localhost', 2000) client.set_timeout(10.0) # seconds - world = client.get_world() - world.unload_map_layer(carla.MapLayer.Buildings) + self.world = client.get_world() + self.world.unload_map_layer(carla.MapLayer.Buildings) time.sleep(5) - self.vehicle = world.get_actors().filter('vehicle.*')[0] + self.vehicle = self.world.get_actors().filter('vehicle.*')[0] if model: if not path.exists(PRETRAINED_MODELS + model): @@ -125,6 +125,8 @@ def execute(self): image_3 = self.camera_3.getImage().data cropped = image[230:-1,:] + if self.cont < 20: + self.cont += 1 bird_eye_view_1 = self.bird_eye_view.getImage(self.vehicle) bird_eye_view_1 = cv2.cvtColor(bird_eye_view_1, cv2.COLOR_BGR2RGB) @@ -185,7 +187,7 @@ def execute(self): vehicle_speed = 3.6 * math.sqrt(speed.x**2 + speed.y**2 + speed.z**2) self.previous_speed = vehicle_speed - if vehicle_speed < 10: + if self.cont < 20: self.motors.sendThrottle(1.0) self.motors.sendSteer(0.0) self.motors.sendBrake(0) diff --git 
a/behavior_metrics/configs/CARLA_launch_files/town_02_anticlockwise.launch b/behavior_metrics/configs/CARLA_launch_files/town_02_anticlockwise.launch index 6389fcf6..4fa3bebe 100644 --- a/behavior_metrics/configs/CARLA_launch_files/town_02_anticlockwise.launch +++ b/behavior_metrics/configs/CARLA_launch_files/town_02_anticlockwise.launch @@ -20,7 +20,7 @@ - [XML line, markup stripped] + [XML line, markup stripped] @@ -35,7 +35,7 @@ - [XML line, markup stripped] + [XML line, markup stripped] @@ -55,7 +55,7 @@ - [XML line, markup stripped] + [XML line, markup stripped] From f92e7e94a8f05a4e1a7af19b364aea015dd5fba0 Mon Sep 17 00:00:00 2001 From: enrique Date: Wed, 12 Apr 2023 20:35:52 +0200 Subject: [PATCH 09/36] stay up to date with the original repository --- behavior_metrics/configs/default_carla_subjective_vision.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/behavior_metrics/configs/default_carla_subjective_vision.yml b/behavior_metrics/configs/default_carla_subjective_vision.yml index 277add65..8a95ffd9 100644 --- a/behavior_metrics/configs/default_carla_subjective_vision.yml +++ b/behavior_metrics/configs/default_carla_subjective_vision.yml @@ -45,9 +45,9 @@ Behaviors: UseOptimized: True ImageTranform: '' Type: 'CARLA' - RandomSpawnPoint: False Simulation: World: configs/CARLA_launch_files/town_02_anticlockwise.launch + RandomSpawnPoint: False Dataset: In: '/tmp/my_bag.bag' Out: '' From a1b59392d72853d25dfae7670827e4b1b8c420d8 Mon Sep 17 00:00:00 2001 From: ruben Date: Thu, 13 Apr 2023 19:52:06 +0200 Subject: [PATCH 10/36] --amend --- behavior_metrics/configs/default-multiple.yml | 4 +- .../fingerprint.pb | Bin 0 -> 55 bytes .../keras_metadata.pb | 7 +++ .../saved_model.pb | Bin 0 -> 88492 bytes .../variables/variables.data-00000-of-00001 | Bin 0 -> 9532 bytes .../variables/variables.index | Bin 0 -> 1393 bytes behavior_metrics/utils/environment.py | 54 +++++++++--------- .../utils/script_manager_gazebo.py | 2 +- behavior_metrics/utils/tmp_world_generator.py | 4 +- 9 files changed, 39 insertions(+), 32 deletions(-) create mode 100644 behavior_metrics/models/rl_models/dqn/DQN_sp_16x16_LAPCOMPLETED_Max569_Epoch1455_inTime20230413-103523.model/fingerprint.pb create mode 100644 behavior_metrics/models/rl_models/dqn/DQN_sp_16x16_LAPCOMPLETED_Max569_Epoch1455_inTime20230413-103523.model/keras_metadata.pb create mode 100644 behavior_metrics/models/rl_models/dqn/DQN_sp_16x16_LAPCOMPLETED_Max569_Epoch1455_inTime20230413-103523.model/saved_model.pb create mode 100644 behavior_metrics/models/rl_models/dqn/DQN_sp_16x16_LAPCOMPLETED_Max569_Epoch1455_inTime20230413-103523.model/variables/variables.data-00000-of-00001 create mode 100644 behavior_metrics/models/rl_models/dqn/DQN_sp_16x16_LAPCOMPLETED_Max569_Epoch1455_inTime20230413-103523.model/variables/variables.index diff --git a/behavior_metrics/configs/default-multiple.yml b/behavior_metrics/configs/default-multiple.yml index 6bf41372..58da4295 100644 --- a/behavior_metrics/configs/default-multiple.yml +++ b/behavior_metrics/configs/default-multiple.yml @@ -29,8 +29,8 @@ Behaviors: Experiment: Name: "Experiment name" Description: "Experiment description" - Timeout: [400, 400] - UseWorldTimeouts: [400, 400] + Timeout: [5, 5] + UseWorldTimeouts: [5, 5] Repetitions: 5 Simulation: World: ['/opt/jderobot/share/jderobot/gazebo/launch/simple_circuit.launch', '/opt/jderobot/share/jderobot/gazebo/launch/many_curves.launch'] diff --git a/behavior_metrics/models/rl_models/dqn/DQN_sp_16x16_LAPCOMPLETED_Max569_Epoch1455_inTime20230413-103523.model/fingerprint.pb b/behavior_metrics/models/rl_models/dqn/DQN_sp_16x16_LAPCOMPLETED_Max569_Epoch1455_inTime20230413-103523.model/fingerprint.pb new file mode
100644 index 0000000000000000000000000000000000000000..902cdbea1924bde5abbaa2d839d6f002f890edd0 GIT binary patch literal 55 zcmV-70LcFcq{-Uo^MUX0KoHEY?7@zRwX@m*7=Y#9gRQi!_I)6-fQGIA%E0jV0Vtr- NivRS+iMG1|G62ujA-ezo literal 0 HcmV?d00001 diff --git a/behavior_metrics/models/rl_models/dqn/DQN_sp_16x16_LAPCOMPLETED_Max569_Epoch1455_inTime20230413-103523.model/keras_metadata.pb b/behavior_metrics/models/rl_models/dqn/DQN_sp_16x16_LAPCOMPLETED_Max569_Epoch1455_inTime20230413-103523.model/keras_metadata.pb new file mode 100644 index 00000000..34cd4e36 --- /dev/null +++ b/behavior_metrics/models/rl_models/dqn/DQN_sp_16x16_LAPCOMPLETED_Max569_Epoch1455_inTime20230413-103523.model/keras_metadata.pb @@ -0,0 +1,7 @@ + +ý)root"_tf_keras_network*Û){"name": "model", "trainable": true, "expects_training_arg": true, "dtype": "float32", "batch_input_shape": null, "must_restore_from_config": false, "preserve_input_structure_in_config": false, "autocast": false, "class_name": "Functional", "config": {"name": "model", "layers": [{"class_name": "InputLayer", "config": {"batch_input_shape": {"class_name": "__tuple__", "items": [null, 1]}, "dtype": "float32", "sparse": false, "ragged": false, "name": "input_1"}, "name": "input_1", "inbound_nodes": []}, {"class_name": "Dense", "config": {"name": "dense", "trainable": true, "dtype": "float32", "units": 16, "activation": "relu", "use_bias": true, "kernel_initializer": {"class_name": "GlorotUniform", "config": {"seed": null}}, "bias_initializer": {"class_name": "Zeros", "config": {}}, "kernel_regularizer": null, "bias_regularizer": null, "activity_regularizer": null, "kernel_constraint": null, "bias_constraint": null}, "name": "dense", "inbound_nodes": [[["input_1", 0, 0, {}]]]}, {"class_name": "Dense", "config": {"name": "dense_1", "trainable": true, "dtype": "float32", "units": 16, "activation": "relu", "use_bias": true, "kernel_initializer": {"class_name": "GlorotUniform", "config": {"seed": null}}, "bias_initializer": {"class_name": "Zeros", "config": {}}, "kernel_regularizer": null, "bias_regularizer": null, "activity_regularizer": null, "kernel_constraint": null, "bias_constraint": null}, "name": "dense_1", "inbound_nodes": [[["dense", 0, 0, {}]]]}, {"class_name": "Dense", "config": {"name": "dense_2", "trainable": true, "dtype": "float32", "units": 3, "activation": "linear", "use_bias": true, "kernel_initializer": {"class_name": "GlorotUniform", "config": {"seed": null}}, "bias_initializer": {"class_name": "Zeros", "config": {}}, "kernel_regularizer": null, "bias_regularizer": null, "activity_regularizer": null, "kernel_constraint": null, "bias_constraint": null}, "name": "dense_2", "inbound_nodes": [[["dense_1", 0, 0, {}]]]}], "input_layers": [["input_1", 0, 0]], "output_layers": [["dense_2", 0, 0]]}, "shared_object_id": 10, "input_spec": [{"class_name": "InputSpec", "config": {"dtype": null, "shape": {"class_name": "__tuple__", "items": [null, 1]}, "ndim": 2, "max_ndim": null, "min_ndim": null, "axes": {}}}], "build_input_shape": {"class_name": "TensorShape", "items": [null, 1]}, "is_graph_network": true, "full_save_spec": {"class_name": "__tuple__", "items": [[{"class_name": "TypeSpec", "type_spec": "tf.TensorSpec", "serialized": [{"class_name": "TensorShape", "items": [null, 1]}, "float32", "input_1"]}], {}]}, "save_spec": {"class_name": "TypeSpec", "type_spec": "tf.TensorSpec", "serialized": [{"class_name": "TensorShape", "items": [null, 1]}, "float32", "input_1"]}, "keras_version": "2.11.0", "backend": "tensorflow", "model_config": {"class_name": 
"Functional", "config": {"name": "model", "layers": [{"class_name": "InputLayer", "config": {"batch_input_shape": {"class_name": "__tuple__", "items": [null, 1]}, "dtype": "float32", "sparse": false, "ragged": false, "name": "input_1"}, "name": "input_1", "inbound_nodes": [], "shared_object_id": 0}, {"class_name": "Dense", "config": {"name": "dense", "trainable": true, "dtype": "float32", "units": 16, "activation": "relu", "use_bias": true, "kernel_initializer": {"class_name": "GlorotUniform", "config": {"seed": null}, "shared_object_id": 1}, "bias_initializer": {"class_name": "Zeros", "config": {}, "shared_object_id": 2}, "kernel_regularizer": null, "bias_regularizer": null, "activity_regularizer": null, "kernel_constraint": null, "bias_constraint": null}, "name": "dense", "inbound_nodes": [[["input_1", 0, 0, {}]]], "shared_object_id": 3}, {"class_name": "Dense", "config": {"name": "dense_1", "trainable": true, "dtype": "float32", "units": 16, "activation": "relu", "use_bias": true, "kernel_initializer": {"class_name": "GlorotUniform", "config": {"seed": null}, "shared_object_id": 4}, "bias_initializer": {"class_name": "Zeros", "config": {}, "shared_object_id": 5}, "kernel_regularizer": null, "bias_regularizer": null, "activity_regularizer": null, "kernel_constraint": null, "bias_constraint": null}, "name": "dense_1", "inbound_nodes": [[["dense", 0, 0, {}]]], "shared_object_id": 6}, {"class_name": "Dense", "config": {"name": "dense_2", "trainable": true, "dtype": "float32", "units": 3, "activation": "linear", "use_bias": true, "kernel_initializer": {"class_name": "GlorotUniform", "config": {"seed": null}, "shared_object_id": 7}, "bias_initializer": {"class_name": "Zeros", "config": {}, "shared_object_id": 8}, "kernel_regularizer": null, "bias_regularizer": null, "activity_regularizer": null, "kernel_constraint": null, "bias_constraint": null}, "name": "dense_2", "inbound_nodes": [[["dense_1", 0, 0, {}]]], "shared_object_id": 9}], "input_layers": [["input_1", 0, 0]], "output_layers": [["dense_2", 0, 0]]}}, "training_config": {"loss": "mse", "metrics": null, "weighted_metrics": null, "loss_weights": null, "optimizer_config": {"class_name": "Custom>Adam", "config": {"name": "Adam", "weight_decay": null, "clipnorm": null, "global_clipnorm": null, "clipvalue": null, "use_ema": false, "ema_momentum": 0.99, "ema_overwrite_frequency": null, "jit_compile": true, "is_legacy_optimizer": false, "learning_rate": 0.004999999888241291, "beta_1": 0.9, "beta_2": 0.999, "epsilon": 1e-07, "amsgrad": false}}}}2 +ö root.layer-0"_tf_keras_input_layer*Æ{"class_name": "InputLayer", "name": "input_1", "dtype": "float32", "sparse": false, "ragged": false, "batch_input_shape": {"class_name": "__tuple__", "items": [null, 1]}, "config": {"batch_input_shape": {"class_name": "__tuple__", "items": [null, 1]}, "dtype": "float32", "sparse": false, "ragged": false, "name": "input_1"}}2 +ªroot.layer_with_weights-0"_tf_keras_layer*ó{"name": "dense", "trainable": true, "expects_training_arg": false, "dtype": "float32", "batch_input_shape": null, "stateful": false, "must_restore_from_config": false, "preserve_input_structure_in_config": false, "autocast": true, "class_name": "Dense", "config": {"name": "dense", "trainable": true, "dtype": "float32", "units": 16, "activation": "relu", "use_bias": true, "kernel_initializer": {"class_name": "GlorotUniform", "config": {"seed": null}, "shared_object_id": 1}, "bias_initializer": {"class_name": "Zeros", "config": {}, "shared_object_id": 2}, "kernel_regularizer": null, 
"bias_regularizer": null, "activity_regularizer": null, "kernel_constraint": null, "bias_constraint": null}, "inbound_nodes": [[["input_1", 0, 0, {}]]], "shared_object_id": 3, "input_spec": {"class_name": "InputSpec", "config": {"dtype": null, "shape": null, "ndim": null, "max_ndim": null, "min_ndim": 2, "axes": {"-1": 1}}, "shared_object_id": 12}, "build_input_shape": {"class_name": "TensorShape", "items": [null, 1]}}2 +®root.layer_with_weights-1"_tf_keras_layer*÷{"name": "dense_1", "trainable": true, "expects_training_arg": false, "dtype": "float32", "batch_input_shape": null, "stateful": false, "must_restore_from_config": false, "preserve_input_structure_in_config": false, "autocast": true, "class_name": "Dense", "config": {"name": "dense_1", "trainable": true, "dtype": "float32", "units": 16, "activation": "relu", "use_bias": true, "kernel_initializer": {"class_name": "GlorotUniform", "config": {"seed": null}, "shared_object_id": 4}, "bias_initializer": {"class_name": "Zeros", "config": {}, "shared_object_id": 5}, "kernel_regularizer": null, "bias_regularizer": null, "activity_regularizer": null, "kernel_constraint": null, "bias_constraint": null}, "inbound_nodes": [[["dense", 0, 0, {}]]], "shared_object_id": 6, "input_spec": {"class_name": "InputSpec", "config": {"dtype": null, "shape": null, "ndim": null, "max_ndim": null, "min_ndim": 2, "axes": {"-1": 16}}, "shared_object_id": 13}, "build_input_shape": {"class_name": "TensorShape", "items": [null, 16]}}2 +±root.layer_with_weights-2"_tf_keras_layer*ú{"name": "dense_2", "trainable": true, "expects_training_arg": false, "dtype": "float32", "batch_input_shape": null, "stateful": false, "must_restore_from_config": false, "preserve_input_structure_in_config": false, "autocast": true, "class_name": "Dense", "config": {"name": "dense_2", "trainable": true, "dtype": "float32", "units": 3, "activation": "linear", "use_bias": true, "kernel_initializer": {"class_name": "GlorotUniform", "config": {"seed": null}, "shared_object_id": 7}, "bias_initializer": {"class_name": "Zeros", "config": {}, "shared_object_id": 8}, "kernel_regularizer": null, "bias_regularizer": null, "activity_regularizer": null, "kernel_constraint": null, "bias_constraint": null}, "inbound_nodes": [[["dense_1", 0, 0, {}]]], "shared_object_id": 9, "input_spec": {"class_name": "InputSpec", "config": {"dtype": null, "shape": null, "ndim": null, "max_ndim": null, "min_ndim": 2, "axes": {"-1": 16}}, "shared_object_id": 14}, "build_input_shape": {"class_name": "TensorShape", "items": [null, 16]}}2 +¹Proot.keras_api.metrics.0"_tf_keras_metric*‚{"class_name": "Mean", "name": "loss", "dtype": "float32", "config": {"name": "loss", "dtype": "float32"}, "shared_object_id": 15}2 \ No newline at end of file diff --git a/behavior_metrics/models/rl_models/dqn/DQN_sp_16x16_LAPCOMPLETED_Max569_Epoch1455_inTime20230413-103523.model/saved_model.pb b/behavior_metrics/models/rl_models/dqn/DQN_sp_16x16_LAPCOMPLETED_Max569_Epoch1455_inTime20230413-103523.model/saved_model.pb new file mode 100644 index 0000000000000000000000000000000000000000..412f461ce83a738b6a38d85406df54534109caaa GIT binary patch literal 88492 zcmeHwYj7mTc^GCFzz!O(MFV%h%^mK*z4wxL*qz05DT_QDk0L2PPIt#6MN&81!wzt( zTkImci@PIJ<;Y5;Fih;|WtB}ij)WhoSShDuE0OF<$x@ues<;$al0R`JapFHtMWraI zluNF}sfzOT^z=;kOwVH%dpP0>s2jrrso|+K|haDE0t37Lb;3)Ye>0<2T<=#c%4zMX@jW0zSY{? 
zYQaNh2Ej+G;o<=_u8%2+WN%-;IxsjiJTivI4^C#zp+V@YWUSR28>JTRZ?+nh+Pc<{ zdtZF!m5ap@G(`V`|6hFO6|{uv(~D# z-Xfq^Y7|5f-T?<*^jA>-3#HZzTUFeTloAYv{N5xZ973thM!meXY7KD`4YnGkT644B zG>wvrt&H&DrtrZ*$`C@jIx-Gu8Bj6$tb*_hW@FvFu3x-muHN3PS8A;$o@QQP%waTIHmfGgV7*#4%9Tdb>0QYjL*u1twSL#w zr~skV){V7F)pQG3v)qq*U#!2hiC#dxmrJX+foA$GnjsUMRbCWvHiVQHaS|XsqvVu~ zi^0q&>Mh-?G_7x1VyXvSM1z;BrB(A5fMquPQ|KYZ3Did^kp5`B#l)0?oB-hk->it48m1Jja5LzD!KT4}?Ys0oy|Mq-p|WusZGtdh6d7)sG{O}*b* zi_Odwvj4)UT4}bIlZY76i*uv)K`D}>0V5LpI< z@X4=9@FpyHclyyYX!Po>Qlo5^p9Tses34r^W~piY4R{)`=C@IvN&J|`#L8grV2s8H&VDg?s6J!XL)hp(@dGG11Y8AN4>Meogpz;_`ja`1MlH?*RmuB zB?Z6t>%Fk+^{v*oYAwtUD0>i%7!;v&Cj%yj62+eEAlva0IfXn>Ajy}5;@DD{Hj7?C zhphHz3Foo?$FUy|JJM*KSiKX#TlH26$e5dnSXH9{x0)q@TYtm@+$tKYcaQ_dFO*9g zvv+1;Plb(7pC$W$2g87Eo8rH}*ozZtGJ8Utg9BLB5xR>`OZtyBOj&zb5PLkdq9O(n zPcP2EH+E~_Vn@!v#n){NT&x&>;2@kF@SyxQuqU$<+8t+;j*2QwdfJ-!Q{u!=VtIGy zuc5P2IAY;I-e&fwZ~8$p<6sj=z`=xz%3Z;Pw>t+D-VbeH!dtaHOnA9KzDryn9mNG` zN6unkT!_AoHGGi^(2608Aua40J}z+I2hQfnGjQ>B8v_?B#viyt`!346N&D$+1Q*E9 z((WAE_rehi2O<|>ZAQ?(hXfoj}sd%%1*&Vc!sZ48*N7JtB>Rzz{nUW6Tpd!C(NA-jJB zvu7**uZkj{{W{jz1!iwo454ufm9XbK7+?tO)>;q6fY=#g7!Y5#fdR2%dl;yp;i_3` z)QHLm4wGgFlP{dLj*6>$6pPKHb@YhPU#ww@ZNxjutf7Gl9JDR9PuqhU5YJ(nIH*DF zHjNB4?er6CFmByA0~tV@XsFwVv1xHUBUtDGRnQFIW2{jLErhV3q$qLSLnExKZY@1} z>^LS4F^xU_fFd{}_8e=No>oHE8U;h(HqK0TPp%A_Nn4O{<>XRuX-EaK^!jBr>a*CEWck4$vb z?Jd^exZ3e&uw^rvE{g#HB+B;x4V&4tv++|B?P$k;8%+{DB-!rD=2~g1+A=JKtC7dN z6(f7vr#b*jS8)svsy}1@kUiegG+J7IOw^7?mb(G@z%H3G1Kiu&r9}LrGmxveRhNSgluy5utL&G&V}L z(z@9&D&-~yRc~C(C9`LcW;8(aYSe4hx8N^o%cRrM1k;PE^;6SKR`)3BGk6B2Q1asC zm(vRQ<0Iq`JvF0bmE3bZOUkE}p6q#|`9FK*G&*TOlWS(ftgV9W6KtrKZOFLWC~a;6 z*oB4pLSA1+L)17121#Nf`}b#UizKFo$;>b)28nXJ2%Q#ladz%h_6yL6WnjbaoL%|M zSv7@wR{q}^^hXIas46&BExlzn&gIaQn!pp*gK@Xgx@FuoE9Zj6ttym# zn_48R^=8v-LK7+;fL*)PG)kKlG^7q-)i74U7+@GEt0H{L`kNTX8YbA|Y6kh_*qlmL z!*1xXI*2Eo<=6ycm^)9<%Mc!@Z?-BMmAB0X8c~Naa%Qoag(Zj**&g(D1xk*HLmCYl zQaX4@W8#poFr@L=Lqh5V?xzwBQq==ss0BT`1*`?=K@rf&AV8;r0X-xFstJG|ZUfLG z9H7%)K#z)m9t#3=CK%91L_jkFpvT()^aKa!NelHOk<@0 zKPRv<2R|pVGH=a5yX6UaeKUa$tEcc}4Gi+KWrEbxxZhgCO;~4VaB9N@quy#09aGQZ z!*p@WSJ9Z7g)Kq<4oFiHj zVRasl7{VbGjj9WzP_1m<1ILQh7PPVmt!&gcz_ny+qX`9;puiooT3-b-H88&kbs49P ztxf7t(=s=Wd)1OPZt&|+i+I!{WdLv(h>)XGtFY`H8ORq2y*(aAZy)05?dN!U`*0M! 
zJ>jRf9W%}$wNp-cN*#|)Qll{EPqI1B0h-8*3;c^W@O3nD{lb-JFRVO&$+&*u`Ij%* zhg&A@EXa0RCVZa1aP8WaXIEancIoP@*orkgB1X;OYxtV4ec>>z$>_sAna7|H*vANcz&_^EhxeUBAJ2x-$8#Kge3Yk; z=cDN31wVc4SVS07JCzAesmI%*j~97zAoTGP|00AwoWqigKG??{&<8zyN$G=rifWt8 z`wS3=LTMyH<)9>$gL**@vN#H8qtwe#!g?T8;{10LD$UVmKSrCqLYuux#j9&nym}eL zD=2v#D+}=R3RV{3=T)qLYp42gtSqB`^);Nx=iv8s_|3!b8}O@Juw2D2_?C{VC)IL} z{6EX>F3ngmo3xf5_eYg?12bE274fa(3|X`01qYhFb5Ae*~5Iy z>5fAZ-ZD#Y_FCmn3d3rdm-+`(@D`)4O9Sef?S5rVyll-;vyN!g(DK|&Ij7UveAGMz zw{KgRnOGoiUyo{ECT~z1B!i#q{sGVzI&YmpAJY0M zay)I$QIXBs2mIOGEOpst4+|Jy2-Wi$Xc;QGa0Pm zHeoH=2Ch*@ShkB$J}d=T@R$w5A9bl=Et2&jZ7-hZjYhpST`n5}q3GBdIzPgw!=_0X zbyz_WPR8q`BNyQeEh@qpLI@(9Aq9gyy0u_0b&Xmlvd5{_<_OaCsgz>_6SIIn>G>QD^cWcuEBsIAYq$c!kFBkhI+d^&gpx7>31a^0~qu$7S>iwoDR0X4w|!3iJ36 zekX~3Fo1rlpr`oaw*QGiwT97Zb*nvnF-OAE>?k1x=g?7zu-w?HTDDYnG^%ep2GyyF z;t=Qs9FKf_4jm)Rzf>l6xl&nPMTII>urwQK+O!nRqGhTD+ir@)2NG+3Odo)x z)<6L$@r^JF^C8|Ep5fhbLv~s0u9l_)E820a#hi9jK2zPHb~5$6+cVj-5T|(9S4LHXO!%O&$$Ek zt<1Db=y;3;6*~h0cZCDQcnwi)5F$(hb4kPIy%q)^$J>{P3&a(ORjX{6*(@5iol2}2 zs3tyyCjG7>>0(c@N6!V4P5?>i=Yv+0e32DjQhV@6QA}D+gBEeZ*aG8~(cCmwD{B=N z5vXE&`Jn%yqF+d$(>y2Js6%8j&&Sqkgjwm!z^n#Q%J#6CQUDXFt=jEc{ceq8OO~@N z#6u6*pNt%QWDF>c54#`oTrJBxjIX5!)hc*uCHY!&TrDd?)x+!p4=|ta#G8kqLStyq z+(Em4=G{Ma_s@d+=iI~Nr}I&=lRkarDYOzwP@ETu-`-4VZGK*>OPSM5!12^s+nqD3 zHw9)jbt9ms_XAhbyn&@SEr3@*4#`62NZ;Fk36=yE^*i zf#{O~28#{}FLyE~;78GEa&)+(gL4rHXQgLUOX~56KERsJqLV`5n;_%_6g`K|3Ptnb zEqlSp8*Q|$x7W5F*7g~6!iL-NhmekN06&J#*d+zCh_tkVb#&e?U4rOgV8F-4G8z?dRxdEjGX z3Z+X=9X6)WIw#PKJx`V{HGneRQkHTwpcJI`u-mc(JV@FAnJbtaVC1J#*0D#D#6`Os zkpDrN6++|Llju>J4fH5I;HHcK*QFcHLUDHXd{%oc%s(}_H7*_@1zpH0Z^?|Em{*_rcWi-mtsfy*OEbFI7`=a7c^y1C@qK&($OC0=Y zN2Tz&-^bmYFwRSH@&duuehH!Rt5&w0Sw0z0z~F<^KTPt7PG!FX`C6!__HPcXd}9E} zmFPYuS7h!UIi_O|HngnB9DQ8IEpM~8NVg(sZ%nTj2BmClwq82~#!i?7| z0k+&QYjjYZ(D@JOCzJp>vgxi!YegAtSB@ALpSg7LqnBTL_Qh+KSL;hF&t1BB&G^Wb z3zwhKzCw~{QHE$Ko48zLJ7tO&7*AYUS#UUF&q^ErYQHz&Bsa5@oN1b8cPO%qV4yQR zB4O>$7NN}qjMFU<_q3b!}!U>sk0iSLU?TsOnRD@Bp9ztsfFtIU& z(3nu(?ICask*70_l?XhUX~L<@DjfL2LYmA6yBlh;1(TQ1>x}nCS+NuWnC% zgN{e(8{FjUf{s@PI@^yQYGW17FpUuO}cLHyS4=Afhk2)C@%EhL3m#!gRw+oPfyPu%0MxPe2fFbg3D2 zG@`c|NIw__&sbznNuUTEqtpxpv4!R`U|Z02(YvGrd#%J_B+!QXpAzjGm>f3_i3v43Lj8(Ts83Hq`U261s9UKENNTIF z)CG`dTT3X3jvV3S=1|{s#W08FcikMC*7lG&G@g@&We)Ag4BD34g%ZQ<;&&71GCIK< z$-UvnMn1Qg%jpG@kbAvRDp$a33k;hYQkjpJ*AXIn0F4^2R!c8zHCq?!8=GJ~wPfb_ zzAneSvI(U2=Olxk>?j&AYLMEx3Mr>a+r?*K!+$DduO|CI5@iB5ez1YCp%u@-u74t| zBpWu0i@SV$+@2N*~Gygh!L7lp#Q#`27%1pQYtR(bMNc zC`-Rm-n9|eN0B-MEu%;I+T@fO&tEZ(Ka3vZtLEW=*oJ##8b6NCiS>8Tx!zvqNpI)$ zJRWc4GUug?H4>WBdUhqiYx5DlkkqMeTRFj%0r%mcrjCon;xwkmYfPV&Hm3a@@F3wj zm6!6lVYWet#1Mw`r{jo`(UPYA ze9S#7*3PCs7f)!2BUyMM3nCaSKo(TW3t0eNVF9v$LifRAJnnfv7w^1J1^xS{E1~9z z4$k{l#V()s|O1Y9XKFA+Su4ZVRjk4};9wGHt;$8xRA$ ztsJvqjx3=EmC$~YK;gXvC%|!R4+36wO?aH!_n;tm8(1dIW$>W*QW8bYS}?{>h0=qo zr}EOW=6SKa_b25A66OEQm8WG1aPn>KH2QKu*S_pbOW;xoo|ZEa%g{G5Z2jwOV%Upa zS}o-En-ep)1o5zr%Pr@8Yhyfcu3U_FoHgbfilDMRdLwl0X*2XY=wJi5 zwNoMx=)I~l{czL{628IcE72S=ie~wY33n1SN*R} zTrwTmV*g-eUqAII+3-$X0rxaPu_1Mw+ShA6lzOeNpn!~j?F&4D5~tmaf9duz{-q@j z+yS}B9(2Y(kE_wHW&BI;PR2ja@E%OYzqEE4SuRnSI!5_2{jsRE*!J}_Bp8uNiCE%` z(V;w;20_%i$d6vV1OJ2uiqG zU|7n3?nDL((Jk+v-|0<{RILK%_g&7@_YelA(Cnd|81UNC5XZ>}`l|^2xgu(a=}P9p z%OVX3I%wNEmU5`ZxUIa#dqm2yMkc!L?n^oMA+Pbil;fDM-jBJGX&B!7ELU=zpuM=h z4f%M!4P6L23-oP}2AoCtHgr4KtUK1m+|shBpA)T(i+n<8nYD3oUT!P2t&NLIk=Di~ zF@?2bucVfP+!7Cg)Y_O^;$e75tc{BW!P>YuzxM^>uEuLW0Bhso5;JPq#J!Z;`;!u8 z)M{V(_CTzSz#`bhFrKkXt7YHXxVhCbtYmskd;pF6bEGG=eQRTB6VhwN$RNLOZ4_?N z;Ii8PP(%N-S3EX%;CH9z^TM%t0iB?S?~S-o?^FOPpp&%T%{D^qmk@Pv0XMAJ-BB&w 
zJUY#^eI0ce6*TA((305cFfwqC6a8dj*AWZufGDtScBj%ziDe?nIZ3n1$10mWY7aVNE(1oDjzoFO;CP7r} zLv>6h5<+GcQxVlyVNAHPfw?Ne5TnM3RAT+K$&uCR7UGj6O^Hp8K|6^LFE%+wZl59K=s}80jt&q$a%4mIlcQ6o zdstbQnx$=Wbo%Rpv*KsO`0{D$j2N)Kv?KOssYsw5qhDbO*~`U)56yOkbXKt zPcquxk5Vx_UM>$JN(DFsn;2VfT-c@6vadJZ*Bi_4L+&K@l}R{3@j1495%k%OFbRzC zVG$;Q1>80QgHTu-fd#rR=LGsq3ej53Z*n1~-=uIQGH<(<2@nsN-{c-*T)XTynSA&X zdcTkSCja;b`qD5u&U-)`tww1TZd`Ae%~ri(TA}H4g?azY#OBj~_C5+1d5(edexvYI2p@UcG7IahOauFhV+aOGL+T3AY=a*XX?w40mV z;YGW@t{~0jiSNU^c0Zp$0}g=L{ZqVT@t9LY;OnkY^SE2UJ4mOH_g2PD-<5hd&>Ka# z(ei%%7x((k*QFNSac77{dWn3qnDY+Fx#3~TH#WH5i;PSArhJh)<|UZ%Q;eu5;_x!R+jv&_?hqv zzWGih-`|5f-F)08pOaunqJoy_t+fdb-d)EKe@(m$L!C&%at!6;VMwB+=P{Jubqw)B zu?$0O^vu<47htOK>DLsPQ=36&YH<5idl` za16R^Zf)iNw~B38?>B$KNpSPC$c@>tpx30uGt zFJjBE)QOBOUvDxYTe9Ay_Fx`IyCP-_IO64O8IC#;wBh${N{1A>$0{U zA#3ZM$lAXx+uNWgzoI$V($8T<&+WGKs7J`xdMEO=94j*UT7nhZwSmXVuE^9M=W~*J z&!s!k+G^kG<<5LMFRu@-RjQ`3*)Z2C_q4Ga-ea3K?t5kAwF$|~ob;`{tS&8EkiMOh zzMYr8U68(Al)hb(zFn3LSPHH#9k8C4HBgYfoRhtrm%Ut&yj%FfBl&dbWq%gZjv$}Y&uF3QR-%F8av$}Y*vF3ZX;OJ`V@ z%&;80AaMC-P0vXo0D(tRHZO&sAc3GDEjcGCIVUYSFDW@MEx8~mxgagMC@HxpEx9Bq zxg;&QEGfBMya=~xK7j&uEOZi07+X!VVKg^|yh_8Emvhh$1_G{L5Q1c4Bqg2`!-3nq zdcpEoC(g&EGI8oJm6@v?xa&8}W__!%YVvNrk|gEt)2kOexg2&aDRFXlu3qpA@4;NX zkOWo{PQJ@{)9x|1a?zqBZ%HQ3J*$5=1Bbd=^& zfG7(ZMHQLmCW32+CJ))q5@%=E973q5;snH(BvenjwEZk`AT?Ylm)(p=`&r`b8-F)K zGh0R^=JJr)ewH}j%5&3lIq8R7B6M=I(PUKqv|LUdPnM|tEO8#5XKgs5clLgkxL()@ zJfgQtj!rHrA33rlQ+{%E>U4`7liGfkIBR3_Ak;}S$L7NGeFvJtX{55qQ|ynH$euw5 zAXK&1Y&Eu4ol8v2do4VQhi_jht(%Q25XV~CFteTHA=%Fo*EEfwRkZiA#C7Gas)Ak+ zlWw=87NWTT_V22aX0zOZl+Zu{XLw&G(_;||xsVK|9_jJHsD<|Ls&d%;{#{k4eWqRM zl@+^|K~rCz7w_@oGibske+!^EVLBJe3gJR3s&gqU%tHZkw-QTx;mx3_FBiNQC#CVU zlhoS2UXs5!iEF`1ho>*kkp!Cz0$B!47(t$cL6!h(uVv6Ac;{|jB3R!0m;CJQo0kYF zvqX(n22FUeOBejy&$Om{|K=saA=t#&EMq>uJF8_6C8E14HB6ncmFGcO86DTd&J9-k zH!pqAZeD8ZzT!A?&R0-?b3l7n!xi-T&QF0(%DjlP2q%c_ksFJxk%@ZEU2_gd?~Zf8 z{wc6i{+-@;IC#H19K^k`IJbXeF_}vAV-M%ym*JkDr8Z%mJ1_h&Lw(iy&w=pCY20nN! zGY$0Pe5BN1;n^9G!Dtv0(SQXsjK@X;QYUafP3nSF^#JZA$G6NR z8g<9ugZ`ih{bUgIQ^C+55~0@w=nuC6{SglO=~&Po6`?;C1pQ1f^pA+pX9Vbvw*mbL z4*HW(&_9RzV6v1P{2aha9)6BsMTehhtQ6qq1XkwY=Ok9n4xxk@OfXouyT0``>gvL`=l${c}^kae?W0t#qoNd4v~LCmwK z?%yT>M~~u^b+Lj$`1ND#fpFz>>_LZz^X#FZ|JQgTy}rDdi!J-dEVxjJS1`Aj!{1ZT zx0T;cLNxT?(_7W*g~s|-h>O8_)LUw-H}To`lBZDrCwrS_bq%SXwB>DtKB-!C1=}(C zx>0K0#=|HDTa8r<1Y(e-`{42RT_PRBu|l`5zG`0_SCIq=|44m`M%0}t-xpzlJkvpUNr zi_B?`J&-xgvj;M#I(sPS|8D2k1A-rYH*Rd-aT~d%o_vkWHSjHuv>zUU9DBBzQiO!V8ICx@*YFuY7u)60GR`5*bx%pK)z`+?DMcGbNhDWZrN`6 z==~QoV29Pm!}i|~ar^Jj@%!(GqxRn?{QK{YrQ0F3Q*ri`I^KmX_Q$X!)F&CA&FO!n zh#MF^rctrO+RxeLWDr!lt)O3pRrSlBRW)s|ssryOv3~;#-K@H~x|}`A9Ub~CIT~JY zNBbj1(ccv75$Qt{seLk8h9)$12qdN`X z5dB8ZONcC45XOmU4#Ido))H^0e9R@j61K!Ia!dRveu-a-TH;Upm-vp4fJ17hXTDSF z;VvxkpO7W~5tr!yje^ed+b1P@!G6jX`P`magDHDK;$(^MRCvCFS;iMgJskc);=2Cw)Y^guT zE%lG`Oa1w%rT&6{sqa`{GNg8@AUUNT@77X((ItKPQWx?lu%!-{jqVZpcE?AT`b+M3 z$N6DV3fcSIbox20_kp06np&0@%c&3h)w(Dga6>s;|ejr~)QPTYv)67L1*= z1%oDSy++%5BTif2Bj%t_I1)TO++YvH9AvNuVh(zfJrv-f#2)70;U;^S*T0%XC;0X4 zRd^eD*a367Lab6taZ2T5wUv+6mL97ueF^`rg8mWcUcos>uxRME?lt>f^0cFS1(V@= zqg1Ydf}fZVnO6$x`(*OA`PMEgWOEAGbKD7s2Y15Z!JTk;a3>rd+zE#Vcf!#Rm6d;` zpp*QZyQT@-6~NNe=5>K>e@Q{V2pIfZ9t;XLA&+5oH|#;4r7Ur1cpxPt$E^RTto*U( zs6mxWa%+JrUG0U#HTnx>< zKLkW5XdO8%>mHklKq3^z7k$9iLGQ4~hW;YBudebgl*4Kna~`<^s)_qr;22d!1L_*? 
zvo0ymOrf;vP)a@ZZ10HU#Yb#wj1?!Mk6$R4HfA?wsdd%1yvhsR5erQh4>PNcJ|?tc z8E*WaS$*sC$>MPv4$CT}AK+U6Age0q1Y{@68@<=sJWOk%$T6az3LV_02#1&L6z@wg3ABBJSN8;@JwhJL3Shgx9n; zx09bh*SC{b(Sdgpm;=mqAVaw?B+da)hqjXkQR*)h>vbeRKl;(v1c2gU3V3q_rfgRj z8K$gHQGk8*o8SM;JMCjCfB!J`U%n>-6c1Ct&0=5*dgymK>C9m0Yqyp z<-d_U4{p>>z$2|kHDx;X)q|nof7mULwN;!rGOf*SCm%y6x04w(^KN1q*{3jn*Wl_& zL4IsIc@|~fO&miijkPp#0i{XQ$Y@Gco*O%9vGd%-7&sD=UHby3g^7%Ob4l*lX1kNBWx-rNS1q3 z#~^VCiviN#jfW-6&#vrXsbkXXCPd)=)+t2o;pl6zEwlF=3_ZS`Ttdn1WDjDZ6H7Ef ZQNW&jq7$sv46DNAD_Vs;`KkbE{eMEc31R>M literal 0 HcmV?d00001 diff --git a/behavior_metrics/models/rl_models/dqn/DQN_sp_16x16_LAPCOMPLETED_Max569_Epoch1455_inTime20230413-103523.model/variables/variables.data-00000-of-00001 b/behavior_metrics/models/rl_models/dqn/DQN_sp_16x16_LAPCOMPLETED_Max569_Epoch1455_inTime20230413-103523.model/variables/variables.data-00000-of-00001 new file mode 100644 index 0000000000000000000000000000000000000000..7cfad826eabc2f02267cbeea8148103d3a73705a GIT binary patch literal 9532 zcmb_g30#fY`!9;yrR_~hme6+ZZM!X^srQ_FQwWiDQgXZH;&NN2g~;eiX;+wtL?pXx z$<{q@w(K)xj55Q>jF_K&9ZSRiJ#W3ujS}X6KA-D(pY=TFJm2%26Xk;+D|3o#E44E! zDxpJP#CxDc<#^Sh;gyx1lV*0Nsx?Z$%I$e*;ZjuDQTz+q9(jj+8f=1UAC*`B^81XV z3R5HKn|=y}-_(YdFJ4smUF4~D0g77 znt)xaDD=znACcR*)v(X>O`wZ?2AU8*9VR+u!O3^CV2ED>X!vagTW3gEEqbK$!jU0`;=P!*zGjrL!z zuLylB1SeBhfX%;OCFgRh(f9Q$foQ^^Bi{Wo$obw9ZrHRb4FajI&{!&DTv z=8)>0^AL3MU^nDo7>ep{0d)Y6s1Z-x8gIJw`iI)YSLsbci5Ap*0AFl&8f@7-Ol}70C zfkPDyiUd^sraR*7dhB`h_A#`i+67FxTZ%TXs0Rgo1Hi7k9^kd>OkfgGPi8+)0{0Ya zLI2dfAlP3U3>{wu&fOe=Mh=~eG8erkrIA8?hXak-_=Ijo|Ym}hd^@Ho~Nqo?>D0%uM1Jsl1eZx`=n}?cqrPiH3YayE|B@D zkWAREN7ipRj)b?7r*!2k^gQ)+#r%EQ=!EA&)NsrLB^GW&j~9B7haRVas^iDd^!a}1 zU|bDaGqe~*oZE&Xch3SlwWomME_p!rfBVs>%x*xh%#EC8It`p3GZH1fDF)$jyFq=! zZ{+eDAwZeBUDa8y033UihWgmW0%3kCa7-!##LuVEC>|JxJY}1piJh_Q$y#8 z=B7J}0wcXe6U#Hfozd=K(R>rt(pW=sa?e#Fjq9qza9UA)j4)K zHQI13HS9!hYF^zz()`{7v~hAYSTyS{Shww_yKk})xHb3)$Q);nigv6*7ayM{(+&kw z%Qo(&B*QH#r=<^|az?lzgUnzsvy6-S-hW$h#l5i(o{)0Q|Y5nCjk8K>1I~rLG1QP>>fx$RZ{ad82lfNmX-aPCg&M4VDIAHyf!Ccd$ zgQf4@+4>vj3vUU$oku?D=oIAX$uK>~_M#c}=L{?o{&g9`G?4t@hA511* z8H8R3ohJ=Wm3nGKmmOsNvCpQtdFY_o&w%xJaIqnMvmoQYW%aF!7H*22%rc*vy=C_#fFMY17dKEK*WMh9omFjt59XqgGeb79% zRbrVM#oz3Y(Xo0gX9u=te^}P^&HAxa*`#CjEU#pJ)%>um#lSjN&}7HhsC_eWuuYQ; zQ}v&+NyqA0Uim=`R*%h_2ex4uHG9^!IR>_A(qpRrQ(_$+tiSqq_>(+@E($@~y-s96 z+Pvoa;~39-Jwstj?VIbr;Y}&I>u_Ep{>JoD8QhYQAI5Bo-PZh!9NXZ*`ZiIPZ&Wt@ zi*>9(&7QSobhMlqYE+58S-wf#!iJU2OkIzX14STzfFY^%dR2?{mvTE&>m(k{>t8nK z80nu<#H_#Kh=r>6kp!}Hy&LMz^+vJtw9ta*?u?F|`^GaZ6gt=*;kmGE!Mo8&KXx=@ z6DJ>w4)5oqlG9gV!06xM#l>=X%47pfwUP+Go$qg#^HY*Q{yGotdtxfW{+BO#2wo@E zz*@-#SQ78%bnM4hu=dzSSUGn(?09AtJbU(UP^)PNOU4)wa0e~iqogsV!mhk4ZbqrIgh_g$8MMMuHc;UQGWO&XGS;o)M4wW_y$42>KtY+ zI6GG`!+eln?XD6FZ>xn?w>v8P|8%CrGH-)@KOBo49es_O^X)mf+N$Kep=JL?$E_}o zJ7DD~)397tv{$fuRuMCY-6Us01aBwreC9wquZ#U`d-d-vxIQq8(IGI=#&Qd^jorAI znO}Nktst>DK`_WP%=**=2cG-~dn=Fh5Sv8hQww%(OoL{hvSIlk*%ID_j4JCt6v@2w z@8fwPk9S&cpQRF9Ho4C1k*Vw!Z)ln`-+ptx?KOj$f<$E2H;!MS?tKYsetBFM6v^~6pEEim)! 
zJJ{^KWMOkEM#j{}RP4^+PL`O?&}y-nMMVl_xuyO|i(29sYxk{gt|#UV_dI|vUcQ9; z9x36=?)zZ8L8S1`h*H}n$6xYJtuBG1770Yy?ry2RU}*10FsA4h{5#jrDab)bRHso6 zrTc?HT6`!lzy6RsebW+b7hQFzDe7)F-&4bOX_PTIbYeMk|32b$=iRRl2H*JkgKvAx zw4V~F1fp6S;CZbZ<(#n}L`|MdhMbJT{gmkym^#`-QN-+R&HX1WWDGdlS&zaxOS>%9 z<)C{cytY#$JG@`?Aiz^}$ue1_k!9_?)w|Z&MZaEXtQRBdmiJafn`;z5I_s#v-LmT~jpgQ|zBZ)ON!)W(g>KHyyNU~Mrq z_=BRZ7M4KsSIMy39}g+F72Dv*+%@c6*j8g?`emw0dm9{oJqL3BIZ6pKwnBH90dJ?B z5TDZ?FP0}%il^mdi7g2e@uq}Kv5m2fc)LlNC^NxIoRL)^?&o789$BzT+c2}JAZT|5&ZIf|kjP);>i^ab!Fcj+?O%g}UN)??vt1s%) zD^-N^W=D9iLGLaP_Vn8xx6jlp?FX?;VJ?1hWSN-iw@!Sx%2v$py+(YvJVS6hXR*Ni zcAjlc!5XpV5*{6QwU`>dO8i#eN?eXsilfS{s2)@~c>Uc~5R`ov{+XhL6Vq?PNL%+I z^wxO2<5fT0bcOq42W}URCec|TohysB7jjKG9f)oWBbg(QogtYclTV)!8$)Zl(3&PQ zC)%uIt7cBLSx45)nKsjE)y$bT(_+nBxVju|LT9!#S}qM$$YQuAoKA#MY_wD!fqBWl ziXNQKgmJWNdYpo`ohOZzM@C2#kufo}k1mHp=**HuOJgL`C^@$)M~C1@Bw5yq7!q5W9W0nBRYr6Vx#3@ zFln0HG&Q$fVH`@`&jYS}f!0bKI~|#8g?lWU4$YmX0!zjuIXj zE{lkb3y+}<2GItyWs1l!x_QyfuN%jmFp$JWO{Lu>F|o2JNxVYJtlNZZ#QBEMkC8>s z#_^ll8l4Zyuk*o2MGyNOin{_V5Nj#@l~xoLxCz+bOL%MxPBS zeY)l%w&p^5o3KEvE|^SAMUWzFrf?`;2pJ8HK4<05SHX=-kHWxWAjZwi+9&+KY^FKzA+N!|1N%)QpV}WzE>| z`V=+>t6{@i9UDIFVPnY0*!a(PRaef}UQX7Wo?nEGq3Te;*chfRm_h`6R6RYWdJ?Bk zp@UVnSHnPJ{73N{u}8;m6CaJL_F`jv(DxX|2YrwIi;oXw&G_*B6h4Nl;bVk4K1Q~O z55JG`@t>avT{&O-Ca~sMd=WlIslx%|W3;;96Zmlc6h2sGdo}z7KAhXeN298}*tqU} zX>e#ZVx&XkJE&vhJ4lOu2QeL`3vJ;~TQCfE`J7)U?c~}Rc1&Z~v5jHJH9oJ#H$JZ> z(9bK{aw4JWMw4#|&4DxtAT$TjWD=q2&h5krBsw?=X&OXRCz?*CsWX$yIKuCvqodP& zXPXI|-~WzkH+g$|(%OFOwz&7rT+{-$(6kpDZybI{(mR6BB)t{*-AV6(Gfn8l<98>$ zYW(gDQh#^STZirGJ;V0&W?_4Jm#}?cqy1#o-k%0&a5F$@W(v#ThmeG2@IxqsWn5@R z$}+Ar6Us6JKC|4P1Bnl92GM_eg{l9gpv#%6{_5A^$O!G&$XKa@tHYT_Xop3{MZ_|N zZ|%$(iKj}#9m5@4T-#1+m1_R$)X}NEI<*~t_UFWRZZCdo?+wEi`7@2b_3T}~K02HI zC!)Ky7d?as{OHgbtLOgjwb*+81HOKSru^HR!L7X++OBUCe%PzG^%V^K`uP8@;1e|% z)L#7dYrx(vX05Nly}cQvL{J-5W>?TWgLCU=OxWHO5@OOvQ?xk5lBWBujS1R4#cfaU z?;4mYv@t=u$N0mB#`5^tC2MTi)99AXv@D|!nq8Y0SYyXCb8Y@FGRLMr@EwJYOfzh6 TUt7Bs8+&K#L7BGNPSF1Ywo+HN literal 0 HcmV?d00001 diff --git a/behavior_metrics/models/rl_models/dqn/DQN_sp_16x16_LAPCOMPLETED_Max569_Epoch1455_inTime20230413-103523.model/variables/variables.index b/behavior_metrics/models/rl_models/dqn/DQN_sp_16x16_LAPCOMPLETED_Max569_Epoch1455_inTime20230413-103523.model/variables/variables.index new file mode 100644 index 0000000000000000000000000000000000000000..b14faf6111abdf49f4884e55389aadbc8e86af0d GIT binary patch literal 1393 zcmZQzVB=tvV&Y(Akl~Ma_HcFf4)FK%3vqPvagFzP@^WHCR8)F~}2a zSeTs`s6!=Q=OA>PkY1jbL;Fb5chQUNIC$TcMD84+iBqP2& zH8VYOA}wTAXyDf1F||HYrbMEJ z%nAoMH4ZSEK5G4afx&<;zn~;DH?u0WNIyQa1Q^97nfZBm1D6vRr9Ipl9Hw2Zs{6P# z_;OMci}Et_(&K@8Q}M?NgTfSUVATHZe#*zKCRUbMl$n^6lUl5Ah+i8#UKD0<0|R;e zowwV?WW)^!YXXHYv%(E|kav~lc+`l=03#G{Y6YbmW`!Awz@Rl2{GJTdU_^|DB_Iuj z#`9bl48@QF6yZYxrXYNH1LVWX%)9SJWrU0gTP>h4g9oUgSfukCP=g6E8g76zJlC(% z7nKn=#jgS6EJzxi!3#>G^?M6-fx66y)+GwkHPzZ+0?={h_;o>Y>@@@kdj%rfQw@-7e7b{KM4Qb(5+JHej5O1d0F`Y literal 0 HcmV?d00001 diff --git a/behavior_metrics/utils/environment.py b/behavior_metrics/utils/environment.py index 4652b20c..a598ba02 100644 --- a/behavior_metrics/utils/environment.py +++ b/behavior_metrics/utils/environment.py @@ -78,7 +78,7 @@ def launch_env(launch_file, random_spawn_point=False, carla_simulator=False, con time.sleep(5) -def close_ros_and_simulators(): +def close_ros_and_simulators(close_ros_resources=True): """Kill all the simulators and ROS processes.""" try: ps_output = subprocess.check_output(["ps", "-Af"]).decode('utf-8').strip("\n") @@ -114,17 +114,17 @@ def close_ros_and_simulators(): except 
subprocess.CalledProcessError as ce: logger.error("SimulatorEnv: exception raised executing killall command for CarlaUE4-Linux-Shipping {}".format(ce)) - # if ps_output.count('rosout') > 0: - # try: - # import rosnode - # for node in rosnode.get_node_names(): - # if node != '/carla_ros_bridge': - # subprocess.check_call(["rosnode", "kill", node]) - # - # logger.debug("SimulatorEnv:rosout killed.") - # except subprocess.CalledProcessError as ce: - # logger.error("SimulatorEnv: exception raised executing killall command for rosout {}".format(ce)) - # + if ps_output.count('rosout') > 0 and close_ros_resources: + try: + import rosnode + for node in rosnode.get_node_names(): + if node != '/carla_ros_bridge': + subprocess.check_call(["rosnode", "kill", node]) + + logger.debug("SimulatorEnv:rosout killed.") + except subprocess.CalledProcessError as ce: + logger.error("SimulatorEnv: exception raised executing killall command for rosout {}".format(ce)) + if ps_output.count('bridge.py') > 0: try: os.system("ps -ef | grep 'bridge.py' | awk '{print $2}' | xargs kill -9") @@ -134,19 +134,19 @@ def close_ros_and_simulators(): except FileNotFoundError as ce: logger.error("SimulatorEnv: exception raised executing killall command for bridge.py {}".format(ce)) - # if ps_output.count('rosmaster') > 0: - # try: - # subprocess.check_call(["killall", "-9", "rosmaster"]) - # logger.debug("SimulatorEnv: rosmaster killed.") - # except subprocess.CalledProcessError as ce: - # logger.error("SimulatorEnv: exception raised executing killall command for rosmaster {}".format(ce)) - # - # if ps_output.count('roscore') > 0: - # try: - # subprocess.check_call(["killall", "-9", "roscore"]) - # logger.debug("SimulatorEnv: roscore killed.") - # except subprocess.CalledProcessError as ce: - # logger.error("SimulatorEnv: exception raised executing killall command for roscore {}".format(ce)) + if ps_output.count('rosmaster') > 0 and close_ros_resources: + try: + subprocess.check_call(["killall", "-9", "rosmaster"]) + logger.debug("SimulatorEnv: rosmaster killed.") + except subprocess.CalledProcessError as ce: + logger.error("SimulatorEnv: exception raised executing killall command for rosmaster {}".format(ce)) + + if ps_output.count('roscore') > 0 and close_ros_resources: + try: + subprocess.check_call(["killall", "-9", "roscore"]) + logger.debug("SimulatorEnv: roscore killed.") + except subprocess.CalledProcessError as ce: + logger.error("SimulatorEnv: exception raised executing killall command for roscore {}".format(ce)) if ps_output.count('px4') > 0: try: @@ -155,14 +155,14 @@ def close_ros_and_simulators(): except subprocess.CalledProcessError as ce: logger.error("SimulatorEnv: exception raised executing killall command for px4 {}".format(ce)) - if ps_output.count('roslaunch') > 0: + if ps_output.count('roslaunch') > 0 and close_ros_resources: try: subprocess.check_call(["killall", "-9", "roslaunch"]) logger.debug("SimulatorEnv: roslaunch killed.") except subprocess.CalledProcessError as ce: logger.error("SimulatorEnv: exception raised executing killall command for roslaunch {}".format(ce)) - if ps_output.count('rosout') > 0: + if ps_output.count('rosout') > 0 and close_ros_resources: try: subprocess.check_call(["killall", "-9", "rosout"]) logger.debug("SimulatorEnv:rosout killed.") diff --git a/behavior_metrics/utils/script_manager_gazebo.py b/behavior_metrics/utils/script_manager_gazebo.py index 5317f32b..361266cb 100644 --- a/behavior_metrics/utils/script_manager_gazebo.py +++ 
b/behavior_metrics/utils/script_manager_gazebo.py @@ -67,7 +67,7 @@ def run_brains_worlds(app_configuration, controller, randomize=False): f" for brain {brain} in world {world}") tmp_world_generator(world, app_configuration.stats_perfect_lap[world_counter], app_configuration.real_time_update_rate, randomize=randomize, gui=False, - launch=True) + launch=True, close_ros_resources=False) pilot = PilotGazebo(app_configuration, controller, app_configuration.brain_path[brain_counter]) pilot.daemon = True pilot.real_time_update_rate = app_configuration.real_time_update_rate diff --git a/behavior_metrics/utils/tmp_world_generator.py b/behavior_metrics/utils/tmp_world_generator.py index e2e395ec..69bf70c8 100644 --- a/behavior_metrics/utils/tmp_world_generator.py +++ b/behavior_metrics/utils/tmp_world_generator.py @@ -34,8 +34,8 @@ def tmp_world_generator(current_world, stats_perfect_lap, real_time_update_rate, randomize=False, gui=False, - launch=False): - environment.close_ros_and_simulators() + launch=False, close_ros_resources=True): + environment.close_ros_and_simulators(close_ros_resources) tree = ET.parse(current_world) root = tree.getroot() for child in root[0]: From 02d781d48c5590acc87cbc18bcc51e67bd3b1e12 Mon Sep 17 00:00:00 2001 From: Dhruv Patel Date: Sun, 16 Apr 2023 16:55:58 +0530 Subject: [PATCH 11/36] fixed spawning pedestrian --- .../town_01_anticlockwise_pedestrian.launch | 12 +++++------- 1 file changed, 5 insertions(+), 7 deletions(-) diff --git a/behavior_metrics/configs/CARLA_launch_files/town_01_anticlockwise_pedestrian.launch b/behavior_metrics/configs/CARLA_launch_files/town_01_anticlockwise_pedestrian.launch index 1b24f8db..ff07c3f4 100644 --- a/behavior_metrics/configs/CARLA_launch_files/town_01_anticlockwise_pedestrian.launch +++ b/behavior_metrics/configs/CARLA_launch_files/town_01_anticlockwise_pedestrian.launch @@ -21,23 +21,21 @@ [pedestrian spawn XML rewritten; element markup stripped in extraction; 5 insertions, 7 deletions] From 26d124da5ee59f632df9748d047c698de7e44dbd Mon Sep 17 00:00:00 2001 From: sergiopaniego Date: Wed, 19 Apr 2023 14:55:29 +0200 Subject: [PATCH 12/36] Config async/sync mode via config file for CARLA --- behavior_metrics/configs/default_carla.yml | 5 +++-- behavior_metrics/pilot_carla.py | 9 +++++++-- behavior_metrics/utils/configuration.py | 2 ++ 3 files changed, 12 insertions(+), 4 deletions(-) diff --git a/behavior_metrics/configs/default_carla.yml b/behavior_metrics/configs/default_carla.yml index bacb14c2..670e59ec 100644 --- a/behavior_metrics/configs/default_carla.yml +++ b/behavior_metrics/configs/default_carla.yml @@ -34,9 +34,10 @@ Behaviors: MaxV: 3 MaxW: 0.3 BrainPath: 'brains/CARLA/brain_carla_bird_eye_deep_learning.py' - PilotTimeCycle: 100 + PilotTimeCycle: 50 + AsyncMode: False Parameters: - Model: '20230126-183457_deepestLSTMTinyPilotNet_CARLA_14_12_dataset_bird_eye_400_epochs_no_flip_3_output_both_directions_all_towns__less_learning_rate_PAPER_cp.h5' + Model: '20230125-120238_pilotnet_CARLA_14_12_dataset_bird_eye_300_epochs_no_flip_3_output_both_directions_all_towns_adam_AFFINE_PAPER.h5' ImageCropped: True ImageSize: [ 100,50 ] ImageNormalized: True diff --git a/behavior_metrics/pilot_carla.py b/behavior_metrics/pilot_carla.py index 5b587bcb..1f9453e5 100644 --- a/behavior_metrics/pilot_carla.py +++ b/behavior_metrics/pilot_carla.py @@ -88,6 +88,7 @@ def __init__(self, configuration, controller, brain_path, experiment_model=None) self.real_time_update_rate = 1000 self.pilot_start_time = 0 self.time_cycle = self.configuration.pilot_time_cycle + self.async_mode = self.configuration.async_mode def
__wait_carla(self): """Wait for simulator to be initialized""" @@ -125,14 +126,18 @@ def run(self): control_pub = rospy.Publisher('/carla/control', CarlaControl, queue_size=1) control_command = CarlaControl() - control_command.command = 1 + control_command.command = 1 # PAUSE control_pub.publish(control_command) while not self.kill_event.is_set(): if not self.stop_event.is_set(): control_pub = rospy.Publisher('/carla/control', CarlaControl, queue_size=1) control_command = CarlaControl() - control_command.command = 2 + if self.async_mode: + print('entra!') + control_command.command = 2 # STEP_ONCE + else: + control_command.command = 0 # PLAY control_pub.publish(control_command) start_time = datetime.now() diff --git a/behavior_metrics/utils/configuration.py b/behavior_metrics/utils/configuration.py index fea6b62a..ceb0cf48 100644 --- a/behavior_metrics/utils/configuration.py +++ b/behavior_metrics/utils/configuration.py @@ -99,6 +99,8 @@ def initialize_configuration(self, config_data): self.real_time_update_rate = config_data['Behaviors']['Simulation']['RealTimeUpdateRate'] else: self.real_time_update_rate = 1000 + if 'AsyncMode' in robot: + self.async_mode = robot['AsyncMode'] self.actuators = robot['Actuators'] self.sensors = robot['Sensors'] From a2635c47b0c877467875b9c9ac162fa13ca005ed Mon Sep 17 00:00:00 2001 From: sergiopaniego Date: Wed, 19 Apr 2023 16:23:21 +0200 Subject: [PATCH 13/36] Updated CARLA config files --- behavior_metrics/configs/default_carla_multiple.yml | 9 +++++---- behavior_metrics/configs/default_carla_parked_bike.yml | 1 + .../configs/default_carla_parked_bike_car.yml | 1 + .../configs/default_carla_parked_vehicle.yml | 1 + behavior_metrics/configs/default_carla_pedestrian.yml | 1 + .../configs/default_carla_pedestrian_parked_bike_car.yml | 1 + behavior_metrics/configs/default_carla_tensor_rt.yml | 1 + behavior_metrics/configs/default_carla_tf_lite.yml | 1 + behavior_metrics/configs/default_carla_torch.yml | 3 ++- 9 files changed, 14 insertions(+), 5 deletions(-) diff --git a/behavior_metrics/configs/default_carla_multiple.yml b/behavior_metrics/configs/default_carla_multiple.yml index a27cd8be..69de86e1 100644 --- a/behavior_metrics/configs/default_carla_multiple.yml +++ b/behavior_metrics/configs/default_carla_multiple.yml @@ -48,19 +48,20 @@ Behaviors: 'brains/CARLA/brain_carla_bird_eye_deep_learning_x3_previous_v_t_t-4_t-9.py', ] PilotTimeCycle: 100 + AsyncMode: False Parameters: Model: [ '20230125-120238_pilotnet_CARLA_14_12_dataset_bird_eye_300_epochs_no_flip_3_output_both_directions_all_towns_adam_AFFINE_PAPER.h5', '20230126-183457_deepestLSTMTinyPilotNet_CARLA_14_12_dataset_bird_eye_400_epochs_no_flip_3_output_both_directions_all_towns__less_learning_rate_PAPER_cp.h5', - '20230130-113041_pilotnet_x3_CARLA_14_12_dataset_bird_eye_300_epochs_no_flip_3_output_both_directions_all_towns_sequences_more_more_extreme_t_5_t_10_dataset_AFFINE_PAPER_cp.h5', - '20230127-175740_memDCCP_CARLA_14_12_dataset_bird_eye_300_epochs_no_flip_3_output_both_directions_all_towns_sequences_more_more_extreme_AFFINE_PAPER_cp.h5', + '20230220-105422_pilotnet_small_more_more_LSTM_x3_CARLA_14_12_dataset_bird_eye_300_epochs_no_flip_3_output_both_directions_all_towns_sequences_more_more_extreme_t_5_t_10_dataset_AFFINE_PAPER_cp.h5', + '20230222-110851_memDCCP_small_LSTM_2_CARLA_14_12_dataset_bird_eye_300_epochs_no_flip_3_output_both_directions_all_towns_sequences_more_more_extreme_AFFINE_PAPER_cp.h5', 
'20230127-180655_pilotnet_CARLA_17_10_dataset_bird_eye_300_epochs_no_flip_3_output_velocity_all_towns_vel_30_AFFINE.h5', '20230127-180856_deepestLSTMTinyPilotNet_CARLA_17_10_dataset_bird_eye_300_epochs_no_flip_3_output_velocity_all_towns_vel_30_AFFINE_PAPER.h5', - '20230130-122642_pilotnet_x3_CARLA_14_12_dataset_bird_eye_300_epochs_no_flip_3_output_velocity_both_directions_all_towns_sequences_more_extreme_previous_V_AFFINE_PAPER_cp.h5', - '20230128-113936_memDCCP_CARLA_14_12_dataset_bird_eye_300_epochs_no_flip_3_output_velocity_both_directions_all_towns_sequences_more_extreme_previous_V_AFFINE_PAPER_cp.h5' + '20230221-162112_pilotnet_x3_small_CARLA_14_12_dataset_bird_eye_300_epochs_no_flip_3_output_velocity_both_directions_all_towns_sequences_more_extreme_previous_V_AFFINE_PAPER_cp.h5', + '20230221-175351_memDCCP_small_CARLA_14_12_dataset_bird_eye_300_epochs_no_flip_3_output_velocity_both_directions_all_towns_sequences_more_extreme_previous_V_AFFINE_PAPER_cp.h5' ] ImageCropped: True ImageSize: [ 100,50 ] diff --git a/behavior_metrics/configs/default_carla_parked_bike.yml b/behavior_metrics/configs/default_carla_parked_bike.yml index bd8efae1..daf30e07 100644 --- a/behavior_metrics/configs/default_carla_parked_bike.yml +++ b/behavior_metrics/configs/default_carla_parked_bike.yml @@ -35,6 +35,7 @@ Behaviors: MaxW: 0.3 BrainPath: 'brains/CARLA/brain_carla_bird_eye_deep_learning.py' PilotTimeCycle: 100 + AsyncMode: False Parameters: Model: '20230126-183457_deepestLSTMTinyPilotNet_CARLA_14_12_dataset_bird_eye_400_epochs_no_flip_3_output_both_directions_all_towns__less_learning_rate_PAPER_cp.h5' ImageCropped: True diff --git a/behavior_metrics/configs/default_carla_parked_bike_car.yml b/behavior_metrics/configs/default_carla_parked_bike_car.yml index 6942aaa4..ebee5b85 100644 --- a/behavior_metrics/configs/default_carla_parked_bike_car.yml +++ b/behavior_metrics/configs/default_carla_parked_bike_car.yml @@ -35,6 +35,7 @@ Behaviors: MaxW: 0.3 BrainPath: 'brains/CARLA/brain_carla_bird_eye_deep_learning.py' PilotTimeCycle: 100 + AsyncMode: False Parameters: Model: '20230126-183457_deepestLSTMTinyPilotNet_CARLA_14_12_dataset_bird_eye_400_epochs_no_flip_3_output_both_directions_all_towns__less_learning_rate_PAPER_cp.h5' ImageCropped: True diff --git a/behavior_metrics/configs/default_carla_parked_vehicle.yml b/behavior_metrics/configs/default_carla_parked_vehicle.yml index 0a71b604..fce12b36 100644 --- a/behavior_metrics/configs/default_carla_parked_vehicle.yml +++ b/behavior_metrics/configs/default_carla_parked_vehicle.yml @@ -35,6 +35,7 @@ Behaviors: MaxW: 0.3 BrainPath: 'brains/CARLA/brain_carla_bird_eye_deep_learning.py' PilotTimeCycle: 100 + AsyncMode: False Parameters: Model: '20230126-183457_deepestLSTMTinyPilotNet_CARLA_14_12_dataset_bird_eye_400_epochs_no_flip_3_output_both_directions_all_towns__less_learning_rate_PAPER_cp.h5' ImageCropped: True diff --git a/behavior_metrics/configs/default_carla_pedestrian.yml b/behavior_metrics/configs/default_carla_pedestrian.yml index 2b72b1ba..d28137b6 100644 --- a/behavior_metrics/configs/default_carla_pedestrian.yml +++ b/behavior_metrics/configs/default_carla_pedestrian.yml @@ -35,6 +35,7 @@ Behaviors: MaxW: 0.3 BrainPath: 'brains/CARLA/brain_carla_bird_eye_deep_learning.py' PilotTimeCycle: 100 + AsyncMode: False Parameters: Model: '20230126-183457_deepestLSTMTinyPilotNet_CARLA_14_12_dataset_bird_eye_400_epochs_no_flip_3_output_both_directions_all_towns__less_learning_rate_PAPER_cp.h5' ImageCropped: True diff --git 
a/behavior_metrics/configs/default_carla_pedestrian_parked_bike_car.yml b/behavior_metrics/configs/default_carla_pedestrian_parked_bike_car.yml index e18b3a27..bc931f8d 100644 --- a/behavior_metrics/configs/default_carla_pedestrian_parked_bike_car.yml +++ b/behavior_metrics/configs/default_carla_pedestrian_parked_bike_car.yml @@ -35,6 +35,7 @@ Behaviors: MaxW: 0.3 BrainPath: 'brains/CARLA/brain_carla_bird_eye_deep_learning.py' PilotTimeCycle: 100 + AsyncMode: False Parameters: Model: '20230126-183457_deepestLSTMTinyPilotNet_CARLA_14_12_dataset_bird_eye_400_epochs_no_flip_3_output_both_directions_all_towns__less_learning_rate_PAPER_cp.h5' ImageCropped: True diff --git a/behavior_metrics/configs/default_carla_tensor_rt.yml b/behavior_metrics/configs/default_carla_tensor_rt.yml index ae173095..3b860815 100644 --- a/behavior_metrics/configs/default_carla_tensor_rt.yml +++ b/behavior_metrics/configs/default_carla_tensor_rt.yml @@ -35,6 +35,7 @@ Behaviors: MaxW: 0.3 BrainPath: 'brains/CARLA/brain_carla_bird_eye_deep_learning_tensor_rt.py' PilotTimeCycle: 1 + AsyncMode: False Parameters: Model: 'pilotnet_tftrt_int8' ImageCropped: True diff --git a/behavior_metrics/configs/default_carla_tf_lite.yml b/behavior_metrics/configs/default_carla_tf_lite.yml index f3f1b38e..911595ef 100644 --- a/behavior_metrics/configs/default_carla_tf_lite.yml +++ b/behavior_metrics/configs/default_carla_tf_lite.yml @@ -35,6 +35,7 @@ Behaviors: MaxW: 0.3 BrainPath: 'brains/CARLA/brain_carla_bird_eye_deep_learning_tf_lite.py' PilotTimeCycle: 1 + AsyncMode: False Parameters: Model: 'optimized_pilotnet_models/pilotnet_dynamic_quant.tflite' ImageCropped: True diff --git a/behavior_metrics/configs/default_carla_torch.yml b/behavior_metrics/configs/default_carla_torch.yml index cdfdee4d..3fd0ba6c 100644 --- a/behavior_metrics/configs/default_carla_torch.yml +++ b/behavior_metrics/configs/default_carla_torch.yml @@ -35,8 +35,9 @@ Behaviors: MaxW: 0.3 BrainPath: 'brains/CARLA/brain_carla_bird_eye_deep_learning_torch.py' PilotTimeCycle: 100 + AsyncMode: False Parameters: - Model: 'pilot_net_model_best_123.pth' + Model: 'pilot_net_model_best_17_04_c_3.pth' ImageCropped: True ImageSize: [ 100,50 ] ImageNormalized: True From d51c93474979d74c5f99e12a12a1d8ce232522eb Mon Sep 17 00:00:00 2001 From: sergiopaniego Date: Wed, 19 Apr 2023 18:58:19 +0200 Subject: [PATCH 14/36] Save async_mode config value --- behavior_metrics/utils/controller_carla.py | 1 + 1 file changed, 1 insertion(+) diff --git a/behavior_metrics/utils/controller_carla.py b/behavior_metrics/utils/controller_carla.py index 8390831d..6a3c1156 100644 --- a/behavior_metrics/utils/controller_carla.py +++ b/behavior_metrics/utils/controller_carla.py @@ -238,6 +238,7 @@ def record_metrics(self, metrics_record_dir_path, world_counter=None, brain_coun 'carla_map': self.carla_map.name, 'ego_vehicle': self.ego_vehicle.type_id, 'vehicles_number': len(self.world.get_actors().filter('vehicle.*')), + 'async_mode': self.pilot.configuration.async_mode, 'weather': { 'cloudiness': self.weather.cloudiness, 'precipitation': self.weather.precipitation, From f564fc743bf795bcfccbe76630e975a665320a40 Mon Sep 17 00:00:00 2001 From: sergiopaniego Date: Wed, 19 Apr 2023 19:02:20 +0200 Subject: [PATCH 15/36] Removed print --- behavior_metrics/pilot_carla.py | 1 - 1 file changed, 1 deletion(-) diff --git a/behavior_metrics/pilot_carla.py b/behavior_metrics/pilot_carla.py index 1f9453e5..b8e9d98a 100644 --- a/behavior_metrics/pilot_carla.py +++ b/behavior_metrics/pilot_carla.py @@ -134,7 +134,6 @@ 
def run(self): control_pub = rospy.Publisher('/carla/control', CarlaControl, queue_size=1) control_command = CarlaControl() if self.async_mode: - print('entra!') control_command.command = 2 # STEP_ONCE else: control_command.command = 0 # PLAY From 94c0ee0dd48d5ec4bee031479fb34aeea3dacf78 Mon Sep 17 00:00:00 2001 From: enrique Date: Mon, 24 Apr 2023 09:54:59 +0200 Subject: [PATCH 16/36] added asyncmode to the carla_subjective_vision.yml --- behavior_metrics/configs/default_carla_subjective_vision.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/behavior_metrics/configs/default_carla_subjective_vision.yml b/behavior_metrics/configs/default_carla_subjective_vision.yml index 8a95ffd9..2dd57817 100644 --- a/behavior_metrics/configs/default_carla_subjective_vision.yml +++ b/behavior_metrics/configs/default_carla_subjective_vision.yml @@ -35,6 +35,7 @@ Behaviors: MaxW: 0.3 BrainPath: 'brains/CARLA/brain_carla_subjective_vision_deep_learning_previous_v.py' PilotTimeCycle: 100 + AsyncMode: False Parameters: Model: '20230107-205705_rgb_brakingsimple_71_58k.h5' ImageCropped: True
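The AsyncMode flag threaded through patches 12 to 16 ends up driving the carla_ros_bridge /carla/control topic: STEP_ONCE ticks the simulator once per pilot cycle, PLAY lets it run freely. The following is a minimal sketch of that mapping, assuming a running ROS master and carla_ros_bridge; read_async_mode is a hypothetical helper standing in for the YAML lookup done in utils/configuration.py, not code from the repository:

# Sketch of how PilotCarla translates AsyncMode into bridge control commands
# (see the pilot_carla.py hunks above); 0/1/2 mirror PLAY/PAUSE/STEP_ONCE.
import yaml
import rospy
from carla_msgs.msg import CarlaControl

def read_async_mode(config_path):
    # Hypothetical helper: mirrors utils/configuration.py, where AsyncMode
    # is read from the robot section of the behavior YAML.
    with open(config_path) as config_file:
        robot = yaml.safe_load(config_file)['Behaviors']['Robot']
    return robot.get('AsyncMode', False)

def publish_control(async_mode):
    control_pub = rospy.Publisher('/carla/control', CarlaControl, queue_size=1)
    control_command = CarlaControl()
    if async_mode:
        control_command.command = 2  # STEP_ONCE: advance one simulator step per cycle
    else:
        control_command.command = 0  # PLAY: let the simulator run freely
    control_pub.publish(control_command)

if __name__ == '__main__':
    rospy.init_node('async_mode_sketch', anonymous=True)
    publish_control(read_async_mode('configs/default_carla.yml'))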
From 83915ae6502e807222148c9d518c9aac0a516488 Mon Sep 17 00:00:00 2001 From: Qi Zhao Date: Wed, 26 Apr 2023 05:57:09 +0000 Subject: [PATCH 17/36] added an npc vehicle with an ad agent --- .../brain_carla_bird_eye_deep_learning.py | 5 +- .../single_ad_npc_waypoint_publisher.launch | 13 ++++ ...town_01_anticlockwise_single_ad_npc.launch | 69 +++++++++++++++++ .../configs/default_carla_single_ad_npc.yml | 75 +++++++++++++++++++ behavior_metrics/pilot_carla.py | 6 +- behavior_metrics/utils/configuration.py | 4 + behavior_metrics/utils/controller_carla.py | 13 +++- 7 files changed, 181 insertions(+), 4 deletions(-) create mode 100644 behavior_metrics/configs/CARLA_launch_files/single_ad_npc_waypoint_publisher.launch create mode 100644 behavior_metrics/configs/CARLA_launch_files/town_01_anticlockwise_single_ad_npc.launch create mode 100644 behavior_metrics/configs/default_carla_single_ad_npc.yml diff --git a/behavior_metrics/brains/CARLA/brain_carla_bird_eye_deep_learning.py b/behavior_metrics/brains/CARLA/brain_carla_bird_eye_deep_learning.py index 99db5063..19fbedfd 100644 --- a/behavior_metrics/brains/CARLA/brain_carla_bird_eye_deep_learning.py +++ b/behavior_metrics/brains/CARLA/brain_carla_bird_eye_deep_learning.py @@ -56,7 +56,10 @@ def __init__(self, sensors, actuators, handler, model, config=None): world = client.get_world() time.sleep(5) - self.vehicle = world.get_actors().filter('vehicle.*')[0] + for vehicle in world.get_actors().filter('vehicle.*'): + if vehicle.attributes.get('role_name') == 'ego_vehicle': + self.vehicle = vehicle + break if model: if not path.exists(PRETRAINED_MODELS + model): diff --git a/behavior_metrics/configs/CARLA_launch_files/single_ad_npc_waypoint_publisher.launch b/behavior_metrics/configs/CARLA_launch_files/single_ad_npc_waypoint_publisher.launch new file mode 100644 index 00000000..0c67adb0 --- /dev/null +++ b/behavior_metrics/configs/CARLA_launch_files/single_ad_npc_waypoint_publisher.launch @@ -0,0 +1,13 @@ [13-line launch XML, markup stripped in extraction] \ No newline at end of file diff --git a/behavior_metrics/configs/CARLA_launch_files/town_01_anticlockwise_single_ad_npc.launch b/behavior_metrics/configs/CARLA_launch_files/town_01_anticlockwise_single_ad_npc.launch new file mode 100644 index 00000000..df81a82e --- /dev/null +++ b/behavior_metrics/configs/CARLA_launch_files/town_01_anticlockwise_single_ad_npc.launch @@ -0,0 +1,69 @@ [69-line launch XML, markup stripped in extraction] \ No newline at end of file diff --git a/behavior_metrics/configs/default_carla_single_ad_npc.yml b/behavior_metrics/configs/default_carla_single_ad_npc.yml new file mode 100644 index 00000000..57564423 --- /dev/null +++ b/behavior_metrics/configs/default_carla_single_ad_npc.yml @@ -0,0 +1,75 @@ +Behaviors: + Robot: + Sensors: + Cameras: + Camera_0: + Name: 'camera_0' + Topic: '/carla/ego_vehicle/rgb_front/image' + Camera_1: + Name: 'camera_1' + Topic: '/carla/ego_vehicle/rgb_view/image' + Camera_2: + Name: 'camera_2' + Topic: '/carla/ego_vehicle/semantic_segmentation_front/image' + Camera_3: + Name: 'camera_3' + Topic: '/carla/ego_vehicle/dvs_front/image' + Pose3D: + Pose3D_0: + Name: 'pose3d_0' + Topic: '/carla/ego_vehicle/odometry' + BirdEyeView: + BirdEyeView_0: + Name: 'bird_eye_view_0' + Topic: '' + Speedometer: + Speedometer_0: + Name: 'speedometer_0' + Topic: '/carla/ego_vehicle/speedometer' + Actuators: + CARLA_Motors: + Motors_0: + Name: 'motors_0' + Topic: '/carla/ego_vehicle/vehicle_control_cmd' + MaxV: 3 + MaxW: 0.3 + BrainPath: 'brains/CARLA/brain_carla_bird_eye_deep_learning.py' + PilotTimeCycle: 50 + AsyncMode: False + Parameters: + Model: '20230126-183457_deepestLSTMTinyPilotNet_CARLA_14_12_dataset_bird_eye_400_epochs_no_flip_3_output_both_directions_all_towns__less_learning_rate_PAPER_cp.h5' + ImageCropped: True + ImageSize: [ 100,50 ] + ImageNormalized: True + PredictionsNormalized: True + GPU: True + UseOptimized: True + ImageTranform: '' + Type: 'CARLA' + Simulation: + World: configs/CARLA_launch_files/town_01_anticlockwise_single_ad_npc.launch + WaypointPublisher: configs/CARLA_launch_files/single_ad_npc_waypoint_publisher.launch + RandomSpawnPoint: False + Dataset: + In: '/tmp/my_bag.bag' + Out: '' + Stats: + Out: './' + PerfectLap: './perfect_bags/lap-simple-circuit.bag' + Layout: + Frame_0: + Name: frame_0 + Geometry: [1, 1, 1, 1] + Data: rgbimage + Frame_1: + Name: frame_1 + Geometry: [0, 1, 1, 1] + Data: rgbimage + Frame_2: + Name: frame_2 + Geometry: [0, 2, 1, 1] + Data: rgbimage + Frame_3: + Name: frame_3 + Geometry: [1, 2, 1, 1] + Data: rgbimage diff --git a/behavior_metrics/pilot_carla.py b/behavior_metrics/pilot_carla.py index b8e9d98a..15df0bf6 100644 --- a/behavior_metrics/pilot_carla.py +++ b/behavior_metrics/pilot_carla.py @@ -23,7 +23,7 @@ from robot.actuators import Actuators from robot.sensors import Sensors from utils.logger import logger -from utils.constants import MIN_EXPERIMENT_PERCENTAGE_COMPLETED +from utils.constants import MIN_EXPERIMENT_PERCENTAGE_COMPLETED, ROOT_PATH from rosgraph_msgs.msg import Clock from carla_msgs.msg import CarlaControl @@ -89,6 +89,7 @@ def __init__(self, configuration, controller, brain_path, experiment_model=None) self.pilot_start_time = 0 self.time_cycle = self.configuration.pilot_time_cycle self.async_mode = self.configuration.async_mode + self.waypoint_publisher_path = self.configuration.waypoint_publisher_path def __wait_carla(self): """Wait for simulator to be initialized""" @@ -129,8 +130,11 @@ def run(self): control_command.command = 1 # PAUSE control_pub.publish(control_command) + self.waypoint_publisher = None while not self.kill_event.is_set(): if not self.stop_event.is_set(): + if self.waypoint_publisher is None and self.waypoint_publisher_path is not None: + self.waypoint_publisher = subprocess.Popen(["roslaunch", ROOT_PATH + '/' + self.waypoint_publisher_path]) control_pub = rospy.Publisher('/carla/control', CarlaControl, queue_size=1) control_command =
CarlaControl() if self.async_mode: diff --git a/behavior_metrics/utils/configuration.py b/behavior_metrics/utils/configuration.py index ceb0cf48..5a4bbafc 100644 --- a/behavior_metrics/utils/configuration.py +++ b/behavior_metrics/utils/configuration.py @@ -95,6 +95,10 @@ def initialize_configuration(self, config_data): self.robot_type = robot['Type'] self.pilot_time_cycle = robot['PilotTimeCycle'] self.current_world = config_data['Behaviors']['Simulation']['World'] + if 'WaypointPublisher' in config_data['Behaviors']['Simulation']: + self.waypoint_publisher_path = config_data['Behaviors']['Simulation']['WaypointPublisher'] + else: + self.waypoint_publisher_path = None if 'RealTimeUpdateRate' in config_data['Behaviors']['Simulation']: self.real_time_update_rate = config_data['Behaviors']['Simulation']['RealTimeUpdateRate'] else: diff --git a/behavior_metrics/utils/controller_carla.py b/behavior_metrics/utils/controller_carla.py index 6a3c1156..3693ae67 100644 --- a/behavior_metrics/utils/controller_carla.py +++ b/behavior_metrics/utils/controller_carla.py @@ -73,14 +73,23 @@ def __init__(self): self.cvbridge = CvBridge() client = carla.Client('localhost', 2000) - client.set_timeout(10.0) # seconds + client.set_timeout(100.0) # seconds self.world = client.get_world() time.sleep(5) self.carla_map = self.world.get_map() while len(self.world.get_actors().filter('vehicle.*')) == 0: logger.info("Waiting for vehicles!") time.sleep(1) - self.ego_vehicle = self.world.get_actors().filter('vehicle.*')[0] + ego_vehicle_role_name = "ego_vehicle" + self.ego_vehicle = None + while self.ego_vehicle is None: + for vehicle in self.world.get_actors().filter('vehicle.*'): + if vehicle.attributes.get('role_name') == ego_vehicle_role_name: + self.ego_vehicle = vehicle + break + if self.ego_vehicle is None: + logger.info("Waiting for vehicle with role_name 'ego_vehicle'") + time.sleep(1) # sleep for 1 second before checking again self.map_waypoints = self.carla_map.generate_waypoints(0.5) self.weather = self.world.get_weather() From 4f471b0b6902177151ade6c14828cd4ea366fe81 Mon Sep 17 00:00:00 2001 From: enrique Date: Wed, 3 May 2023 16:05:49 +0200 Subject: [PATCH 18/36] changed the model --- .../brain_carla_subjective_vision_deep_learning_previous_v.py | 2 +- behavior_metrics/configs/default_carla_subjective_vision.yml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/behavior_metrics/brains/CARLA/brain_carla_subjective_vision_deep_learning_previous_v.py b/behavior_metrics/brains/CARLA/brain_carla_subjective_vision_deep_learning_previous_v.py index f088eccd..32211898 100644 --- a/behavior_metrics/brains/CARLA/brain_carla_subjective_vision_deep_learning_previous_v.py +++ b/behavior_metrics/brains/CARLA/brain_carla_subjective_vision_deep_learning_previous_v.py @@ -74,7 +74,7 @@ def __init__(self, sensors, actuators, handler, model, config=None): client = carla.Client('localhost', 2000) client.set_timeout(10.0) # seconds self.world = client.get_world() - self.world.unload_map_layer(carla.MapLayer.Buildings) + self.world.unload_map_layer(carla.MapLayer.Particles) time.sleep(5) self.vehicle = self.world.get_actors().filter('vehicle.*')[0] diff --git a/behavior_metrics/configs/default_carla_subjective_vision.yml b/behavior_metrics/configs/default_carla_subjective_vision.yml index 2dd57817..2db6fc2b 100644 --- a/behavior_metrics/configs/default_carla_subjective_vision.yml +++ b/behavior_metrics/configs/default_carla_subjective_vision.yml @@ -37,7 +37,7 @@ Behaviors: PilotTimeCycle: 100 AsyncMode: 
False Parameters: - Model: '20230107-205705_rgb_brakingsimple_71_58k.h5' + Model: '20230428-103315_pilotnet_model_3_151_cp.h5' ImageCropped: True ImageSize: [ 200,66 ] ImageNormalized: True From 18d981adffad28d0dc797fefbe00ad51af7df3ce Mon Sep 17 00:00:00 2001 From: Qi Zhao Date: Wed, 3 May 2023 17:53:23 +0000 Subject: [PATCH 19/36] added object file for ad agent --- .../CARLA_object_files/single_ad_car.json | 178 ++++++++++++++++++ 1 file changed, 178 insertions(+) create mode 100644 behavior_metrics/configs/CARLA_launch_files/CARLA_object_files/single_ad_car.json diff --git a/behavior_metrics/configs/CARLA_launch_files/CARLA_object_files/single_ad_car.json b/behavior_metrics/configs/CARLA_launch_files/CARLA_object_files/single_ad_car.json new file mode 100644 index 00000000..46758eb4 --- /dev/null +++ b/behavior_metrics/configs/CARLA_launch_files/CARLA_object_files/single_ad_car.json @@ -0,0 +1,178 @@ +{ + "objects": + [ + { + "type": "sensor.pseudo.traffic_lights", + "id": "traffic_lights" + }, + { + "type": "sensor.pseudo.objects", + "id": "objects" + }, + { + "type": "sensor.pseudo.actor_list", + "id": "actor_list" + }, + { + "type": "sensor.pseudo.markers", + "id": "markers" + }, + { + "type": "sensor.pseudo.opendrive_map", + "id": "map" + }, + { + "type": "vehicle.tesla.model3", + "id": "ego_vehicle", + "sensors": + [ + { + "type": "sensor.camera.rgb", + "id": "rgb_front", + "spawn_point": {"x": 2.0, "y": 0.0, "z": 2.0, "roll": 0.0, "pitch": 0.0, "yaw": 0.0}, + "image_size_x": 800, + "image_size_y": 600, + "fov": 90.0 + }, + { + "type": "sensor.camera.rgb", + "id": "rgb_view", + "spawn_point": {"x": -4.5, "y": 0.0, "z": 2.8, "roll": 0.0, "pitch": 20.0, "yaw": 0.0}, + "image_size_x": 800, + "image_size_y": 600, + "fov": 90.0, + "attached_objects": + [ + { + "type": "actor.pseudo.control", + "id": "control" + } + ] + }, + { + "type": "sensor.lidar.ray_cast", + "id": "lidar", + "spawn_point": {"x": 0.0, "y": 0.0, "z": 2.4, "roll": 0.0, "pitch": 0.0, "yaw": 0.0}, + "range": 50, + "channels": 32, + "points_per_second": 320000, + "upper_fov": 2.0, + "lower_fov": -26.8, + "rotation_frequency": 20, + "noise_stddev": 0.0 + }, + { + "type": "sensor.lidar.ray_cast_semantic", + "id": "semantic_lidar", + "spawn_point": {"x": 0.0, "y": 0.0, "z": 2.4, "roll": 0.0, "pitch": 0.0, "yaw": 0.0}, + "range": 50, + "channels": 32, + "points_per_second": 320000, + "upper_fov": 2.0, + "lower_fov": -26.8, + "rotation_frequency": 20 + }, + { + "type": "sensor.other.radar", + "id": "radar_front", + "spawn_point": {"x": 2.0, "y": 0.0, "z": 2.0, "roll": 0.0, "pitch": 0.0, "yaw": 0.0}, + "horizontal_fov": 30.0, + "vertical_fov": 10.0, + "points_per_second": 1500, + "range": 100.0 + }, + { + "type": "sensor.camera.semantic_segmentation", + "id": "semantic_segmentation_front", + "spawn_point": {"x": 2.0, "y": 0.0, "z": 2.0, "roll": 0.0, "pitch": 0.0, "yaw": 0.0}, + "fov": 90.0, + "image_size_x": 400, + "image_size_y": 70 + }, + { + "type": "sensor.camera.depth", + "id": "depth_front", + "spawn_point": {"x": 2.0, "y": 0.0, "z": 2.0, "roll": 0.0, "pitch": 0.0, "yaw": 0.0}, + "fov": 90.0, + "image_size_x": 400, + "image_size_y": 70 + }, + { + "type": "sensor.camera.dvs", + "id": "dvs_front", + "spawn_point": {"x": 2.0, "y": 0.0, "z": 2.0, "roll": 0.0, "pitch": 0.0, "yaw": 0.0}, + "fov": 90.0, + "image_size_x": 400, + "image_size_y": 70, + "positive_threshold": 0.3, + "negative_threshold": 0.3, + "sigma_positive_threshold": 0.0, + "sigma_negative_threshold": 0.0, + "use_log": true, + "log_eps": 0.001 + }, + { + "type": 
"sensor.other.gnss", + "id": "gnss", + "spawn_point": {"x": 1.0, "y": 0.0, "z": 2.0}, + "noise_alt_stddev": 0.0, "noise_lat_stddev": 0.0, "noise_lon_stddev": 0.0, + "noise_alt_bias": 0.0, "noise_lat_bias": 0.0, "noise_lon_bias": 0.0 + }, + { + "type": "sensor.other.imu", + "id": "imu", + "spawn_point": {"x": 2.0, "y": 0.0, "z": 2.0, "roll": 0.0, "pitch": 0.0, "yaw": 0.0}, + "noise_accel_stddev_x": 0.0, "noise_accel_stddev_y": 0.0, "noise_accel_stddev_z": 0.0, + "noise_gyro_stddev_x": 0.0, "noise_gyro_stddev_y": 0.0, "noise_gyro_stddev_z": 0.0, + "noise_gyro_bias_x": 0.0, "noise_gyro_bias_y": 0.0, "noise_gyro_bias_z": 0.0 + }, + { + "type": "sensor.other.collision", + "id": "collision", + "spawn_point": {"x": 0.0, "y": 0.0, "z": 0.0} + }, + { + "type": "sensor.other.lane_invasion", + "id": "lane_invasion", + "spawn_point": {"x": 0.0, "y": 0.0, "z": 0.0} + }, + { + "type": "sensor.pseudo.tf", + "id": "tf" + }, + { + "type": "sensor.pseudo.objects", + "id": "objects" + }, + { + "type": "sensor.pseudo.odom", + "id": "odometry" + }, + { + "type": "sensor.pseudo.speedometer", + "id": "speedometer" + }, + { + "type": "actor.pseudo.control", + "id": "control" + } + ] + }, + { + "type": "vehicle.audi.a2", + "id": "npc_vehicle_1", + "spawn_point": {"x": 60.0, "y": 2.0, "z": 1.37, "roll": 0.0, "pitch": 0.0, "yaw": 180.0}, + "sensors": + [ + { + "type": "sensor.pseudo.objects", + "id": "objects" + }, + { + "type": "sensor.pseudo.odom", + "id": "odometry" + } + ] + } + ] +} \ No newline at end of file From 2dcc2cdbd5b17fe0d012dbc877fb2728b36ad851 Mon Sep 17 00:00:00 2001 From: sergiopaniego Date: Wed, 3 May 2023 17:56:53 +0000 Subject: [PATCH 20/36] Added Torch TensorRT and optimized brains for CARLA --- ..._carla_bird_eye_deep_learning_tensor_rt.py | 5 + ..._eye_deep_learning_torch_tensorrt_float.py | 158 +++++++++++++++++ ..._eye_deep_learning_torch_tensorrt_int_8.py | 164 ++++++++++++++++++ 3 files changed, 327 insertions(+) create mode 100644 behavior_metrics/brains/CARLA/brain_carla_bird_eye_deep_learning_torch_tensorrt_float.py create mode 100644 behavior_metrics/brains/CARLA/brain_carla_bird_eye_deep_learning_torch_tensorrt_int_8.py diff --git a/behavior_metrics/brains/CARLA/brain_carla_bird_eye_deep_learning_tensor_rt.py b/behavior_metrics/brains/CARLA/brain_carla_bird_eye_deep_learning_tensor_rt.py index a597449c..9c68d36f 100644 --- a/behavior_metrics/brains/CARLA/brain_carla_bird_eye_deep_learning_tensor_rt.py +++ b/behavior_metrics/brains/CARLA/brain_carla_bird_eye_deep_learning_tensor_rt.py @@ -64,7 +64,12 @@ def __init__(self, sensors, actuators, handler, model, config=None): logger.info("** Load TF model **") logger.info("Using TensorRT models.....") + print() + print(tf.__file__) + print() + print(PRETRAINED_MODELS + model) self.net = tf.saved_model.load(PRETRAINED_MODELS + model) + print('llega!') self.infer = self.net.signatures['serving_default'] self.output_tensorname = list(self.infer.structured_outputs.keys())[0] self.inf_func = self.tftrt_inference diff --git a/behavior_metrics/brains/CARLA/brain_carla_bird_eye_deep_learning_torch_tensorrt_float.py b/behavior_metrics/brains/CARLA/brain_carla_bird_eye_deep_learning_torch_tensorrt_float.py new file mode 100644 index 00000000..e893b47b --- /dev/null +++ b/behavior_metrics/brains/CARLA/brain_carla_bird_eye_deep_learning_torch_tensorrt_float.py @@ -0,0 +1,158 @@ +from torchvision import transforms +from PIL import Image +from brains.f1.torch_utils.pilotnet import PilotNet +from utils.constants import PRETRAINED_MODELS_DIR, ROOT_PATH 
+from os import path + +import numpy as np + +import torch +import torchvision +import cv2 +import time +import os +import math +import carla + +PRETRAINED_MODELS = ROOT_PATH + '/' + PRETRAINED_MODELS_DIR + 'carla_tf_models/' +FLOAT = torch.FloatTensor + +class Brain: + """Specific brain for the CARLA robot. See header.""" + + def __init__(self, sensors, actuators, model=None, handler=None, config=None): + """Constructor of the class. + + Arguments: + sensors {robot.sensors.Sensors} -- Sensors instance of the robot + actuators {robot.actuators.Actuators} -- Actuators instance of the robot + + Keyword Arguments: + handler {brains.brain_handler.Brains} -- Handler of the current brain. Communication with the controller + (default: {None}) + """ + self.motors = actuators.get_motor('motors_0') + self.camera_0 = sensors.get_camera('camera_0') + self.camera_1 = sensors.get_camera('camera_1') + self.camera_2 = sensors.get_camera('camera_2') + self.camera_3 = sensors.get_camera('camera_3') + self.bird_eye_view = sensors.get_bird_eye_view('bird_eye_view_0') + self.handler = handler + self.cont = 0 + self.inference_times = [] + self.gpu_inference = config['GPU'] + self.device = torch.device('cuda' if (torch.cuda.is_available() and self.gpu_inference) else 'cpu') + self.first_image = None + self.transformations = transforms.Compose([ + transforms.ToTensor() + ]) + + self.suddenness_distance = [] + self.previous_v = None + self.previous_w = None + + if config: + if 'ImageCrop' in config.keys(): + self.cropImage = config['ImageCrop'] + else: + self.cropImage = True + + if model: + if not path.exists(PRETRAINED_MODELS + model): + print("File " + model + " cannot be found in " + PRETRAINED_MODELS) + + if config['UseOptimized']: + print('Loading optimized model...') + print(PRETRAINED_MODELS + model) + + # Float y half TensorRT + self.net = torch.jit.load(PRETRAINED_MODELS + model).eval().to(self.device) +# self.clean_model() + else: + self.net = PilotNet((200,66,3), 3).to(self.device) + self.net.load_state_dict(torch.load(PRETRAINED_MODELS + model,map_location=self.device)) + else: + print("Brain not loaded") + + client = carla.Client('localhost', 2000) + client.set_timeout(10.0) # seconds + world = client.get_world() + + time.sleep(5) + self.vehicle = world.get_actors().filter('vehicle.*')[0] + + def update_frame(self, frame_id, data): + """Update the information to be shown in one of the GUI's frames. + + Arguments: + frame_id {str} -- Id of the frame that will represent the data + data {*} -- Data to be shown in the frame. Depending on the type of frame (rgbimage, laser, pose3d, etc) + """ + if data.shape[0] != data.shape[1]: + if data.shape[0] > data.shape[1]: + difference = data.shape[0] - data.shape[1] + extra_left, extra_right = int(difference/2), int(difference/2) + extra_top, extra_bottom = 0, 0 + else: + difference = data.shape[1] - data.shape[0] + extra_left, extra_right = 0, 0 + extra_top, extra_bottom = int(difference/2), int(difference/2) + + + data = np.pad(data, ((extra_top, extra_bottom), (extra_left, extra_right), (0, 0)), mode='constant', constant_values=0) + + self.handler.update_frame(frame_id, data) + + def execute(self): + """Main loop of the brain. 
This will be called iteratively each TIME_CYCLE (see pilot.py)""" + + self.cont += 1 + + image = self.camera_0.getImage().data + image_1 = self.camera_1.getImage().data + image_2 = self.camera_2.getImage().data + image_3 = self.camera_3.getImage().data + + bird_eye_view_1 = self.bird_eye_view.getImage(self.vehicle) + bird_eye_view_1 = cv2.cvtColor(bird_eye_view_1, cv2.COLOR_BGR2RGB) + + self.update_frame('frame_1', image_1) + self.update_frame('frame_2', image_2) + self.update_frame('frame_3', image_3) + + self.update_frame('frame_0', bird_eye_view_1) + + try: + #img = cv2.resize(bird_eye_view_1, (int(200), int(66))) + img = cv2.resize(bird_eye_view_1, (int(66), int(200))) + img = Image.fromarray(img) + image = self.transformations(img).unsqueeze(0) + image = FLOAT(image).to(self.device) + + start_time = time.time() + with torch.no_grad(): + prediction = self.net(image).cpu().numpy() if self.gpu_inference else self.net(image).numpy() + #print(prediction) + #print(time.time() - start_time) + self.inference_times.append(time.time() - start_time) + + throttle = prediction[0][0] + steer = prediction[0][1] * (1 - (-1)) + (-1) + break_command = prediction[0][2] + + speed = self.vehicle.get_velocity() + vehicle_speed = 3.6 * math.sqrt(speed.x**2 + speed.y**2 + speed.z**2) + + if vehicle_speed < 5: + self.motors.sendThrottle(1.0) + self.motors.sendSteer(0.0) + self.motors.sendBrake(0) + else: + self.motors.sendThrottle(throttle) + self.motors.sendSteer(steer) + self.motors.sendBrake(break_command) + + except Exception as err: + print(err) + + diff --git a/behavior_metrics/brains/CARLA/brain_carla_bird_eye_deep_learning_torch_tensorrt_int_8.py b/behavior_metrics/brains/CARLA/brain_carla_bird_eye_deep_learning_torch_tensorrt_int_8.py new file mode 100644 index 00000000..b81e2318 --- /dev/null +++ b/behavior_metrics/brains/CARLA/brain_carla_bird_eye_deep_learning_torch_tensorrt_int_8.py @@ -0,0 +1,164 @@ +from torchvision import transforms +from PIL import Image +from brains.f1.torch_utils.pilotnet import PilotNet +from utils.constants import PRETRAINED_MODELS_DIR, ROOT_PATH +from os import path + +import numpy as np + +import torch +import torchvision +import cv2 +import time +import os +import math +import carla + +PRETRAINED_MODELS = ROOT_PATH + '/' + PRETRAINED_MODELS_DIR + 'carla_tf_models/' +FLOAT = torch.FloatTensor + +class Brain: + """Specific brain for the CARLA robot. See header.""" + + def __init__(self, sensors, actuators, model=None, handler=None, config=None): + """Constructor of the class. + + Arguments: + sensors {robot.sensors.Sensors} -- Sensors instance of the robot + actuators {robot.actuators.Actuators} -- Actuators instance of the robot + + Keyword Arguments: + handler {brains.brain_handler.Brains} -- Handler of the current brain. 
Communication with the controller + (default: {None}) + """ + self.motors = actuators.get_motor('motors_0') + self.camera_0 = sensors.get_camera('camera_0') + self.camera_1 = sensors.get_camera('camera_1') + self.camera_2 = sensors.get_camera('camera_2') + self.camera_3 = sensors.get_camera('camera_3') + self.bird_eye_view = sensors.get_bird_eye_view('bird_eye_view_0') + self.handler = handler + self.cont = 0 + self.inference_times = [] + self.gpu_inference = config['GPU'] + self.device = torch.device('cuda' if (torch.cuda.is_available() and self.gpu_inference) else 'cpu') + self.first_image = None + self.transformations = transforms.Compose([ + transforms.ToTensor() + ]) + + self.suddenness_distance = [] + self.previous_v = None + self.previous_w = None + + if config: + if 'ImageCrop' in config.keys(): + self.cropImage = config['ImageCrop'] + else: + self.cropImage = True + + if model: + if not path.exists(PRETRAINED_MODELS + model): + print("File " + model + " cannot be found in " + PRETRAINED_MODELS) + + if config['UseOptimized']: + print('Loading optimized model TensorRT INT8...') + print(PRETRAINED_MODELS + model) + + # INT 8 TensorRT + import torch_tensorrt + self.net = torch.jit.load(PRETRAINED_MODELS + model).eval().to(self.device) + compile_spec = {"inputs": [torch_tensorrt.Input([1, 3, 200, 66])], + "enabled_precisions": torch.int8, + } + self.net = torch_tensorrt.compile(self.net, **compile_spec).to(self.device) + +# self.clean_model() + else: + self.net = PilotNet((200,66,3), 3).to(self.device) + self.net.load_state_dict(torch.load(PRETRAINED_MODELS + model,map_location=self.device)) + else: + print("Brain not loaded") + + client = carla.Client('localhost', 2000) + client.set_timeout(10.0) # seconds + world = client.get_world() + + time.sleep(5) + self.vehicle = world.get_actors().filter('vehicle.*')[0] + + def update_frame(self, frame_id, data): + """Update the information to be shown in one of the GUI's frames. + + Arguments: + frame_id {str} -- Id of the frame that will represent the data + data {*} -- Data to be shown in the frame. Depending on the type of frame (rgbimage, laser, pose3d, etc) + """ + if data.shape[0] != data.shape[1]: + if data.shape[0] > data.shape[1]: + difference = data.shape[0] - data.shape[1] + extra_left, extra_right = int(difference/2), int(difference/2) + extra_top, extra_bottom = 0, 0 + else: + difference = data.shape[1] - data.shape[0] + extra_left, extra_right = 0, 0 + extra_top, extra_bottom = int(difference/2), int(difference/2) + + + data = np.pad(data, ((extra_top, extra_bottom), (extra_left, extra_right), (0, 0)), mode='constant', constant_values=0) + + self.handler.update_frame(frame_id, data) + + def execute(self): + """Main loop of the brain. 
This will be called iteratively each TIME_CYCLE (see pilot.py)""" + + self.cont += 1 + + image = self.camera_0.getImage().data + image_1 = self.camera_1.getImage().data + image_2 = self.camera_2.getImage().data + image_3 = self.camera_3.getImage().data + + bird_eye_view_1 = self.bird_eye_view.getImage(self.vehicle) + bird_eye_view_1 = cv2.cvtColor(bird_eye_view_1, cv2.COLOR_BGR2RGB) + + self.update_frame('frame_1', image_1) + self.update_frame('frame_2', image_2) + self.update_frame('frame_3', image_3) + + self.update_frame('frame_0', bird_eye_view_1) + + try: + #img = cv2.resize(bird_eye_view_1, (int(200), int(66))) + img = cv2.resize(bird_eye_view_1, (int(66), int(200))) + img = Image.fromarray(img) + image = self.transformations(img).unsqueeze(0) + image = FLOAT(image).to(self.device) + + start_time = time.time() + with torch.no_grad(): + prediction = self.net(image).cpu().numpy() if self.gpu_inference else self.net(image).numpy() + #print(prediction) + #print(time.time() - start_time) + self.inference_times.append(time.time() - start_time) + + throttle = prediction[0][0] + steer = prediction[0][1] * (1 - (-1)) + (-1) + break_command = prediction[0][2] + + speed = self.vehicle.get_velocity() + vehicle_speed = 3.6 * math.sqrt(speed.x**2 + speed.y**2 + speed.z**2) + + if vehicle_speed < 5: + self.motors.sendThrottle(1.0) + self.motors.sendSteer(0.0) + self.motors.sendBrake(0) + else: + self.motors.sendThrottle(throttle) + self.motors.sendSteer(steer) + self.motors.sendBrake(break_command) + + except Exception as err: + print(err) + + From 8435764b5552ede2d3efab9f49afa72ae0519f71 Mon Sep 17 00:00:00 2001 From: sergiopaniego Date: Wed, 3 May 2023 18:14:18 +0000 Subject: [PATCH 21/36] Updated TensorRT INT8 brain --- ..._eye_deep_learning_torch_tensorrt_int_8.py | 4 +- .../CARLA/brain_carla_segmented_image.py | 198 ++++++++++++++++++ .../configs/default_carla_tf_lite.yml | 2 +- 3 files changed, 200 insertions(+), 4 deletions(-) create mode 100644 behavior_metrics/brains/CARLA/brain_carla_segmented_image.py diff --git a/behavior_metrics/brains/CARLA/brain_carla_bird_eye_deep_learning_torch_tensorrt_int_8.py b/behavior_metrics/brains/CARLA/brain_carla_bird_eye_deep_learning_torch_tensorrt_int_8.py index b81e2318..117e9a96 100644 --- a/behavior_metrics/brains/CARLA/brain_carla_bird_eye_deep_learning_torch_tensorrt_int_8.py +++ b/behavior_metrics/brains/CARLA/brain_carla_bird_eye_deep_learning_torch_tensorrt_int_8.py @@ -41,7 +41,7 @@ def __init__(self, sensors, actuators, model=None, handler=None, config=None): self.cont = 0 self.inference_times = [] self.gpu_inference = config['GPU'] - self.device = torch.device('cuda' if (torch.cuda.is_available() and self.gpu_inference) else 'cpu') + self.device = torch.device('cuda') self.first_image = None self.transformations = transforms.Compose([ transforms.ToTensor() @@ -72,8 +72,6 @@ def __init__(self, sensors, actuators, model=None, handler=None, config=None): "enabled_precisions": torch.int8, } self.net = torch_tensorrt.compile(self.net, **compile_spec).to(self.device) - -# self.clean_model() else: self.net = PilotNet((200,66,3), 3).to(self.device) self.net.load_state_dict(torch.load(PRETRAINED_MODELS + model,map_location=self.device)) diff --git a/behavior_metrics/brains/CARLA/brain_carla_segmented_image.py b/behavior_metrics/brains/CARLA/brain_carla_segmented_image.py new file mode 100644 index 00000000..62a28441 --- /dev/null +++ b/behavior_metrics/brains/CARLA/brain_carla_segmented_image.py @@ -0,0 +1,198 @@ +#!/usr/bin/python +# -*- coding: 
utf-8 -*- +import csv +import cv2 +import math +import numpy as np +import threading +import time +import carla +from albumentations import ( + Compose, Normalize, RandomRain, RandomBrightness, RandomShadow, RandomSnow, RandomFog, RandomSunFlare +) +from utils.constants import DATASETS_DIR, ROOT_PATH + + + +GENERATED_DATASETS_DIR = ROOT_PATH + '/' + DATASETS_DIR + + +class Brain: + + def __init__(self, sensors, actuators, handler, config=None): + self.camera = sensors.get_camera('camera_0') + self.camera_1 = sensors.get_camera('camera_1') + self.camera_2 = sensors.get_camera('camera_2') + self.camera_3 = sensors.get_camera('camera_3') + + self.pose = sensors.get_pose3d('pose3d_0') + + self.bird_eye_view = sensors.get_bird_eye_view('bird_eye_view_0') + + self.motors = actuators.get_motor('motors_0') + self.handler = handler + self.config = config + + self.threshold_image = np.zeros((640, 360, 3), np.uint8) + self.color_image = np.zeros((640, 360, 3), np.uint8) + self.lock = threading.Lock() + self.threshold_image_lock = threading.Lock() + self.color_image_lock = threading.Lock() + self.cont = 0 + self.iteration = 0 + + # self.previous_timestamp = 0 + # self.previous_image = 0 + + self.previous_v = None + self.previous_w = None + self.previous_w_normalized = None + + client = carla.Client('localhost', 2000) + client.set_timeout(10.0) # seconds + world = client.get_world() + time.sleep(3) + self.vehicle = world.get_actors().filter('vehicle.*')[0] + + self.counter = 0 + + traffic_lights = world.get_actors().filter('traffic.traffic_light') + traffic_speed_limits = world.get_actors().filter('traffic.speed_limit*') + print(traffic_speed_limits) + for traffic_light in traffic_lights: + traffic_light.set_green_time(20000) + traffic_light.set_state(carla.TrafficLightState.Green) + + for speed_limit in traffic_speed_limits: + success = speed_limit.destroy() + print(success) + + + route = ["Straight", "Straight", "Straight", "Straight", "Straight", + "Straight", "Straight", "Straight", "Straight", "Straight", + "Straight", "Straight", "Straight", "Straight", "Straight", + "Straight", "Straight", "Straight", "Straight", "Straight", + "Straight", "Straight", "Straight", "Straight", "Straight", + "Straight", "Straight", "Straight", "Straight", "Straight"] + traffic_manager = client.get_trafficmanager() + traffic_manager.set_route(self.vehicle, route) + self.vehicle.set_autopilot(True) + + + def update_frame(self, frame_id, data): + """Update the information to be shown in one of the GUI's frames. + + Arguments: + frame_id {str} -- Id of the frame that will represent the data + data {*} -- Data to be shown in the frame. 
Depending on the type of frame (rgbimage, laser, pose3d, etc) + """ + self.handler.update_frame(frame_id, data) + + def update_pose(self, pose_data): + self.handler.update_pose3d(pose_data) + + def get_center(self, lines): + #print('ENTRA!') + try: + point = np.divide(np.max(np.nonzero(lines)) - np.min(np.nonzero(lines)), 2) + return np.min(np.nonzero(lines)) + point + except ValueError: + print(f"No lines detected in the image") + return 0 + + def execute(self): + image_2 = self.camera_2.getImage().data + hsv_nemo = cv2.cvtColor(image_2, cv2.COLOR_RGB2HSV) + + + unique_values=set( tuple(v) for m2d in hsv_nemo for v in m2d ) + #print(unique_values) + #print(len(unique_values)) + + + + light_sidewalk = (151, 217, 243) + dark_sidewalk = (153, 219, 245) + + light_pavement = (149, 127, 127) + dark_pavement = (151, 129, 129) + + mask_sidewalk = cv2.inRange(hsv_nemo, light_sidewalk, dark_sidewalk) + result_sidewalk = cv2.bitwise_and(image_2, image_2, mask=mask_sidewalk) + + mask_pavement = cv2.inRange(hsv_nemo, light_pavement, dark_pavement) + result_pavement = cv2.bitwise_and(image_2, image_2, mask=mask_pavement) + + + # Adjust according to your adjacency requirement. + kernel = np.ones((3, 3), dtype=np.uint8) + + # Dilating masks to expand boundary. + color1_mask = cv2.dilate(mask_sidewalk, kernel, iterations=1) + color2_mask = cv2.dilate(mask_pavement, kernel, iterations=1) + + # Required points now will have both color's mask val as 255. + common = cv2.bitwise_and(color1_mask, color2_mask) + SOME_THRESHOLD = 10 + + # Common is binary np.uint8 image, min = 0, max = 255. + # SOME_THRESHOLD can be anything within the above range. (not needed though) + # Extract/Use it in whatever way you want it. + intersection_points = np.where(common > SOME_THRESHOLD) + + # Say you want these points in a list form, then you can do this. 
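As a side note, the dilate-and-intersect trick above is easiest to verify on toy masks. A self-contained sketch with invented values (not part of the brain itself):

    import cv2
    import numpy as np

    kernel = np.ones((3, 3), dtype=np.uint8)
    a = np.zeros((5, 5), np.uint8); a[:, :2] = 255   # stands in for the sidewalk mask
    b = np.zeros((5, 5), np.uint8); b[:, 3:] = 255   # stands in for the pavement mask

    # Dilating both masks by one pixel makes them overlap exactly along the
    # shared boundary, so the AND keeps only that seam (column 2 here).
    common = cv2.bitwise_and(cv2.dilate(a, kernel), cv2.dilate(b, kernel))
    print(np.column_stack(np.where(common > 10)))    # [[0 2] [1 2] ... [4 2]]

The brain's own list construction follows.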
+ pts_list = [[r, c] for r, c in zip(*intersection_points)] + #print(pts_list) + + #for x, y in pts_list: + # image_2[x][y] = (255, 0, 0) + + red_line_mask = np.zeros((70, 400, 3), dtype=np.uint8) + + for x, y in pts_list: + red_line_mask[x][y] = (255, 0, 0) + + + ########################################################################################## + + #x_row = [10,60,110] + x_row = [10,30,60] + + + x_row = [50, 60, 69] + + mask = red_line_mask + + print(mask.shape) + + lines = [mask[x_row[idx], :] for idx, x in enumerate(x_row)] + #print(lines) + #print(lines[0]) + #print(type(lines[0])) + #print(lines[0].shape) + unique_values=set( tuple(m2d) for m2d in lines[0] ) + unique_values=set( tuple(m2d) for m2d in lines[1] ) + unique_values=set( tuple(m2d) for m2d in lines[2] ) + + #centrals = list(map(self.get_center, lines)) + + #point = np.divide(np.max(np.nonzero(lines)) - np.min(np.nonzero(lines)), 2) + #print(point) + #a = np.min(np.nonzero(lines)) + point + #print(a) + + centrals = list(map(self.get_center, lines)) + print(centrals) + + for idx, central in enumerate(centrals): + red_line_mask[idx][centrals[0]] = (0, 255, 0) + + + ########################################################################################## + + self.update_frame('frame_0', red_line_mask) + self.update_frame('frame_1', result_pavement) + self.update_frame('frame_2', image_2) + self.update_frame('frame_3', result_sidewalk) + + diff --git a/behavior_metrics/configs/default_carla_tf_lite.yml b/behavior_metrics/configs/default_carla_tf_lite.yml index 911595ef..615bf008 100644 --- a/behavior_metrics/configs/default_carla_tf_lite.yml +++ b/behavior_metrics/configs/default_carla_tf_lite.yml @@ -37,7 +37,7 @@ Behaviors: PilotTimeCycle: 1 AsyncMode: False Parameters: - Model: 'optimized_pilotnet_models/pilotnet_dynamic_quant.tflite' + Model: 'optimized_pilotnet_models_tensorflow/28_04_pilotnet_dynamic_quant.tflite' ImageCropped: True ImageSize: [ 100,50 ] ImageNormalized: True From b82157655bedcd5ed26a7d74654996eeb70a4d1f Mon Sep 17 00:00:00 2001 From: sergiopaniego Date: Fri, 5 May 2023 09:29:18 +0000 Subject: [PATCH 22/36] Torch brains updated with top speed --- .../brain_carla_bird_eye_deep_learning_torch.py | 17 +++++++++++------ ...rd_eye_deep_learning_torch_tensorrt_float.py | 17 +++++++++++------ ...rd_eye_deep_learning_torch_tensorrt_int_8.py | 17 +++++++++++------ 3 files changed, 33 insertions(+), 18 deletions(-) diff --git a/behavior_metrics/brains/CARLA/brain_carla_bird_eye_deep_learning_torch.py b/behavior_metrics/brains/CARLA/brain_carla_bird_eye_deep_learning_torch.py index 13373d43..f55c71c9 100644 --- a/behavior_metrics/brains/CARLA/brain_carla_bird_eye_deep_learning_torch.py +++ b/behavior_metrics/brains/CARLA/brain_carla_bird_eye_deep_learning_torch.py @@ -137,14 +137,19 @@ def execute(self): speed = self.vehicle.get_velocity() vehicle_speed = 3.6 * math.sqrt(speed.x**2 + speed.y**2 + speed.z**2) - if vehicle_speed < 5: - self.motors.sendThrottle(1.0) - self.motors.sendSteer(0.0) - self.motors.sendBrake(0) - else: - self.motors.sendThrottle(throttle) + if vehicle_speed > 30: + self.motors.sendThrottle(0) self.motors.sendSteer(steer) self.motors.sendBrake(break_command) + else: + if vehicle_speed < 5: + self.motors.sendThrottle(1.0) + self.motors.sendSteer(0.0) + self.motors.sendBrake(0) + else: + self.motors.sendThrottle(throttle) + self.motors.sendSteer(steer) + self.motors.sendBrake(break_command) except Exception as err: print(err) diff --git 
a/behavior_metrics/brains/CARLA/brain_carla_bird_eye_deep_learning_torch_tensorrt_float.py b/behavior_metrics/brains/CARLA/brain_carla_bird_eye_deep_learning_torch_tensorrt_float.py index e893b47b..0d32c244 100644 --- a/behavior_metrics/brains/CARLA/brain_carla_bird_eye_deep_learning_torch_tensorrt_float.py +++ b/behavior_metrics/brains/CARLA/brain_carla_bird_eye_deep_learning_torch_tensorrt_float.py @@ -143,14 +143,19 @@ def execute(self): speed = self.vehicle.get_velocity() vehicle_speed = 3.6 * math.sqrt(speed.x**2 + speed.y**2 + speed.z**2) - if vehicle_speed < 5: - self.motors.sendThrottle(1.0) - self.motors.sendSteer(0.0) - self.motors.sendBrake(0) - else: - self.motors.sendThrottle(throttle) + if vehicle_speed > 30: + self.motors.sendThrottle(0) self.motors.sendSteer(steer) self.motors.sendBrake(break_command) + else: + if vehicle_speed < 5: + self.motors.sendThrottle(1.0) + self.motors.sendSteer(0.0) + self.motors.sendBrake(0) + else: + self.motors.sendThrottle(throttle) + self.motors.sendSteer(steer) + self.motors.sendBrake(break_command) except Exception as err: print(err) diff --git a/behavior_metrics/brains/CARLA/brain_carla_bird_eye_deep_learning_torch_tensorrt_int_8.py b/behavior_metrics/brains/CARLA/brain_carla_bird_eye_deep_learning_torch_tensorrt_int_8.py index 117e9a96..ca07c9e8 100644 --- a/behavior_metrics/brains/CARLA/brain_carla_bird_eye_deep_learning_torch_tensorrt_int_8.py +++ b/behavior_metrics/brains/CARLA/brain_carla_bird_eye_deep_learning_torch_tensorrt_int_8.py @@ -147,14 +147,19 @@ def execute(self): speed = self.vehicle.get_velocity() vehicle_speed = 3.6 * math.sqrt(speed.x**2 + speed.y**2 + speed.z**2) - if vehicle_speed < 5: - self.motors.sendThrottle(1.0) - self.motors.sendSteer(0.0) - self.motors.sendBrake(0) - else: - self.motors.sendThrottle(throttle) + if vehicle_speed > 30: + self.motors.sendThrottle(0) self.motors.sendSteer(steer) self.motors.sendBrake(break_command) + else: + if vehicle_speed < 5: + self.motors.sendThrottle(1.0) + self.motors.sendSteer(0.0) + self.motors.sendBrake(0) + else: + self.motors.sendThrottle(throttle) + self.motors.sendSteer(steer) + self.motors.sendBrake(break_command) except Exception as err: print(err) From 382eabe6720c18bf4da4dda2f2bd86a0ca66ffcf Mon Sep 17 00:00:00 2001 From: sergiopaniego Date: Fri, 5 May 2023 10:54:34 +0000 Subject: [PATCH 23/36] Updated image transformations for PyTorch --- ...rain_carla_bird_eye_deep_learning_torch.py | 20 ++++++++++++------- ..._eye_deep_learning_torch_tensorrt_float.py | 17 ++++++++++------ ..._eye_deep_learning_torch_tensorrt_int_8.py | 16 +++++++++------ 3 files changed, 34 insertions(+), 19 deletions(-) diff --git a/behavior_metrics/brains/CARLA/brain_carla_bird_eye_deep_learning_torch.py b/behavior_metrics/brains/CARLA/brain_carla_bird_eye_deep_learning_torch.py index f55c71c9..75e8ac6d 100644 --- a/behavior_metrics/brains/CARLA/brain_carla_bird_eye_deep_learning_torch.py +++ b/behavior_metrics/brains/CARLA/brain_carla_bird_eye_deep_learning_torch.py @@ -1,8 +1,11 @@ -from torchvision import transforms from PIL import Image from brains.f1.torch_utils.pilotnet import PilotNet from utils.constants import PRETRAINED_MODELS_DIR, ROOT_PATH from os import path +from albumentations import ( + Compose, Normalize +) +from albumentations.pytorch.transforms import ToTensorV2 import numpy as np @@ -43,8 +46,9 @@ def __init__(self, sensors, actuators, model=None, handler=None, config=None): self.gpu_inference = config['GPU'] self.device = torch.device('cuda' if (torch.cuda.is_available() 
and self.gpu_inference) else 'cpu') self.first_image = None - self.transformations = transforms.Compose([ - transforms.ToTensor() + self.transformations = Compose([ + Normalize(), + ToTensorV2() ]) self.suddenness_distance = [] @@ -119,15 +123,17 @@ def execute(self): self.update_frame('frame_0', bird_eye_view_1) try: - #img = cv2.resize(bird_eye_view_1, (int(200), int(66))) img = cv2.resize(bird_eye_view_1, (int(66), int(200))) - img = Image.fromarray(img) - image = self.transformations(img).unsqueeze(0) + image = self.transformations(image=img) + image = image['image'] + image = image.unsqueeze(0) image = FLOAT(image).to(self.device) start_time = time.time() with torch.no_grad(): prediction = self.net(image).cpu().numpy() if self.gpu_inference else self.net(image).numpy() + print(prediction) + #print(time.time() - start_time) self.inference_times.append(time.time() - start_time) throttle = prediction[0][0] @@ -147,7 +153,7 @@ def execute(self): self.motors.sendSteer(0.0) self.motors.sendBrake(0) else: - self.motors.sendThrottle(throttle) + self.motors.sendThrottle(0.5) self.motors.sendSteer(steer) self.motors.sendBrake(break_command) diff --git a/behavior_metrics/brains/CARLA/brain_carla_bird_eye_deep_learning_torch_tensorrt_float.py b/behavior_metrics/brains/CARLA/brain_carla_bird_eye_deep_learning_torch_tensorrt_float.py index 0d32c244..815c23cb 100644 --- a/behavior_metrics/brains/CARLA/brain_carla_bird_eye_deep_learning_torch_tensorrt_float.py +++ b/behavior_metrics/brains/CARLA/brain_carla_bird_eye_deep_learning_torch_tensorrt_float.py @@ -1,8 +1,11 @@ -from torchvision import transforms from PIL import Image from brains.f1.torch_utils.pilotnet import PilotNet from utils.constants import PRETRAINED_MODELS_DIR, ROOT_PATH from os import path +from albumentations import ( + Compose, Normalize +) +from albumentations.pytorch.transforms import ToTensorV2 import numpy as np @@ -43,8 +46,9 @@ def __init__(self, sensors, actuators, model=None, handler=None, config=None): self.gpu_inference = config['GPU'] self.device = torch.device('cuda' if (torch.cuda.is_available() and self.gpu_inference) else 'cpu') self.first_image = None - self.transformations = transforms.Compose([ - transforms.ToTensor() + self.transformations = Compose([ + Normalize(), + ToTensorV2() ]) self.suddenness_distance = [] @@ -123,10 +127,11 @@ def execute(self): self.update_frame('frame_0', bird_eye_view_1) try: - #img = cv2.resize(bird_eye_view_1, (int(200), int(66))) img = cv2.resize(bird_eye_view_1, (int(66), int(200))) - img = Image.fromarray(img) - image = self.transformations(img).unsqueeze(0) + image = self.transformations(image=img) + image = image['image'] + image = image.unsqueeze(0) + image = FLOAT(image).to(self.device) start_time = time.time() diff --git a/behavior_metrics/brains/CARLA/brain_carla_bird_eye_deep_learning_torch_tensorrt_int_8.py b/behavior_metrics/brains/CARLA/brain_carla_bird_eye_deep_learning_torch_tensorrt_int_8.py index ca07c9e8..51487230 100644 --- a/behavior_metrics/brains/CARLA/brain_carla_bird_eye_deep_learning_torch_tensorrt_int_8.py +++ b/behavior_metrics/brains/CARLA/brain_carla_bird_eye_deep_learning_torch_tensorrt_int_8.py @@ -1,8 +1,11 @@ -from torchvision import transforms from PIL import Image from brains.f1.torch_utils.pilotnet import PilotNet from utils.constants import PRETRAINED_MODELS_DIR, ROOT_PATH from os import path +from albumentations import ( + Compose, Normalize +) +from albumentations.pytorch.transforms import ToTensorV2 import numpy as np @@ -43,8 +46,9 @@ def 
__init__(self, sensors, actuators, model=None, handler=None, config=None): self.gpu_inference = config['GPU'] self.device = torch.device('cuda') self.first_image = None - self.transformations = transforms.Compose([ - transforms.ToTensor() + self.transformations = Compose([ + Normalize(), + ToTensorV2() ]) self.suddenness_distance = [] @@ -127,10 +131,10 @@ def execute(self): self.update_frame('frame_0', bird_eye_view_1) try: - #img = cv2.resize(bird_eye_view_1, (int(200), int(66))) img = cv2.resize(bird_eye_view_1, (int(66), int(200))) - img = Image.fromarray(img) - image = self.transformations(img).unsqueeze(0) + image = self.transformations(image=img) + image = image['image'] + image = image.unsqueeze(0) image = FLOAT(image).to(self.device) start_time = time.time() From c9a2e61c70be9ca077975875d9f8a0510116cd2e Mon Sep 17 00:00:00 2001 From: sergiopaniego Date: Fri, 5 May 2023 10:55:16 +0000 Subject: [PATCH 24/36] Updated image transformations for PyTorch --- .../brain_carla_bird_eye_deep_learning_torch_tensorrt_float.py | 1 - 1 file changed, 1 deletion(-) diff --git a/behavior_metrics/brains/CARLA/brain_carla_bird_eye_deep_learning_torch_tensorrt_float.py b/behavior_metrics/brains/CARLA/brain_carla_bird_eye_deep_learning_torch_tensorrt_float.py index 815c23cb..f07bd540 100644 --- a/behavior_metrics/brains/CARLA/brain_carla_bird_eye_deep_learning_torch_tensorrt_float.py +++ b/behavior_metrics/brains/CARLA/brain_carla_bird_eye_deep_learning_torch_tensorrt_float.py @@ -131,7 +131,6 @@ def execute(self): image = self.transformations(image=img) image = image['image'] image = image.unsqueeze(0) - image = FLOAT(image).to(self.device) start_time = time.time() From 2b78fde6e8b44dc289d73299c8191d8510140f6b Mon Sep 17 00:00:00 2001 From: enrique Date: Mon, 8 May 2023 11:03:45 +0200 Subject: [PATCH 25/36] added main car json file --- .../CARLA_object_files/main_car.json | 161 ++++++++++++++++++ 1 file changed, 161 insertions(+) create mode 100644 behavior_metrics/configs/CARLA_launch_files/CARLA_object_files/main_car.json diff --git a/behavior_metrics/configs/CARLA_launch_files/CARLA_object_files/main_car.json b/behavior_metrics/configs/CARLA_launch_files/CARLA_object_files/main_car.json new file mode 100644 index 00000000..d687d1c9 --- /dev/null +++ b/behavior_metrics/configs/CARLA_launch_files/CARLA_object_files/main_car.json @@ -0,0 +1,161 @@ +{ + "objects": + [ + { + "type": "sensor.pseudo.traffic_lights", + "id": "traffic_lights" + }, + { + "type": "sensor.pseudo.objects", + "id": "objects" + }, + { + "type": "sensor.pseudo.actor_list", + "id": "actor_list" + }, + { + "type": "sensor.pseudo.markers", + "id": "markers" + }, + { + "type": "sensor.pseudo.opendrive_map", + "id": "map" + }, + { + "type": "vehicle.tesla.model3", + "id": "ego_vehicle", + "sensors": + [ + { + "type": "sensor.camera.rgb", + "id": "rgb_front", + "spawn_point": {"x": 2.5, "y": 0.0, "z": 0.8, "roll": 0.0, "pitch": 0.0, "yaw": 0.0}, + "image_size_x": 640, + "image_size_y": 480 + }, + { + "type": "sensor.camera.rgb", + "id": "rgb_view", + "spawn_point": {"x": -4.5, "y": 0.0, "z": 2.8, "roll": 0.0, "pitch": 20.0, "yaw": 0.0}, + "image_size_x": 800, + "image_size_y": 600, + "fov": 90.0, + "attached_objects": + [ + { + "type": "actor.pseudo.control", + "id": "control" + } + ] + }, + { + "type": "sensor.lidar.ray_cast", + "id": "lidar", + "spawn_point": {"x": 0.0, "y": 0.0, "z": 2.4, "roll": 0.0, "pitch": 0.0, "yaw": 0.0}, + "range": 50, + "channels": 32, + "points_per_second": 320000, + "upper_fov": 2.0, + "lower_fov": -26.8, + 
"rotation_frequency": 20, + "noise_stddev": 0.0 + }, + { + "type": "sensor.lidar.ray_cast_semantic", + "id": "semantic_lidar", + "spawn_point": {"x": 0.0, "y": 0.0, "z": 2.4, "roll": 0.0, "pitch": 0.0, "yaw": 0.0}, + "range": 50, + "channels": 32, + "points_per_second": 320000, + "upper_fov": 2.0, + "lower_fov": -26.8, + "rotation_frequency": 20 + }, + { + "type": "sensor.other.radar", + "id": "radar_front", + "spawn_point": {"x": 2.0, "y": 0.0, "z": 2.0, "roll": 0.0, "pitch": 0.0, "yaw": 0.0}, + "horizontal_fov": 30.0, + "vertical_fov": 10.0, + "points_per_second": 1500, + "range": 100.0 + }, + { + "type": "sensor.camera.semantic_segmentation", + "id": "semantic_segmentation_front", + "spawn_point": {"x": 2.0, "y": 0.0, "z": 2.0, "roll": 0.0, "pitch": 0.0, "yaw": 0.0}, + "fov": 90.0, + "image_size_x": 400, + "image_size_y": 70 + }, + { + "type": "sensor.camera.depth", + "id": "depth_front", + "spawn_point": {"x": 2.0, "y": 0.0, "z": 2.0, "roll": 0.0, "pitch": 0.0, "yaw": 0.0}, + "fov": 90.0, + "image_size_x": 400, + "image_size_y": 70 + }, + { + "type": "sensor.camera.dvs", + "id": "dvs_front", + "spawn_point": {"x": 2.0, "y": 0.0, "z": 2.0, "roll": 0.0, "pitch": 0.0, "yaw": 0.0}, + "fov": 90.0, + "image_size_x": 400, + "image_size_y": 70, + "positive_threshold": 0.3, + "negative_threshold": 0.3, + "sigma_positive_threshold": 0.0, + "sigma_negative_threshold": 0.0, + "use_log": true, + "log_eps": 0.001 + }, + { + "type": "sensor.other.gnss", + "id": "gnss", + "spawn_point": {"x": 1.0, "y": 0.0, "z": 2.0}, + "noise_alt_stddev": 0.0, "noise_lat_stddev": 0.0, "noise_lon_stddev": 0.0, + "noise_alt_bias": 0.0, "noise_lat_bias": 0.0, "noise_lon_bias": 0.0 + }, + { + "type": "sensor.other.imu", + "id": "imu", + "spawn_point": {"x": 2.0, "y": 0.0, "z": 2.0, "roll": 0.0, "pitch": 0.0, "yaw": 0.0}, + "noise_accel_stddev_x": 0.0, "noise_accel_stddev_y": 0.0, "noise_accel_stddev_z": 0.0, + "noise_gyro_stddev_x": 0.0, "noise_gyro_stddev_y": 0.0, "noise_gyro_stddev_z": 0.0, + "noise_gyro_bias_x": 0.0, "noise_gyro_bias_y": 0.0, "noise_gyro_bias_z": 0.0 + }, + { + "type": "sensor.other.collision", + "id": "collision", + "spawn_point": {"x": 0.0, "y": 0.0, "z": 0.0} + }, + { + "type": "sensor.other.lane_invasion", + "id": "lane_invasion", + "spawn_point": {"x": 0.0, "y": 0.0, "z": 0.0} + }, + { + "type": "sensor.pseudo.tf", + "id": "tf" + }, + { + "type": "sensor.pseudo.objects", + "id": "objects" + }, + { + "type": "sensor.pseudo.odom", + "id": "odometry" + }, + { + "type": "sensor.pseudo.speedometer", + "id": "speedometer" + }, + { + "type": "actor.pseudo.control", + "id": "control" + } + ] + } + ] +} From eae3ef5e1eb9b29177aba44ec256b79eaa417f1d Mon Sep 17 00:00:00 2001 From: enrique Date: Mon, 8 May 2023 12:06:39 +0200 Subject: [PATCH 26/36] added the new object_path functionality to the launch files --- .../town_02_anticlockwise_low.launch | 79 +++++++++++++++++++ .../default_carla_subjective_vision.yml | 2 +- 2 files changed, 80 insertions(+), 1 deletion(-) create mode 100644 behavior_metrics/configs/CARLA_launch_files/town_02_anticlockwise_low.launch diff --git a/behavior_metrics/configs/CARLA_launch_files/town_02_anticlockwise_low.launch b/behavior_metrics/configs/CARLA_launch_files/town_02_anticlockwise_low.launch new file mode 100644 index 00000000..23f18ef0 --- /dev/null +++ b/behavior_metrics/configs/CARLA_launch_files/town_02_anticlockwise_low.launch @@ -0,0 +1,79 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 
+  <!-- remaining launch-file XML stripped during extraction; see the illustrative sketch above -->
\ No newline at end of file
diff --git a/behavior_metrics/configs/default_carla_subjective_vision.yml b/behavior_metrics/configs/default_carla_subjective_vision.yml
index 2db6fc2b..6215ddeb 100644
--- a/behavior_metrics/configs/default_carla_subjective_vision.yml
+++ b/behavior_metrics/configs/default_carla_subjective_vision.yml
@@ -47,7 +47,7 @@ Behaviors:
       ImageTranform: ''
     Type: 'CARLA'
   Simulation:
-    World: configs/CARLA_launch_files/town_02_anticlockwise.launch
+    World: configs/CARLA_launch_files/town_02_anticlockwise_low.launch
     RandomSpawnPoint: False
   Dataset:
     In: '/tmp/my_bag.bag'
     Out: ''

From ea02514ff4cbc7b0d7cc763be4e693e598206480 Mon Sep 17 00:00:00 2001
From: enrique
Date: Mon, 8 May 2023 12:27:30 +0200
Subject: [PATCH 27/36] added the option to launch CARLA in low or high quality mode

---
 .../CARLA_launch_files/town_02_anticlockwise_low.launch | 1 +
 behavior_metrics/utils/environment.py                   | 8 +++++++-
 2 files changed, 8 insertions(+), 1 deletion(-)

diff --git a/behavior_metrics/configs/CARLA_launch_files/town_02_anticlockwise_low.launch b/behavior_metrics/configs/CARLA_launch_files/town_02_anticlockwise_low.launch
index 23f18ef0..30b69148 100644
--- a/behavior_metrics/configs/CARLA_launch_files/town_02_anticlockwise_low.launch
+++ b/behavior_metrics/configs/CARLA_launch_files/town_02_anticlockwise_low.launch
@@ -4,6 +4,7 @@
+  <arg name="quality" default="Low"/>
diff --git a/behavior_metrics/utils/environment.py b/behavior_metrics/utils/environment.py
index a598ba02..02bf0a71 100644
--- a/behavior_metrics/utils/environment.py
+++ b/behavior_metrics/utils/environment.py
@@ -56,7 +56,13 @@ def launch_env(launch_file, random_spawn_point=False, carla_simulator=False, con
             spawn_point.attrib['default'] = random.choice(CARLA_TOWNS_SPAWN_POINTS[town.attrib['default']])
         tree.write('tmp_circuit.launch')
     with open("/tmp/.carlalaunch_stdout.log", "w") as out, open("/tmp/.carlalaunch_stderr.log", "w") as err:
-        subprocess.Popen([os.environ["CARLA_ROOT"] + "CarlaUE4.sh", "-RenderOffScreen"], stdout=out, stderr=err)
+        tree = ET.parse(ROOT_PATH + '/' + launch_file)
+        root = tree.getroot()
+        quality = root.find(".//*[@name=\"quality\"]")
+        if quality is None:
+            subprocess.Popen([os.environ["CARLA_ROOT"] + "CarlaUE4.sh", "-RenderOffScreen"], stdout=out, stderr=err)
+        elif quality.attrib['default'] == 'Low':
+            subprocess.Popen([os.environ["CARLA_ROOT"] + "CarlaUE4.sh", "-RenderOffScreen", "-quality-level=Low"], stdout=out, stderr=err)
         #subprocess.Popen(["/home/jderobot/Documents/Projects/carla_simulator_0_9_13/CarlaUE4.sh", "-RenderOffScreen", "-quality-level=Low"], stdout=out, stderr=err)
         logger.info("SimulatorEnv: launching simulator server.")
         time.sleep(5)

From 6acc77f8dd4d3ca2ac815fa90346f7ac26c49967 Mon Sep 17 00:00:00 2001
From: sergiopaniego
Date: Wed, 17 May 2023 16:24:36 +0200
Subject: [PATCH 28/36] Updated TF Lite brain

---
 .../brain_carla_bird_eye_deep_learning_tf_lite.py | 10 ++++++++++
 1 file changed, 10 insertions(+)

diff --git a/behavior_metrics/brains/CARLA/brain_carla_bird_eye_deep_learning_tf_lite.py b/behavior_metrics/brains/CARLA/brain_carla_bird_eye_deep_learning_tf_lite.py
index 80d4a15c..f2a30d0c 100644
--- a/behavior_metrics/brains/CARLA/brain_carla_bird_eye_deep_learning_tf_lite.py
+++ b/behavior_metrics/brains/CARLA/brain_carla_bird_eye_deep_learning_tf_lite.py
@@ -89,12 +89,22 @@ def optim_inference(self, img):
             output -- prediction from the model
         """
         # Pre-processing
+        if self.net.get_input_details()[0]['dtype'] == np.uint8:
+            input_scale, input_zero_point = 
self.net.get_input_details()[0]["quantization"] + img = img / input_scale + input_zero_point + img = img.astype(self.net.get_input_details()[0]["dtype"]) + self.net.set_tensor(self.input_index, img) # Run inference. self.net.invoke() # Post-processing output = self.net.get_tensor(self.output_index) + if self.net.get_input_details()[0]['dtype'] == np.uint8: + output_scale, input_zero_point = self.net.get_output_details()[0]["quantization"] + output = output.astype(np.float32) + output = output * output_scale + input_zero_point + return output def update_frame(self, frame_id, data): From d5d4831f69251c2e2d1d462185889ef8679d653a Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 22 May 2023 22:21:09 +0000 Subject: [PATCH 29/36] Bump requests from 2.21.0 to 2.31.0 Bumps [requests](https://github.com/psf/requests) from 2.21.0 to 2.31.0. - [Release notes](https://github.com/psf/requests/releases) - [Changelog](https://github.com/psf/requests/blob/main/HISTORY.md) - [Commits](https://github.com/psf/requests/compare/v2.21.0...v2.31.0) --- updated-dependencies: - dependency-name: requests dependency-type: direct:production ... Signed-off-by: dependabot[bot] --- requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index fff71eb4..bae06fb7 100644 --- a/requirements.txt +++ b/requirements.txt @@ -19,7 +19,7 @@ netifaces==0.10.9 Pillow==9.3.0 pyglet==1.5.0 gym==0.17.3 -requests==2.21.0 +requests==2.31.0 six>=1.14.0 empy==3.3.2 vcstool==0.2.14 From aebfd24af4fc2c325158b0512f7cf1f7aad0462e Mon Sep 17 00:00:00 2001 From: sergiopaniego Date: Wed, 24 May 2023 15:36:55 +0200 Subject: [PATCH 30/36] Updated int8 quantization output conversion --- .../CARLA/brain_carla_bird_eye_deep_learning_tf_lite.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/behavior_metrics/brains/CARLA/brain_carla_bird_eye_deep_learning_tf_lite.py b/behavior_metrics/brains/CARLA/brain_carla_bird_eye_deep_learning_tf_lite.py index f2a30d0c..9e842073 100644 --- a/behavior_metrics/brains/CARLA/brain_carla_bird_eye_deep_learning_tf_lite.py +++ b/behavior_metrics/brains/CARLA/brain_carla_bird_eye_deep_learning_tf_lite.py @@ -103,7 +103,7 @@ def optim_inference(self, img): if self.net.get_input_details()[0]['dtype'] == np.uint8: output_scale, input_zero_point = self.net.get_output_details()[0]["quantization"] output = output.astype(np.float32) - output = output * output_scale + input_zero_point + output = (output - input_zero_point) * output_scale return output @@ -164,7 +164,7 @@ def execute(self): self.update_pose(self.pose.getPose3d()) - image_shape=(50, 150) + image_shape=(66, 200) img_base = cv2.resize(bird_eye_view_1, image_shape) AUGMENTATIONS_TEST = Compose([ From d320d29431a203228227f398360ac1751d209795 Mon Sep 17 00:00:00 2001 From: sergiopaniego Date: Fri, 26 May 2023 12:48:32 +0200 Subject: [PATCH 31/36] Check if attribute exists before using when checking recorded metrics --- behavior_metrics/utils/controller_carla.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/behavior_metrics/utils/controller_carla.py b/behavior_metrics/utils/controller_carla.py index 3693ae67..053b4527 100644 --- a/behavior_metrics/utils/controller_carla.py +++ b/behavior_metrics/utils/controller_carla.py @@ -311,7 +311,7 @@ def stop_recording_metrics(self): target_brain_iterations_real_time = 1 / (self.pilot.time_cycle / 1000) - if self.pilot.brains.active_brain.cameras_first_images != []: 
+ if hasattr(self.pilot.brains.active_brain, 'cameras_first_images') and self.pilot.brains.active_brain.cameras_first_images != []: first_images = self.pilot.brains.active_brain.cameras_first_images last_images = self.pilot.brains.active_brain.cameras_last_images else: From 1a259804fdc229831cb8b8a57a0e030d20677151 Mon Sep 17 00:00:00 2001 From: sergiopaniego Date: Wed, 28 Jun 2023 15:27:34 +0200 Subject: [PATCH 32/36] Updated brains --- .../brains/CARLA/brain_carla_bird_eye_deep_learning.py | 2 +- .../brain_carla_bird_eye_deep_learning_tensor_rt.py | 8 +++++--- .../brain_carla_bird_eye_deep_learning_tf_lite.py | 10 +++++----- .../CARLA/brain_carla_bird_eye_deep_learning_torch.py | 4 +--- ...arla_bird_eye_deep_learning_torch_tensorrt_float.py | 2 -- ...arla_bird_eye_deep_learning_torch_tensorrt_int_8.py | 3 +-- 6 files changed, 13 insertions(+), 16 deletions(-) diff --git a/behavior_metrics/brains/CARLA/brain_carla_bird_eye_deep_learning.py b/behavior_metrics/brains/CARLA/brain_carla_bird_eye_deep_learning.py index 19fbedfd..58a4cdee 100644 --- a/behavior_metrics/brains/CARLA/brain_carla_bird_eye_deep_learning.py +++ b/behavior_metrics/brains/CARLA/brain_carla_bird_eye_deep_learning.py @@ -134,7 +134,7 @@ def execute(self): self.update_pose(self.pose.getPose3d()) - image_shape=(50, 150) + image_shape=(66, 200) img_base = cv2.resize(bird_eye_view_1, image_shape) AUGMENTATIONS_TEST = Compose([ diff --git a/behavior_metrics/brains/CARLA/brain_carla_bird_eye_deep_learning_tensor_rt.py b/behavior_metrics/brains/CARLA/brain_carla_bird_eye_deep_learning_tensor_rt.py index 9c68d36f..1934174c 100644 --- a/behavior_metrics/brains/CARLA/brain_carla_bird_eye_deep_learning_tensor_rt.py +++ b/behavior_metrics/brains/CARLA/brain_carla_bird_eye_deep_learning_tensor_rt.py @@ -19,6 +19,8 @@ from tensorflow.python.framework.errors_impl import NotFoundError from tensorflow.python.framework.errors_impl import UnimplementedError +from tensorflow.python.saved_model import signature_constants +from tensorflow.python.saved_model import tag_constants import tensorflow as tf #import os @@ -68,9 +70,9 @@ def __init__(self, sensors, actuators, handler, model, config=None): print(tf.__file__) print() print(PRETRAINED_MODELS + model) - self.net = tf.saved_model.load(PRETRAINED_MODELS + model) print('llega!') - self.infer = self.net.signatures['serving_default'] + self.net = tf.saved_model.load(PRETRAINED_MODELS + model, tags=[tag_constants.SERVING]) + self.infer = self.net.signatures[signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY] self.output_tensorname = list(self.infer.structured_outputs.keys())[0] self.inf_func = self.tftrt_inference @@ -155,7 +157,7 @@ def execute(self): self.update_pose(self.pose.getPose3d()) - image_shape=(50, 150) + image_shape=(66, 200) img_base = cv2.resize(bird_eye_view_1, image_shape) AUGMENTATIONS_TEST = Compose([ diff --git a/behavior_metrics/brains/CARLA/brain_carla_bird_eye_deep_learning_tf_lite.py b/behavior_metrics/brains/CARLA/brain_carla_bird_eye_deep_learning_tf_lite.py index 9e842073..af6517e5 100644 --- a/behavior_metrics/brains/CARLA/brain_carla_bird_eye_deep_learning_tf_lite.py +++ b/behavior_metrics/brains/CARLA/brain_carla_bird_eye_deep_learning_tf_lite.py @@ -24,9 +24,9 @@ import os os.environ['CUDA_VISIBLE_DEVICES'] = '-1' -gpus = tf.config.experimental.list_physical_devices('GPU') -for gpu in gpus: - tf.config.experimental.set_memory_growth(gpu, True) +#gpus = tf.config.experimental.list_physical_devices('GPU') +#for gpu in gpus: +# 
tf.config.experimental.set_memory_growth(gpu, True) class Brain: @@ -66,7 +66,7 @@ def __init__(self, sensors, actuators, handler, model, config=None): logger.info("Using TF lite models.....") self.net = tf.lite.Interpreter(model_path= PRETRAINED_MODELS + model) self.net.allocate_tensors() - self.input_index = self.net.get_input_details()[0]["index"] + self.input_index = self.net.get_input_details()[0] self.output_index = self.net.get_output_details()[0]["index"] self.inf_func = self.optim_inference @@ -94,7 +94,7 @@ def optim_inference(self, img): img = img / input_scale + input_zero_point img = img.astype(self.net.get_input_details()[0]["dtype"]) - self.net.set_tensor(self.input_index, img) + self.net.set_tensor(self.input_index["index"], img.astype(self.input_index["dtype"])) # Run inference. self.net.invoke() # Post-processing diff --git a/behavior_metrics/brains/CARLA/brain_carla_bird_eye_deep_learning_torch.py b/behavior_metrics/brains/CARLA/brain_carla_bird_eye_deep_learning_torch.py index 75e8ac6d..881ec4db 100644 --- a/behavior_metrics/brains/CARLA/brain_carla_bird_eye_deep_learning_torch.py +++ b/behavior_metrics/brains/CARLA/brain_carla_bird_eye_deep_learning_torch.py @@ -132,8 +132,6 @@ def execute(self): start_time = time.time() with torch.no_grad(): prediction = self.net(image).cpu().numpy() if self.gpu_inference else self.net(image).numpy() - print(prediction) - #print(time.time() - start_time) self.inference_times.append(time.time() - start_time) throttle = prediction[0][0] @@ -153,7 +151,7 @@ def execute(self): self.motors.sendSteer(0.0) self.motors.sendBrake(0) else: - self.motors.sendThrottle(0.5) + self.motors.sendThrottle(throttle) self.motors.sendSteer(steer) self.motors.sendBrake(break_command) diff --git a/behavior_metrics/brains/CARLA/brain_carla_bird_eye_deep_learning_torch_tensorrt_float.py b/behavior_metrics/brains/CARLA/brain_carla_bird_eye_deep_learning_torch_tensorrt_float.py index f07bd540..581b61d6 100644 --- a/behavior_metrics/brains/CARLA/brain_carla_bird_eye_deep_learning_torch_tensorrt_float.py +++ b/behavior_metrics/brains/CARLA/brain_carla_bird_eye_deep_learning_torch_tensorrt_float.py @@ -136,8 +136,6 @@ def execute(self): start_time = time.time() with torch.no_grad(): prediction = self.net(image).cpu().numpy() if self.gpu_inference else self.net(image).numpy() - #print(prediction) - #print(time.time() - start_time) self.inference_times.append(time.time() - start_time) throttle = prediction[0][0] diff --git a/behavior_metrics/brains/CARLA/brain_carla_bird_eye_deep_learning_torch_tensorrt_int_8.py b/behavior_metrics/brains/CARLA/brain_carla_bird_eye_deep_learning_torch_tensorrt_int_8.py index 51487230..4cd278bf 100644 --- a/behavior_metrics/brains/CARLA/brain_carla_bird_eye_deep_learning_torch_tensorrt_int_8.py +++ b/behavior_metrics/brains/CARLA/brain_carla_bird_eye_deep_learning_torch_tensorrt_int_8.py @@ -140,8 +140,7 @@ def execute(self): start_time = time.time() with torch.no_grad(): prediction = self.net(image).cpu().numpy() if self.gpu_inference else self.net(image).numpy() - #print(prediction) - #print(time.time() - start_time) + self.inference_times.append(time.time() - start_time) throttle = prediction[0][0] From 74415f6857d5592246bf7961ed8e3e09e87b960d Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 6 Jul 2023 22:01:36 +0000 Subject: [PATCH 33/36] Bump scipy from 1.4.1 to 1.10.0 Bumps [scipy](https://github.com/scipy/scipy) from 1.4.1 to 1.10.0. 
- [Release notes](https://github.com/scipy/scipy/releases) - [Commits](https://github.com/scipy/scipy/compare/v1.4.1...v1.10.0) --- updated-dependencies: - dependency-name: scipy dependency-type: direct:production ... Signed-off-by: dependabot[bot] --- requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index fff71eb4..62384213 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,6 +1,6 @@ PyQt5==5.15.0 pyQt5-sip==12.8.1 -scipy==1.4.1 +scipy==1.10.0 jupyterlab==2.2.10 PyQt3D==5.15.0 npyscreen==4.10.5 From 84ade29e5d3c722d047077435ca7f6eb82d5b261 Mon Sep 17 00:00:00 2001 From: sergiopaniego Date: Wed, 6 Sep 2023 10:46:40 +0200 Subject: [PATCH 34/36] Carla brains updated --- ..._carla_bird_eye_deep_learning_tensor_rt.py | 5 - ..._bird_eye_deep_learning_torch_optimized.py | 162 ++++++++++++++ .../CARLA/brain_carla_segmented_image.py | 198 ------------------ 3 files changed, 162 insertions(+), 203 deletions(-) create mode 100644 behavior_metrics/brains/CARLA/brain_carla_bird_eye_deep_learning_torch_optimized.py delete mode 100644 behavior_metrics/brains/CARLA/brain_carla_segmented_image.py diff --git a/behavior_metrics/brains/CARLA/brain_carla_bird_eye_deep_learning_tensor_rt.py b/behavior_metrics/brains/CARLA/brain_carla_bird_eye_deep_learning_tensor_rt.py index 1934174c..baeeb01c 100644 --- a/behavior_metrics/brains/CARLA/brain_carla_bird_eye_deep_learning_tensor_rt.py +++ b/behavior_metrics/brains/CARLA/brain_carla_bird_eye_deep_learning_tensor_rt.py @@ -66,11 +66,6 @@ def __init__(self, sensors, actuators, handler, model, config=None): logger.info("** Load TF model **") logger.info("Using TensorRT models.....") - print() - print(tf.__file__) - print() - print(PRETRAINED_MODELS + model) - print('llega!') self.net = tf.saved_model.load(PRETRAINED_MODELS + model, tags=[tag_constants.SERVING]) self.infer = self.net.signatures[signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY] self.output_tensorname = list(self.infer.structured_outputs.keys())[0] diff --git a/behavior_metrics/brains/CARLA/brain_carla_bird_eye_deep_learning_torch_optimized.py b/behavior_metrics/brains/CARLA/brain_carla_bird_eye_deep_learning_torch_optimized.py new file mode 100644 index 00000000..892b53a7 --- /dev/null +++ b/behavior_metrics/brains/CARLA/brain_carla_bird_eye_deep_learning_torch_optimized.py @@ -0,0 +1,162 @@ +from PIL import Image +from brains.f1.torch_utils.pilotnet import PilotNet +from utils.constants import PRETRAINED_MODELS_DIR, ROOT_PATH +from os import path +from albumentations import ( + Compose, Normalize +) +from albumentations.pytorch.transforms import ToTensorV2 + +import numpy as np + +import torch +import torchvision +import cv2 +import time +import os +import math +import carla + +PRETRAINED_MODELS = ROOT_PATH + '/' + PRETRAINED_MODELS_DIR + 'carla_tf_models/' +FLOAT = torch.FloatTensor + +class Brain: + """Specific brain for the CARLA robot. See header.""" + + def __init__(self, sensors, actuators, model=None, handler=None, config=None): + """Constructor of the class. + + Arguments: + sensors {robot.sensors.Sensors} -- Sensors instance of the robot + actuators {robot.actuators.Actuators} -- Actuators instance of the robot + + Keyword Arguments: + handler {brains.brain_handler.Brains} -- Handler of the current brain. 
Communication with the controller + (default: {None}) + """ + self.motors = actuators.get_motor('motors_0') + self.camera_0 = sensors.get_camera('camera_0') + self.camera_1 = sensors.get_camera('camera_1') + self.camera_2 = sensors.get_camera('camera_2') + self.camera_3 = sensors.get_camera('camera_3') + self.bird_eye_view = sensors.get_bird_eye_view('bird_eye_view_0') + self.handler = handler + self.cont = 0 + self.inference_times = [] + self.gpu_inference = config['GPU'] + self.device = torch.device('cuda' if (torch.cuda.is_available() and self.gpu_inference) else 'cpu') + self.first_image = None + self.transformations = Compose([ + Normalize(), + ToTensorV2() + ]) + + self.suddenness_distance = [] + self.previous_v = None + self.previous_w = None + + if config: + if 'ImageCrop' in config.keys(): + self.cropImage = config['ImageCrop'] + else: + self.cropImage = True + + if model: + if not path.exists(PRETRAINED_MODELS + model): + print("File " + model + " cannot be found in " + PRETRAINED_MODELS) + + #if config['UseOptimized']: + self.net = torch.jit.load(PRETRAINED_MODELS + model).to(self.device) +# self.clean_model() + #else: + # self.net = PilotNet((200,66,3), 3).to(self.device) + # self.net.load_state_dict(torch.load(PRETRAINED_MODELS + model,map_location=self.device)) + # self.net.eval() + else: + print("Brain not loaded") + + client = carla.Client('localhost', 2000) + client.set_timeout(10.0) # seconds + world = client.get_world() + + time.sleep(5) + self.vehicle = world.get_actors().filter('vehicle.*')[0] + + def update_frame(self, frame_id, data): + """Update the information to be shown in one of the GUI's frames. + + Arguments: + frame_id {str} -- Id of the frame that will represent the data + data {*} -- Data to be shown in the frame. Depending on the type of frame (rgbimage, laser, pose3d, etc) + """ + if data.shape[0] != data.shape[1]: + if data.shape[0] > data.shape[1]: + difference = data.shape[0] - data.shape[1] + extra_left, extra_right = int(difference/2), int(difference/2) + extra_top, extra_bottom = 0, 0 + else: + difference = data.shape[1] - data.shape[0] + extra_left, extra_right = 0, 0 + extra_top, extra_bottom = int(difference/2), int(difference/2) + + + data = np.pad(data, ((extra_top, extra_bottom), (extra_left, extra_right), (0, 0)), mode='constant', constant_values=0) + + self.handler.update_frame(frame_id, data) + + def execute(self): + """Main loop of the brain. 
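+        It reads the camera images and the bird's-eye view, runs the TorchScript network and sends throttle, steer and brake commands to the motors.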
This will be called iteratively each TIME_CYCLE (see pilot.py)""" + + self.cont += 1 + + image = self.camera_0.getImage().data + image_1 = self.camera_1.getImage().data + image_2 = self.camera_2.getImage().data + image_3 = self.camera_3.getImage().data + + bird_eye_view_1 = self.bird_eye_view.getImage(self.vehicle) + bird_eye_view_1 = cv2.cvtColor(bird_eye_view_1, cv2.COLOR_BGR2RGB) + + self.update_frame('frame_1', image_1) + self.update_frame('frame_2', image_2) + self.update_frame('frame_3', image_3) + + self.update_frame('frame_0', bird_eye_view_1) + + try: + img = cv2.resize(bird_eye_view_1, (int(66), int(200))) + image = self.transformations(image=img) + image = image['image'] + image = image.unsqueeze(0) + image = FLOAT(image).to(self.device) + + start_time = time.time() + with torch.no_grad(): + prediction = self.net(image).cpu().numpy() if self.gpu_inference else self.net(image).numpy() + self.inference_times.append(time.time() - start_time) + + throttle = prediction[0][0] + steer = prediction[0][1] * (1 - (-1)) + (-1) + break_command = prediction[0][2] + + speed = self.vehicle.get_velocity() + vehicle_speed = 3.6 * math.sqrt(speed.x**2 + speed.y**2 + speed.z**2) + + if vehicle_speed > 30: + self.motors.sendThrottle(0) + self.motors.sendSteer(steer) + self.motors.sendBrake(1.0) + else: + if vehicle_speed < 5: + self.motors.sendThrottle(1.0) + self.motors.sendSteer(0.0) + self.motors.sendBrake(0) + else: + self.motors.sendThrottle(throttle) + self.motors.sendSteer(steer) + self.motors.sendBrake(break_command) + + except Exception as err: + print(err) + + diff --git a/behavior_metrics/brains/CARLA/brain_carla_segmented_image.py b/behavior_metrics/brains/CARLA/brain_carla_segmented_image.py deleted file mode 100644 index 62a28441..00000000 --- a/behavior_metrics/brains/CARLA/brain_carla_segmented_image.py +++ /dev/null @@ -1,198 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -import csv -import cv2 -import math -import numpy as np -import threading -import time -import carla -from albumentations import ( - Compose, Normalize, RandomRain, RandomBrightness, RandomShadow, RandomSnow, RandomFog, RandomSunFlare -) -from utils.constants import DATASETS_DIR, ROOT_PATH - - - -GENERATED_DATASETS_DIR = ROOT_PATH + '/' + DATASETS_DIR - - -class Brain: - - def __init__(self, sensors, actuators, handler, config=None): - self.camera = sensors.get_camera('camera_0') - self.camera_1 = sensors.get_camera('camera_1') - self.camera_2 = sensors.get_camera('camera_2') - self.camera_3 = sensors.get_camera('camera_3') - - self.pose = sensors.get_pose3d('pose3d_0') - - self.bird_eye_view = sensors.get_bird_eye_view('bird_eye_view_0') - - self.motors = actuators.get_motor('motors_0') - self.handler = handler - self.config = config - - self.threshold_image = np.zeros((640, 360, 3), np.uint8) - self.color_image = np.zeros((640, 360, 3), np.uint8) - self.lock = threading.Lock() - self.threshold_image_lock = threading.Lock() - self.color_image_lock = threading.Lock() - self.cont = 0 - self.iteration = 0 - - # self.previous_timestamp = 0 - # self.previous_image = 0 - - self.previous_v = None - self.previous_w = None - self.previous_w_normalized = None - - client = carla.Client('localhost', 2000) - client.set_timeout(10.0) # seconds - world = client.get_world() - time.sleep(3) - self.vehicle = world.get_actors().filter('vehicle.*')[0] - - self.counter = 0 - - traffic_lights = world.get_actors().filter('traffic.traffic_light') - traffic_speed_limits = world.get_actors().filter('traffic.speed_limit*') - 
print(traffic_speed_limits) - for traffic_light in traffic_lights: - traffic_light.set_green_time(20000) - traffic_light.set_state(carla.TrafficLightState.Green) - - for speed_limit in traffic_speed_limits: - success = speed_limit.destroy() - print(success) - - - route = ["Straight", "Straight", "Straight", "Straight", "Straight", - "Straight", "Straight", "Straight", "Straight", "Straight", - "Straight", "Straight", "Straight", "Straight", "Straight", - "Straight", "Straight", "Straight", "Straight", "Straight", - "Straight", "Straight", "Straight", "Straight", "Straight", - "Straight", "Straight", "Straight", "Straight", "Straight"] - traffic_manager = client.get_trafficmanager() - traffic_manager.set_route(self.vehicle, route) - self.vehicle.set_autopilot(True) - - - def update_frame(self, frame_id, data): - """Update the information to be shown in one of the GUI's frames. - - Arguments: - frame_id {str} -- Id of the frame that will represent the data - data {*} -- Data to be shown in the frame. Depending on the type of frame (rgbimage, laser, pose3d, etc) - """ - self.handler.update_frame(frame_id, data) - - def update_pose(self, pose_data): - self.handler.update_pose3d(pose_data) - - def get_center(self, lines): - #print('ENTRA!') - try: - point = np.divide(np.max(np.nonzero(lines)) - np.min(np.nonzero(lines)), 2) - return np.min(np.nonzero(lines)) + point - except ValueError: - print(f"No lines detected in the image") - return 0 - - def execute(self): - image_2 = self.camera_2.getImage().data - hsv_nemo = cv2.cvtColor(image_2, cv2.COLOR_RGB2HSV) - - - unique_values=set( tuple(v) for m2d in hsv_nemo for v in m2d ) - #print(unique_values) - #print(len(unique_values)) - - - - light_sidewalk = (151, 217, 243) - dark_sidewalk = (153, 219, 245) - - light_pavement = (149, 127, 127) - dark_pavement = (151, 129, 129) - - mask_sidewalk = cv2.inRange(hsv_nemo, light_sidewalk, dark_sidewalk) - result_sidewalk = cv2.bitwise_and(image_2, image_2, mask=mask_sidewalk) - - mask_pavement = cv2.inRange(hsv_nemo, light_pavement, dark_pavement) - result_pavement = cv2.bitwise_and(image_2, image_2, mask=mask_pavement) - - - # Adjust according to your adjacency requirement. - kernel = np.ones((3, 3), dtype=np.uint8) - - # Dilating masks to expand boundary. - color1_mask = cv2.dilate(mask_sidewalk, kernel, iterations=1) - color2_mask = cv2.dilate(mask_pavement, kernel, iterations=1) - - # Required points now will have both color's mask val as 255. - common = cv2.bitwise_and(color1_mask, color2_mask) - SOME_THRESHOLD = 10 - - # Common is binary np.uint8 image, min = 0, max = 255. - # SOME_THRESHOLD can be anything within the above range. (not needed though) - # Extract/Use it in whatever way you want it. - intersection_points = np.where(common > SOME_THRESHOLD) - - # Say you want these points in a list form, then you can do this. 
- pts_list = [[r, c] for r, c in zip(*intersection_points)] - #print(pts_list) - - #for x, y in pts_list: - # image_2[x][y] = (255, 0, 0) - - red_line_mask = np.zeros((70, 400, 3), dtype=np.uint8) - - for x, y in pts_list: - red_line_mask[x][y] = (255, 0, 0) - - - ########################################################################################## - - #x_row = [10,60,110] - x_row = [10,30,60] - - - x_row = [50, 60, 69] - - mask = red_line_mask - - print(mask.shape) - - lines = [mask[x_row[idx], :] for idx, x in enumerate(x_row)] - #print(lines) - #print(lines[0]) - #print(type(lines[0])) - #print(lines[0].shape) - unique_values=set( tuple(m2d) for m2d in lines[0] ) - unique_values=set( tuple(m2d) for m2d in lines[1] ) - unique_values=set( tuple(m2d) for m2d in lines[2] ) - - #centrals = list(map(self.get_center, lines)) - - #point = np.divide(np.max(np.nonzero(lines)) - np.min(np.nonzero(lines)), 2) - #print(point) - #a = np.min(np.nonzero(lines)) + point - #print(a) - - centrals = list(map(self.get_center, lines)) - print(centrals) - - for idx, central in enumerate(centrals): - red_line_mask[idx][centrals[0]] = (0, 255, 0) - - - ########################################################################################## - - self.update_frame('frame_0', red_line_mask) - self.update_frame('frame_1', result_pavement) - self.update_frame('frame_2', image_2) - self.update_frame('frame_3', result_sidewalk) - - From 9c03df34c326e995e9b47257b28a98776fd83bb4 Mon Sep 17 00:00:00 2001 From: sergiopaniego Date: Wed, 6 Sep 2023 11:09:48 +0200 Subject: [PATCH 35/36] Updated brains to separated folders and config files accordingly --- ...rain_carla_bird_eye_deep_learning_torch.py | 0 ..._bird_eye_deep_learning_torch_optimized.py | 0 ..._eye_deep_learning_torch_tensorrt_float.py | 0 ..._eye_deep_learning_torch_tensorrt_int_8.py | 0 .../brain_carla_bird_eye_deep_learning.py | 0 ...n_carla_bird_eye_deep_learning_V_MAX_30.py | 0 ...carla_bird_eye_deep_learning_previous_v.py | 0 ..._carla_bird_eye_deep_learning_tensor_rt.py | 0 ...in_carla_bird_eye_deep_learning_tf_lite.py | 0 ...e_deep_learning_x3_previous_v_t_t-4_t-9.py | 0 ...rla_bird_eye_deep_learning_x3_t_t-4_t-9.py | 0 ...eye_deep_learning_x3_t_t-4_t-9_V_MAX_30.py | 0 .../town_01_anticlockwise.launch | 2 +- .../town_01_clockwise.launch | 2 +- .../town_02_anticlockwise.launch | 4 +- .../town_02_anticlockwise_no_gui.launch | 3 +- .../town_02_clockwise.launch | 3 +- behavior_metrics/configs/default_carla.yml | 12 +- .../configs/default_carla_autopilot.yml | 73 +++++++++++ .../configs/default_carla_multiple.yml | 94 +++----------- .../configs/default_carla_multiple_simple.yml | 2 +- ...arla_multiple_tensorflow_optimizations.yml | 120 ++++++++++++++++++ .../configs/default_carla_parked_bike.yml | 2 +- .../configs/default_carla_parked_bike_car.yml | 2 +- .../configs/default_carla_parked_vehicle.yml | 2 +- .../configs/default_carla_pedestrian.yml | 2 +- ...fault_carla_pedestrian_parked_bike_car.yml | 2 +- .../configs/default_carla_single_ad_npc.yml | 2 +- .../configs/default_carla_tensor_rt.yml | 8 +- .../configs/default_carla_tf_lite.yml | 74 ----------- .../configs/default_carla_torch.yml | 10 +- behavior_metrics/utils/metrics_carla.py | 4 +- 32 files changed, 241 insertions(+), 182 deletions(-) rename behavior_metrics/brains/CARLA/{ => pytorch}/brain_carla_bird_eye_deep_learning_torch.py (100%) rename behavior_metrics/brains/CARLA/{ => pytorch}/brain_carla_bird_eye_deep_learning_torch_optimized.py (100%) rename behavior_metrics/brains/CARLA/{ => 
pytorch}/brain_carla_bird_eye_deep_learning_torch_tensorrt_float.py (100%) rename behavior_metrics/brains/CARLA/{ => pytorch}/brain_carla_bird_eye_deep_learning_torch_tensorrt_int_8.py (100%) rename behavior_metrics/brains/CARLA/{ => tensorflow}/brain_carla_bird_eye_deep_learning.py (100%) rename behavior_metrics/brains/CARLA/{ => tensorflow}/brain_carla_bird_eye_deep_learning_V_MAX_30.py (100%) rename behavior_metrics/brains/CARLA/{ => tensorflow}/brain_carla_bird_eye_deep_learning_previous_v.py (100%) rename behavior_metrics/brains/CARLA/{ => tensorflow}/brain_carla_bird_eye_deep_learning_tensor_rt.py (100%) rename behavior_metrics/brains/CARLA/{ => tensorflow}/brain_carla_bird_eye_deep_learning_tf_lite.py (100%) rename behavior_metrics/brains/CARLA/{ => tensorflow}/brain_carla_bird_eye_deep_learning_x3_previous_v_t_t-4_t-9.py (100%) rename behavior_metrics/brains/CARLA/{ => tensorflow}/brain_carla_bird_eye_deep_learning_x3_t_t-4_t-9.py (100%) rename behavior_metrics/brains/CARLA/{ => tensorflow}/brain_carla_bird_eye_deep_learning_x3_t_t-4_t-9_V_MAX_30.py (100%) create mode 100644 behavior_metrics/configs/default_carla_autopilot.yml create mode 100644 behavior_metrics/configs/default_carla_multiple_tensorflow_optimizations.yml diff --git a/behavior_metrics/brains/CARLA/brain_carla_bird_eye_deep_learning_torch.py b/behavior_metrics/brains/CARLA/pytorch/brain_carla_bird_eye_deep_learning_torch.py similarity index 100% rename from behavior_metrics/brains/CARLA/brain_carla_bird_eye_deep_learning_torch.py rename to behavior_metrics/brains/CARLA/pytorch/brain_carla_bird_eye_deep_learning_torch.py diff --git a/behavior_metrics/brains/CARLA/brain_carla_bird_eye_deep_learning_torch_optimized.py b/behavior_metrics/brains/CARLA/pytorch/brain_carla_bird_eye_deep_learning_torch_optimized.py similarity index 100% rename from behavior_metrics/brains/CARLA/brain_carla_bird_eye_deep_learning_torch_optimized.py rename to behavior_metrics/brains/CARLA/pytorch/brain_carla_bird_eye_deep_learning_torch_optimized.py diff --git a/behavior_metrics/brains/CARLA/brain_carla_bird_eye_deep_learning_torch_tensorrt_float.py b/behavior_metrics/brains/CARLA/pytorch/brain_carla_bird_eye_deep_learning_torch_tensorrt_float.py similarity index 100% rename from behavior_metrics/brains/CARLA/brain_carla_bird_eye_deep_learning_torch_tensorrt_float.py rename to behavior_metrics/brains/CARLA/pytorch/brain_carla_bird_eye_deep_learning_torch_tensorrt_float.py diff --git a/behavior_metrics/brains/CARLA/brain_carla_bird_eye_deep_learning_torch_tensorrt_int_8.py b/behavior_metrics/brains/CARLA/pytorch/brain_carla_bird_eye_deep_learning_torch_tensorrt_int_8.py similarity index 100% rename from behavior_metrics/brains/CARLA/brain_carla_bird_eye_deep_learning_torch_tensorrt_int_8.py rename to behavior_metrics/brains/CARLA/pytorch/brain_carla_bird_eye_deep_learning_torch_tensorrt_int_8.py diff --git a/behavior_metrics/brains/CARLA/brain_carla_bird_eye_deep_learning.py b/behavior_metrics/brains/CARLA/tensorflow/brain_carla_bird_eye_deep_learning.py similarity index 100% rename from behavior_metrics/brains/CARLA/brain_carla_bird_eye_deep_learning.py rename to behavior_metrics/brains/CARLA/tensorflow/brain_carla_bird_eye_deep_learning.py diff --git a/behavior_metrics/brains/CARLA/brain_carla_bird_eye_deep_learning_V_MAX_30.py b/behavior_metrics/brains/CARLA/tensorflow/brain_carla_bird_eye_deep_learning_V_MAX_30.py similarity index 100% rename from behavior_metrics/brains/CARLA/brain_carla_bird_eye_deep_learning_V_MAX_30.py rename to 
behavior_metrics/brains/CARLA/tensorflow/brain_carla_bird_eye_deep_learning_V_MAX_30.py diff --git a/behavior_metrics/brains/CARLA/brain_carla_bird_eye_deep_learning_previous_v.py b/behavior_metrics/brains/CARLA/tensorflow/brain_carla_bird_eye_deep_learning_previous_v.py similarity index 100% rename from behavior_metrics/brains/CARLA/brain_carla_bird_eye_deep_learning_previous_v.py rename to behavior_metrics/brains/CARLA/tensorflow/brain_carla_bird_eye_deep_learning_previous_v.py diff --git a/behavior_metrics/brains/CARLA/brain_carla_bird_eye_deep_learning_tensor_rt.py b/behavior_metrics/brains/CARLA/tensorflow/brain_carla_bird_eye_deep_learning_tensor_rt.py similarity index 100% rename from behavior_metrics/brains/CARLA/brain_carla_bird_eye_deep_learning_tensor_rt.py rename to behavior_metrics/brains/CARLA/tensorflow/brain_carla_bird_eye_deep_learning_tensor_rt.py diff --git a/behavior_metrics/brains/CARLA/brain_carla_bird_eye_deep_learning_tf_lite.py b/behavior_metrics/brains/CARLA/tensorflow/brain_carla_bird_eye_deep_learning_tf_lite.py similarity index 100% rename from behavior_metrics/brains/CARLA/brain_carla_bird_eye_deep_learning_tf_lite.py rename to behavior_metrics/brains/CARLA/tensorflow/brain_carla_bird_eye_deep_learning_tf_lite.py diff --git a/behavior_metrics/brains/CARLA/brain_carla_bird_eye_deep_learning_x3_previous_v_t_t-4_t-9.py b/behavior_metrics/brains/CARLA/tensorflow/brain_carla_bird_eye_deep_learning_x3_previous_v_t_t-4_t-9.py similarity index 100% rename from behavior_metrics/brains/CARLA/brain_carla_bird_eye_deep_learning_x3_previous_v_t_t-4_t-9.py rename to behavior_metrics/brains/CARLA/tensorflow/brain_carla_bird_eye_deep_learning_x3_previous_v_t_t-4_t-9.py diff --git a/behavior_metrics/brains/CARLA/brain_carla_bird_eye_deep_learning_x3_t_t-4_t-9.py b/behavior_metrics/brains/CARLA/tensorflow/brain_carla_bird_eye_deep_learning_x3_t_t-4_t-9.py similarity index 100% rename from behavior_metrics/brains/CARLA/brain_carla_bird_eye_deep_learning_x3_t_t-4_t-9.py rename to behavior_metrics/brains/CARLA/tensorflow/brain_carla_bird_eye_deep_learning_x3_t_t-4_t-9.py diff --git a/behavior_metrics/brains/CARLA/brain_carla_bird_eye_deep_learning_x3_t_t-4_t-9_V_MAX_30.py b/behavior_metrics/brains/CARLA/tensorflow/brain_carla_bird_eye_deep_learning_x3_t_t-4_t-9_V_MAX_30.py similarity index 100% rename from behavior_metrics/brains/CARLA/brain_carla_bird_eye_deep_learning_x3_t_t-4_t-9_V_MAX_30.py rename to behavior_metrics/brains/CARLA/tensorflow/brain_carla_bird_eye_deep_learning_x3_t_t-4_t-9_V_MAX_30.py diff --git a/behavior_metrics/configs/CARLA_launch_files/town_01_anticlockwise.launch b/behavior_metrics/configs/CARLA_launch_files/town_01_anticlockwise.launch index 061dd0db..45db5b41 100644 --- a/behavior_metrics/configs/CARLA_launch_files/town_01_anticlockwise.launch +++ b/behavior_metrics/configs/CARLA_launch_files/town_01_anticlockwise.launch @@ -10,7 +10,7 @@ - + diff --git a/behavior_metrics/configs/CARLA_launch_files/town_01_clockwise.launch b/behavior_metrics/configs/CARLA_launch_files/town_01_clockwise.launch index 9a833b83..7375d454 100644 --- a/behavior_metrics/configs/CARLA_launch_files/town_01_clockwise.launch +++ b/behavior_metrics/configs/CARLA_launch_files/town_01_clockwise.launch @@ -10,7 +10,7 @@ - + diff --git a/behavior_metrics/configs/CARLA_launch_files/town_02_anticlockwise.launch b/behavior_metrics/configs/CARLA_launch_files/town_02_anticlockwise.launch index 4fa3bebe..ef688bd2 100644 --- 
a/behavior_metrics/configs/CARLA_launch_files/town_02_anticlockwise.launch +++ b/behavior_metrics/configs/CARLA_launch_files/town_02_anticlockwise.launch @@ -18,7 +18,7 @@ - + @@ -35,7 +35,7 @@ - + diff --git a/behavior_metrics/configs/CARLA_launch_files/town_02_anticlockwise_no_gui.launch b/behavior_metrics/configs/CARLA_launch_files/town_02_anticlockwise_no_gui.launch index 867ad933..d0880035 100644 --- a/behavior_metrics/configs/CARLA_launch_files/town_02_anticlockwise_no_gui.launch +++ b/behavior_metrics/configs/CARLA_launch_files/town_02_anticlockwise_no_gui.launch @@ -9,7 +9,8 @@ - + + diff --git a/behavior_metrics/configs/CARLA_launch_files/town_02_clockwise.launch b/behavior_metrics/configs/CARLA_launch_files/town_02_clockwise.launch index 5be79bf0..f3f963c4 100644 --- a/behavior_metrics/configs/CARLA_launch_files/town_02_clockwise.launch +++ b/behavior_metrics/configs/CARLA_launch_files/town_02_clockwise.launch @@ -9,7 +9,8 @@ - + + diff --git a/behavior_metrics/configs/default_carla.yml b/behavior_metrics/configs/default_carla.yml index 670e59ec..a25024b3 100644 --- a/behavior_metrics/configs/default_carla.yml +++ b/behavior_metrics/configs/default_carla.yml @@ -33,13 +33,13 @@ Behaviors: Topic: '/carla/ego_vehicle/vehicle_control_cmd' MaxV: 3 MaxW: 0.3 - BrainPath: 'brains/CARLA/brain_carla_bird_eye_deep_learning.py' - PilotTimeCycle: 50 - AsyncMode: False + BrainPath: 'brains/CARLA/tensorflow/brain_carla_bird_eye_deep_learning.py' + PilotTimeCycle: 50 # Turn up to reduce number of control decisions + AsyncMode: True # Set to False to control simulator time Parameters: - Model: '20230125-120238_pilotnet_CARLA_14_12_dataset_bird_eye_300_epochs_no_flip_3_output_both_directions_all_towns_adam_AFFINE_PAPER.h5' + Model: '20230724-125225_pilotnet_CARLA_19_05_dataset_bird_eye_300_epochs_no_flip_3_output_both_directions_all_towns_adam_AFFINE_OPTIMIZATION_PAPER_UPDATED_SHAPE_pretrained_cp.h5' ImageCropped: True - ImageSize: [ 100,50 ] + ImageSize: [ 200,66 ] ImageNormalized: True PredictionsNormalized: True GPU: True @@ -47,7 +47,7 @@ Behaviors: ImageTranform: '' Type: 'CARLA' Simulation: - World: configs/CARLA_launch_files/town_01_anticlockwise.launch + World: configs/CARLA_launch_files/town_02_anticlockwise.launch RandomSpawnPoint: False Dataset: In: '/tmp/my_bag.bag' diff --git a/behavior_metrics/configs/default_carla_autopilot.yml b/behavior_metrics/configs/default_carla_autopilot.yml new file mode 100644 index 00000000..5421f3ff --- /dev/null +++ b/behavior_metrics/configs/default_carla_autopilot.yml @@ -0,0 +1,73 @@ +Behaviors: + Robot: + Sensors: + Cameras: + Camera_0: + Name: 'camera_0' + Topic: '/carla/ego_vehicle/rgb_front/image' + Camera_1: + Name: 'camera_1' + Topic: '/carla/ego_vehicle/rgb_view/image' + Camera_2: + Name: 'camera_2' + Topic: '/carla/ego_vehicle/semantic_segmentation_front/image' + Camera_3: + Name: 'camera_3' + Topic: '/carla/ego_vehicle/dvs_front/image' + Pose3D: + Pose3D_0: + Name: 'pose3d_0' + Topic: '/carla/ego_vehicle/odometry' + BirdEyeView: + BirdEyeView_0: + Name: 'bird_eye_view_0' + Topic: '' + Speedometer: + Speedometer_0: + Name: 'speedometer_0' + Topic: '/carla/ego_vehicle/speedometer' + Actuators: + CARLA_Motors: + Motors_0: + Name: 'motors_0' + Topic: '/carla/ego_vehicle/vehicle_control_cmd' + MaxV: 3 + MaxW: 0.3 + + BrainPath: 'brains/CARLA/brain_carla_autopilot.py' + PilotTimeCycle: 300 + Parameters: + ImageCropped: True + ImageSize: [ 100,50 ] + ImageNormalized: True + PredictionsNormalized: True + GPU: True + UseOptimized: True + 
ImageTranform: '' + Type: 'CARLA' + RandomSpawnPoint: False + Simulation: + World: configs/CARLA_launch_files/town_01_anticlockwise.launch + Dataset: + In: '/tmp/my_bag.bag' + Out: '' + Stats: + Out: './' + PerfectLap: './perfect_bags/lap-simple-circuit.bag' + Layout: + Frame_0: + Name: frame_0 + Geometry: [1, 1, 1, 1] + Data: rgbimage + Frame_1: + Name: frame_1 + Geometry: [0, 1, 1, 1] + Data: rgbimage + Frame_2: + Name: frame_2 + Geometry: [0, 2, 1, 1] + Data: rgbimage + Frame_3: + Name: frame_3 + Geometry: [1, 2, 1, 1] + Data: rgbimage diff --git a/behavior_metrics/configs/default_carla_multiple.yml b/behavior_metrics/configs/default_carla_multiple.yml index 69de86e1..b6c5caf1 100644 --- a/behavior_metrics/configs/default_carla_multiple.yml +++ b/behavior_metrics/configs/default_carla_multiple.yml @@ -35,33 +35,17 @@ Behaviors: MaxW: 0.3 BrainPath: [ - 'brains/CARLA/brain_carla_bird_eye_deep_learning.py', - 'brains/CARLA/brain_carla_bird_eye_deep_learning.py', - - 'brains/CARLA/brain_carla_bird_eye_deep_learning_x3_t_t-4_t-9.py', - 'brains/CARLA/brain_carla_bird_eye_deep_learning_x3_t_t-4_t-9.py', - - 'brains/CARLA/brain_carla_bird_eye_deep_learning_previous_v.py', - 'brains/CARLA/brain_carla_bird_eye_deep_learning_previous_v.py', - - 'brains/CARLA/brain_carla_bird_eye_deep_learning_x3_previous_v_t_t-4_t-9.py', - 'brains/CARLA/brain_carla_bird_eye_deep_learning_x3_previous_v_t_t-4_t-9.py', + 'brains/CARLA/tensorflow/brain_carla_bird_eye_deep_learning_tensor_rt.py', + 'brains/CARLA/tensorflow/brain_carla_bird_eye_deep_learning_tensor_rt.py', + 'brains/CARLA/tensorflow/brain_carla_bird_eye_deep_learning_tensor_rt.py', ] - PilotTimeCycle: 100 - AsyncMode: False + PilotTimeCycle: 50 + AsyncMode: True Parameters: Model: [ - '20230125-120238_pilotnet_CARLA_14_12_dataset_bird_eye_300_epochs_no_flip_3_output_both_directions_all_towns_adam_AFFINE_PAPER.h5', - '20230126-183457_deepestLSTMTinyPilotNet_CARLA_14_12_dataset_bird_eye_400_epochs_no_flip_3_output_both_directions_all_towns__less_learning_rate_PAPER_cp.h5', - - '20230220-105422_pilotnet_small_more_more_LSTM_x3_CARLA_14_12_dataset_bird_eye_300_epochs_no_flip_3_output_both_directions_all_towns_sequences_more_more_extreme_t_5_t_10_dataset_AFFINE_PAPER_cp.h5', - '20230222-110851_memDCCP_small_LSTM_2_CARLA_14_12_dataset_bird_eye_300_epochs_no_flip_3_output_both_directions_all_towns_sequences_more_more_extreme_AFFINE_PAPER_cp.h5', - - '20230127-180655_pilotnet_CARLA_17_10_dataset_bird_eye_300_epochs_no_flip_3_output_velocity_all_towns_vel_30_AFFINE.h5', - '20230127-180856_deepestLSTMTinyPilotNet_CARLA_17_10_dataset_bird_eye_300_epochs_no_flip_3_output_velocity_all_towns_vel_30_AFFINE_PAPER.h5', - - '20230221-162112_pilotnet_x3_small_CARLA_14_12_dataset_bird_eye_300_epochs_no_flip_3_output_velocity_both_directions_all_towns_sequences_more_extreme_previous_V_AFFINE_PAPER_cp.h5', - '20230221-175351_memDCCP_small_CARLA_14_12_dataset_bird_eye_300_epochs_no_flip_3_output_velocity_both_directions_all_towns_sequences_more_extreme_previous_V_AFFINE_PAPER_cp.h5' + 'optimized_pilotnet_models_tensorflow/pilotnet_tftrt_fp32', + 'optimized_pilotnet_models_tensorflow/pilotnet_tftrt_fp16', + 'optimized_pilotnet_models_tensorflow/pilotnet_tftrt_int8_2', ] ImageCropped: True ImageSize: [ 100,50 ] @@ -75,68 +59,20 @@ Behaviors: Name: "Experiment name" Description: "Experiment description" UseWorldTimeouts: True - Timeout: [10, 10, 10, 10, 10, 10, 10] # for each world! + Timeout: [10] # for each world! 
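+    # One timeout entry per launch file in Simulation.World; this config only launches Town02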
Repetitions: 5 Simulation: World: [ - configs/CARLA_launch_files/town_01_anticlockwise_no_gui.launch, configs/CARLA_launch_files/town_02_anticlockwise_no_gui.launch, - configs/CARLA_launch_files/town_03_anticlockwise_no_gui.launch, - configs/CARLA_launch_files/town_04_anticlockwise_no_gui.launch, - configs/CARLA_launch_files/town_05_anticlockwise_no_gui.launch, - configs/CARLA_launch_files/town_06_anticlockwise_no_gui.launch, - configs/CARLA_launch_files/town_07_anticlockwise_no_gui.launch ] - RandomSpawnPoint: True + RandomSpawnPoint: False SpawnPoints: [ [ - "10.0, 2.0, 1.37, 0.0, 0.0, 180.0", - "300.0, -330.0, 1.37, 0.0, 0.0, 0.0", - "397.0, -50.0, 1.37, 0.0, 0.0, 90.0", - "392.0, -50.0, 1.37, 0.0, 0.0, -90.0", - "20.0, -327.0, 1.37, 0.0, 0.0, 180.0" - ], - [ - "55.3, -105.6, 1.37, 0.0, 0.0, 180.0", - "-7.0, -270.6, 1.37, 0.0, 0.0, -90.0", - "-3.0, -270.6, 1.37, 0.0, 0.0, 90.0", - "100.0, -303.0, 1.37, 0.0, 0.0, 180.0", - "190.0, -150.0, 1.37, 0.0, 0.0, -90.0" - ], - [ - "246.0, 150.0, 1.37, 0.0, 0.0, 90.0", - "243.0, -100., 1.37, 0.0, 0.0, 90.0", - "-88.0, 170, 1.37, 0.0, 0.0, -90.0", - "232.0, 0.0, 1.37, 0.0, 0.0, -90.0", - "-50.0, 195, 1.37, 0.0, 0.0, 0.0" - ], - [ - "381.5, 60.0, 1.37, 0.0, 0.0, -90.0", - "-16.0, -184.6, 1.37, 0.0, 0.0, -90.0", - "381.5, 60.0, 1.37, 0.0, 0.0, -90.0", - "-16.0, -184.6, 1.37, 0.0, 0.0, -90.0", - "381.5, 60.0, 1.37, 0.0, 0.0, -90.0" - ], - [ - "20, -187.5, 1.37, 0.0, 0.0, 180.0", - "210.1, -87.3, 1.37, 0.0, 0.0, 90.0", - "189, -87.3, 1.37, 0.0, 0.0, -90.0", - "20, -187.5, 1.37, 0.0, 0.0, 180.0", - "210.1, -87.3, 1.37, 0.0, 0.0, 90.0" - ], - [ - "659.0, -70.5, 1.37, 0.0, 0.0, -90.0", - "351.5, 10.5, 1.37, 0.0, 0.0, 0.0", - "351.5, 24.5, 1.37, 0.0, 0.0, 180.0", - "672.5, -70.5, 1.37, 0.0, 0.0, 90.0", - "659.0, -70.5, 1.37, 0.0, 0.0, -90.0" - ], - [ - "-3.0, 243.0, 1.37, 0.0, 0.0, 180.0", - "70.5, 5.0, 1.37, 0.0, 0.0, 60.0", - "-184.5, -107.2, 1.37, 0.0, 0.0, 180.0", - "-3.0, 243.0, 1.37, 0.0, 0.0, 180.0", - "70.5, 5.0, 1.37, 0.0, 0.0, 60.0" + "10.0, -307.0, 1.37, 0.0, 0.0, 0.0", + "10.0, -307.0, 1.37, 0.0, 0.0, 0.0", + "10.0, -307.0, 1.37, 0.0, 0.0, 0.0", + "10.0, -307.0, 1.37, 0.0, 0.0, 0.0", + "10.0, -307.0, 1.37, 0.0, 0.0, 0.0" ] ] Dataset: diff --git a/behavior_metrics/configs/default_carla_multiple_simple.yml b/behavior_metrics/configs/default_carla_multiple_simple.yml index 3daed2e5..5b371e25 100644 --- a/behavior_metrics/configs/default_carla_multiple_simple.yml +++ b/behavior_metrics/configs/default_carla_multiple_simple.yml @@ -31,7 +31,7 @@ Behaviors: MaxW: 0.3 BrainPath: [ - 'brains/CARLA/brain_carla_bird_eye_deep_learning.py' + 'brains/CARLA/tensorflow/brain_carla_bird_eye_deep_learning.py' ] PilotTimeCycle: 100 Parameters: diff --git a/behavior_metrics/configs/default_carla_multiple_tensorflow_optimizations.yml b/behavior_metrics/configs/default_carla_multiple_tensorflow_optimizations.yml new file mode 100644 index 00000000..bb014e02 --- /dev/null +++ b/behavior_metrics/configs/default_carla_multiple_tensorflow_optimizations.yml @@ -0,0 +1,120 @@ +Behaviors: + Robot: + Sensors: + Cameras: + Camera_0: + Name: 'camera_0' + Topic: '/carla/ego_vehicle/rgb_front/image' + Camera_1: + Name: 'camera_1' + Topic: '/carla/ego_vehicle/rgb_view/image' + Camera_2: + Name: 'camera_2' + Topic: '/carla/ego_vehicle/semantic_segmentation_front/image' + Camera_3: + Name: 'camera_3' + Topic: '/carla/ego_vehicle/dvs_front/image' + Pose3D: + Pose3D_0: + Name: 'pose3d_0' + Topic: '/carla/ego_vehicle/odometry' + BirdEyeView: + BirdEyeView_0: + Name: 
'bird_eye_view_0' + Topic: '' + Speedometer: + Speedometer_0: + Name: 'speedometer_0' + Topic: '/carla/ego_vehicle/speedometer' + Actuators: + CARLA_Motors: + Motors_0: + Name: 'motors_0' + Topic: '/carla/ego_vehicle/vehicle_control_cmd' + MaxV: 3 + MaxW: 0.3 + + BrainPath: [ + 'brains/CARLA/tensorflow/brain_carla_bird_eye_deep_learning.py', + + 'brains/CARLA/tensorflow/brain_carla_bird_eye_deep_learning_tf_lite.py', + 'brains/CARLA/tensorflow/brain_carla_bird_eye_deep_learning_tf_lite.py', + 'brains/CARLA/tensorflow/brain_carla_bird_eye_deep_learning_tf_lite.py', + 'brains/CARLA/tensorflow/brain_carla_bird_eye_deep_learning_tf_lite.py', + 'brains/CARLA/tensorflow/brain_carla_bird_eye_deep_learning_tf_lite.py', + 'brains/CARLA/tensorflow/brain_carla_bird_eye_deep_learning_tf_lite.py', + 'brains/CARLA/tensorflow/brain_carla_bird_eye_deep_learning_tf_lite.py', + 'brains/CARLA/tensorflow/brain_carla_bird_eye_deep_learning_tf_lite.py', + 'brains/CARLA/tensorflow/brain_carla_bird_eye_deep_learning_tf_lite.py', + 'brains/CARLA/tensorflow/brain_carla_bird_eye_deep_learning_tf_lite.py', + 'brains/CARLA/tensorflow/brain_carla_bird_eye_deep_learning_tf_lite.py', + ] + PilotTimeCycle: 50 + AsyncMode: True + Parameters: + Model: [ + 'pilotnet.h5', + + 'optimized_pilotnet_models_tensorflow/pilotnet_model.tflite', + 'optimized_pilotnet_models_tensorflow/pilotnet_dynamic_quant.tflite', + 'optimized_pilotnet_models_tensorflow/pilotnet_int_quant.tflite', + 'optimized_pilotnet_models_tensorflow/pilotnet_intflt_quant.tflite', + 'optimized_pilotnet_models_tensorflow/pilotnet_float16_quant.tflite', + 'optimized_pilotnet_models_tensorflow/pilotnet_quant_aware.tflite', + 'optimized_pilotnet_models_tensorflow/pilotnet_pruned.tflite', + 'optimized_pilotnet_models_tensorflow/pilotnet_pruned_quan.tflite', + 'optimized_pilotnet_models_tensorflow/pilotnet_cqat_model.tflite', + 'optimized_pilotnet_models_tensorflow/pilotnet_pqat_model.tflite', + 'optimized_pilotnet_models_tensorflow/pilotnet_pcqat_model.tflite' + ] + ImageCropped: True + ImageSize: [ 100,50 ] + ImageNormalized: True + PredictionsNormalized: True + GPU: True + UseOptimized: True + ImageTranform: '' + Type: 'CARLA' + Experiment: + Name: "Experiment name" + Description: "Experiment description" + UseWorldTimeouts: True + Timeout: [10] # for each world! 
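+    # Five runs per brain/model pair (presumably one per spawn point listed below)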
+ Repetitions: 5 + Simulation: + World: [ + configs/CARLA_launch_files/town_02_anticlockwise_no_gui.launch, + ] + RandomSpawnPoint: False + SpawnPoints: [ + [ + "-7.0, -270.6, 1.37, 0.0, 0.0, -90.0", + "-7.0, -270.6, 1.37, 0.0, 0.0, -90.0", + "-7.0, -270.6, 1.37, 0.0, 0.0, -90.0", + "-7.0, -270.6, 1.37, 0.0, 0.0, -90.0", + "-7.0, -270.6, 1.37, 0.0, 0.0, -90.0" + ] + ] + Dataset: + In: '/tmp/my_bag.bag' + Out: '' + Stats: + Out: './' + PerfectLap: './perfect_bags/lap-simple-circuit.bag' + Layout: + Frame_0: + Name: frame_0 + Geometry: [1, 1, 1, 1] + Data: rgbimage + Frame_1: + Name: frame_1 + Geometry: [0, 1, 1, 1] + Data: rgbimage + Frame_2: + Name: frame_2 + Geometry: [0, 2, 1, 1] + Data: rgbimage + Frame_3: + Name: frame_3 + Geometry: [1, 2, 1, 1] + Data: rgbimage diff --git a/behavior_metrics/configs/default_carla_parked_bike.yml b/behavior_metrics/configs/default_carla_parked_bike.yml index daf30e07..340db817 100644 --- a/behavior_metrics/configs/default_carla_parked_bike.yml +++ b/behavior_metrics/configs/default_carla_parked_bike.yml @@ -33,7 +33,7 @@ Behaviors: Topic: '/carla/ego_vehicle/vehicle_control_cmd' MaxV: 3 MaxW: 0.3 - BrainPath: 'brains/CARLA/brain_carla_bird_eye_deep_learning.py' + BrainPath: 'brains/CARLA/tensorflow/brain_carla_bird_eye_deep_learning.py' PilotTimeCycle: 100 AsyncMode: False Parameters: diff --git a/behavior_metrics/configs/default_carla_parked_bike_car.yml b/behavior_metrics/configs/default_carla_parked_bike_car.yml index ebee5b85..7761704d 100644 --- a/behavior_metrics/configs/default_carla_parked_bike_car.yml +++ b/behavior_metrics/configs/default_carla_parked_bike_car.yml @@ -33,7 +33,7 @@ Behaviors: Topic: '/carla/ego_vehicle/vehicle_control_cmd' MaxV: 3 MaxW: 0.3 - BrainPath: 'brains/CARLA/brain_carla_bird_eye_deep_learning.py' + BrainPath: 'brains/CARLA/tensorflow/brain_carla_bird_eye_deep_learning.py' PilotTimeCycle: 100 AsyncMode: False Parameters: diff --git a/behavior_metrics/configs/default_carla_parked_vehicle.yml b/behavior_metrics/configs/default_carla_parked_vehicle.yml index fce12b36..e99f3656 100644 --- a/behavior_metrics/configs/default_carla_parked_vehicle.yml +++ b/behavior_metrics/configs/default_carla_parked_vehicle.yml @@ -33,7 +33,7 @@ Behaviors: Topic: '/carla/ego_vehicle/vehicle_control_cmd' MaxV: 3 MaxW: 0.3 - BrainPath: 'brains/CARLA/brain_carla_bird_eye_deep_learning.py' + BrainPath: 'brains/CARLA/tensorflow/brain_carla_bird_eye_deep_learning.py' PilotTimeCycle: 100 AsyncMode: False Parameters: diff --git a/behavior_metrics/configs/default_carla_pedestrian.yml b/behavior_metrics/configs/default_carla_pedestrian.yml index d28137b6..92d463ae 100644 --- a/behavior_metrics/configs/default_carla_pedestrian.yml +++ b/behavior_metrics/configs/default_carla_pedestrian.yml @@ -33,7 +33,7 @@ Behaviors: Topic: '/carla/ego_vehicle/vehicle_control_cmd' MaxV: 3 MaxW: 0.3 - BrainPath: 'brains/CARLA/brain_carla_bird_eye_deep_learning.py' + BrainPath: 'brains/CARLA/tensorflow/brain_carla_bird_eye_deep_learning.py' PilotTimeCycle: 100 AsyncMode: False Parameters: diff --git a/behavior_metrics/configs/default_carla_pedestrian_parked_bike_car.yml b/behavior_metrics/configs/default_carla_pedestrian_parked_bike_car.yml index bc931f8d..d3d36a8f 100644 --- a/behavior_metrics/configs/default_carla_pedestrian_parked_bike_car.yml +++ b/behavior_metrics/configs/default_carla_pedestrian_parked_bike_car.yml @@ -33,7 +33,7 @@ Behaviors: Topic: '/carla/ego_vehicle/vehicle_control_cmd' MaxV: 3 MaxW: 0.3 - BrainPath: 
'brains/CARLA/brain_carla_bird_eye_deep_learning.py' + BrainPath: 'brains/CARLA/tensorflow/brain_carla_bird_eye_deep_learning.py' PilotTimeCycle: 100 AsyncMode: False Parameters: diff --git a/behavior_metrics/configs/default_carla_single_ad_npc.yml b/behavior_metrics/configs/default_carla_single_ad_npc.yml index 57564423..6b4eb063 100644 --- a/behavior_metrics/configs/default_carla_single_ad_npc.yml +++ b/behavior_metrics/configs/default_carla_single_ad_npc.yml @@ -33,7 +33,7 @@ Behaviors: Topic: '/carla/ego_vehicle/vehicle_control_cmd' MaxV: 3 MaxW: 0.3 - BrainPath: 'brains/CARLA/brain_carla_bird_eye_deep_learning.py' + BrainPath: 'brains/CARLA/tensorflow/brain_carla_bird_eye_deep_learning.py' PilotTimeCycle: 50 AsyncMode: False Parameters: diff --git a/behavior_metrics/configs/default_carla_tensor_rt.yml b/behavior_metrics/configs/default_carla_tensor_rt.yml index 3b860815..6034475d 100644 --- a/behavior_metrics/configs/default_carla_tensor_rt.yml +++ b/behavior_metrics/configs/default_carla_tensor_rt.yml @@ -33,13 +33,13 @@ Behaviors: Topic: '/carla/ego_vehicle/vehicle_control_cmd' MaxV: 3 MaxW: 0.3 - BrainPath: 'brains/CARLA/brain_carla_bird_eye_deep_learning_tensor_rt.py' + BrainPath: 'brains/CARLA/tensorflow/brain_carla_bird_eye_deep_learning_tensor_rt.py' PilotTimeCycle: 1 AsyncMode: False Parameters: - Model: 'pilotnet_tftrt_int8' + Model: 'optimized_pilotnet_models_tensorflow/pilotnet_tftrt_int8_2' ImageCropped: True - ImageSize: [ 100,50 ] + ImageSize: [ 200,66 ] ImageNormalized: True PredictionsNormalized: True GPU: True @@ -47,7 +47,7 @@ Behaviors: ImageTranform: '' Type: 'CARLA' Simulation: - World: configs/CARLA_launch_files/town_01_anticlockwise.launch + World: configs/CARLA_launch_files/town_02_anticlockwise.launch RandomSpawnPoint: False Dataset: In: '/tmp/my_bag.bag' diff --git a/behavior_metrics/configs/default_carla_tf_lite.yml b/behavior_metrics/configs/default_carla_tf_lite.yml index 615bf008..e69de29b 100644 --- a/behavior_metrics/configs/default_carla_tf_lite.yml +++ b/behavior_metrics/configs/default_carla_tf_lite.yml @@ -1,74 +0,0 @@ -Behaviors: - Robot: - Sensors: - Cameras: - Camera_0: - Name: 'camera_0' - Topic: '/carla/ego_vehicle/rgb_front/image' - Camera_1: - Name: 'camera_1' - Topic: '/carla/ego_vehicle/rgb_view/image' - Camera_2: - Name: 'camera_2' - Topic: '/carla/ego_vehicle/semantic_segmentation_front/image' - Camera_3: - Name: 'camera_3' - Topic: '/carla/ego_vehicle/dvs_front/image' - Pose3D: - Pose3D_0: - Name: 'pose3d_0' - Topic: '/carla/ego_vehicle/odometry' - BirdEyeView: - BirdEyeView_0: - Name: 'bird_eye_view_0' - Topic: '' - Speedometer: - Speedometer_0: - Name: 'speedometer_0' - Topic: '/carla/ego_vehicle/speedometer' - Actuators: - CARLA_Motors: - Motors_0: - Name: 'motors_0' - Topic: '/carla/ego_vehicle/vehicle_control_cmd' - MaxV: 3 - MaxW: 0.3 - BrainPath: 'brains/CARLA/brain_carla_bird_eye_deep_learning_tf_lite.py' - PilotTimeCycle: 1 - AsyncMode: False - Parameters: - Model: 'optimized_pilotnet_models_tensorflow/28_04_pilotnet_dynamic_quant.tflite' - ImageCropped: True - ImageSize: [ 100,50 ] - ImageNormalized: True - PredictionsNormalized: True - GPU: True - UseOptimized: True - ImageTranform: '' - Type: 'CARLA' - Simulation: - World: configs/CARLA_launch_files/town_01_anticlockwise.launch - RandomSpawnPoint: False - Dataset: - In: '/tmp/my_bag.bag' - Out: '' - Stats: - Out: './' - PerfectLap: './perfect_bags/lap-simple-circuit.bag' - Layout: - Frame_0: - Name: frame_0 - Geometry: [1, 1, 1, 1] - Data: rgbimage - Frame_1: - Name: frame_1 
- Geometry: [0, 1, 1, 1] - Data: rgbimage - Frame_2: - Name: frame_2 - Geometry: [0, 2, 1, 1] - Data: rgbimage - Frame_3: - Name: frame_3 - Geometry: [1, 2, 1, 1] - Data: rgbimage diff --git a/behavior_metrics/configs/default_carla_torch.yml b/behavior_metrics/configs/default_carla_torch.yml index 3fd0ba6c..4479ddad 100644 --- a/behavior_metrics/configs/default_carla_torch.yml +++ b/behavior_metrics/configs/default_carla_torch.yml @@ -33,11 +33,11 @@ Behaviors: Topic: '/carla/ego_vehicle/vehicle_control_cmd' MaxV: 3 MaxW: 0.3 - BrainPath: 'brains/CARLA/brain_carla_bird_eye_deep_learning_torch.py' - PilotTimeCycle: 100 - AsyncMode: False + BrainPath: 'brains/CARLA/pytorch/brain_carla_bird_eye_deep_learning_torch.py' + PilotTimeCycle: 50 + AsyncMode: True Parameters: - Model: 'pilot_net_model_best_17_04_c_3.pth' + Model: 'pilotnet_model.pth' ImageCropped: True ImageSize: [ 100,50 ] ImageNormalized: True @@ -47,7 +47,7 @@ Behaviors: ImageTranform: '' Type: 'CARLA' Simulation: - World: configs/CARLA_launch_files/town_01_anticlockwise.launch + World: configs/CARLA_launch_files/town_02_anticlockwise.launch RandomSpawnPoint: False Dataset: In: '/tmp/my_bag.bag' diff --git a/behavior_metrics/utils/metrics_carla.py b/behavior_metrics/utils/metrics_carla.py index 3fa2e086..549f02bc 100644 --- a/behavior_metrics/utils/metrics_carla.py +++ b/behavior_metrics/utils/metrics_carla.py @@ -584,7 +584,9 @@ def get_per_model_aggregated_metrics(result, experiments_starting_time_str, expe fig.tight_layout() plt.xticks(rotation=90) plt.legend(handles=color_handles) - plt.savefig(experiments_starting_time_str + '/' + unique_experiment_model + '_ ' + experiment_metric_and_title['metric'] + '.png') + if len(unique_experiment_model.split('/')) > 1: + unique_experiment_model = unique_experiment_model.split('/')[-1] + plt.savefig(experiments_starting_time_str + '/' + unique_experiment_model + '_' + experiment_metric_and_title['metric'] + '.png') plt.close() def get_all_experiments_aggregated_metrics_boxplot(result, experiments_starting_time_str, experiments_metrics_and_titles): From 496ce0d1175fb5826c31feea5b96373765171718 Mon Sep 17 00:00:00 2001 From: sergiopaniego Date: Wed, 6 Sep 2023 11:16:14 +0200 Subject: [PATCH 36/36] Updated config multiple --- .../configs/default_carla_multiple.yml | 58 +++++++++++++++++-- 1 file changed, 53 insertions(+), 5 deletions(-) diff --git a/behavior_metrics/configs/default_carla_multiple.yml b/behavior_metrics/configs/default_carla_multiple.yml index b6c5caf1..d1f9a756 100644 --- a/behavior_metrics/configs/default_carla_multiple.yml +++ b/behavior_metrics/configs/default_carla_multiple.yml @@ -63,16 +63,64 @@ Behaviors: Repetitions: 5 Simulation: World: [ + configs/CARLA_launch_files/town_01_anticlockwise_no_gui.launch, configs/CARLA_launch_files/town_02_anticlockwise_no_gui.launch, + configs/CARLA_launch_files/town_03_anticlockwise_no_gui.launch, + configs/CARLA_launch_files/town_04_anticlockwise_no_gui.launch, + configs/CARLA_launch_files/town_05_anticlockwise_no_gui.launch, + configs/CARLA_launch_files/town_06_anticlockwise_no_gui.launch, + configs/CARLA_launch_files/town_07_anticlockwise_no_gui.launch ] RandomSpawnPoint: False SpawnPoints: [ [ - "10.0, -307.0, 1.37, 0.0, 0.0, 0.0", - "10.0, -307.0, 1.37, 0.0, 0.0, 0.0", - "10.0, -307.0, 1.37, 0.0, 0.0, 0.0", - "10.0, -307.0, 1.37, 0.0, 0.0, 0.0", - "10.0, -307.0, 1.37, 0.0, 0.0, 0.0" + "10.0, 2.0, 1.37, 0.0, 0.0, 180.0", + "300.0, -330.0, 1.37, 0.0, 0.0, 0.0", + "397.0, -50.0, 1.37, 0.0, 0.0, 90.0", + "392.0, -50.0, 1.37, 0.0, 
0.0, -90.0", + "20.0, -327.0, 1.37, 0.0, 0.0, 180.0" + ], + [ + "55.3, -105.6, 1.37, 0.0, 0.0, 180.0", + "-7.0, -270.6, 1.37, 0.0, 0.0, -90.0", + "-3.0, -270.6, 1.37, 0.0, 0.0, 90.0", + "100.0, -303.0, 1.37, 0.0, 0.0, 180.0", + "190.0, -150.0, 1.37, 0.0, 0.0, -90.0" + ], + [ + "246.0, 150.0, 1.37, 0.0, 0.0, 90.0", + "243.0, -100., 1.37, 0.0, 0.0, 90.0", + "-88.0, 170, 1.37, 0.0, 0.0, -90.0", + "232.0, 0.0, 1.37, 0.0, 0.0, -90.0", + "-50.0, 195, 1.37, 0.0, 0.0, 0.0" + ], + [ + "381.5, 60.0, 1.37, 0.0, 0.0, -90.0", + "-16.0, -184.6, 1.37, 0.0, 0.0, -90.0", + "381.5, 60.0, 1.37, 0.0, 0.0, -90.0", + "-16.0, -184.6, 1.37, 0.0, 0.0, -90.0", + "381.5, 60.0, 1.37, 0.0, 0.0, -90.0" + ], + [ + "20, -187.5, 1.37, 0.0, 0.0, 180.0", + "210.1, -87.3, 1.37, 0.0, 0.0, 90.0", + "189, -87.3, 1.37, 0.0, 0.0, -90.0", + "20, -187.5, 1.37, 0.0, 0.0, 180.0", + "210.1, -87.3, 1.37, 0.0, 0.0, 90.0" + ], + [ + "659.0, -70.5, 1.37, 0.0, 0.0, -90.0", + "351.5, 10.5, 1.37, 0.0, 0.0, 0.0", + "351.5, 24.5, 1.37, 0.0, 0.0, 180.0", + "672.5, -70.5, 1.37, 0.0, 0.0, 90.0", + "659.0, -70.5, 1.37, 0.0, 0.0, -90.0" + ], + [ + "-3.0, 243.0, 1.37, 0.0, 0.0, 180.0", + "70.5, 5.0, 1.37, 0.0, 0.0, 60.0", + "-184.5, -107.2, 1.37, 0.0, 0.0, 180.0", + "-3.0, 243.0, 1.37, 0.0, 0.0, 180.0", + "70.5, 5.0, 1.37, 0.0, 0.0, 60.0" ] ] Dataset: