From 933822edd40a058ceb302ced4fad0ba691d3bec4 Mon Sep 17 00:00:00 2001 From: sergiopaniego Date: Mon, 3 Apr 2023 13:43:13 +0000 Subject: [PATCH 1/3] Added CARLA brains from experiments --- .../brain_carla_bird_eye_deep_learning.py | 31 +- ...in_carla_bird_eye_deep_learning_70_km_h.py | 191 +++++++++++ ...rla_bird_eye_deep_learning_broken_input.py | 195 +++++++++++ ...rd_eye_deep_learning_previous_v_70_km_h.py | 197 +++++++++++ ...e_deep_learning_previous_v_broken_input.py | 201 ++++++++++++ ...e_deep_learning_x3_previous_v_t_t-4_t-9.py | 11 +- ...earning_x3_previous_v_t_t-4_t-9_70_km_h.py | 299 +++++++++++++++++ ...ng_x3_previous_v_t_t-4_t-9_broken_input.py | 305 ++++++++++++++++++ ...rla_bird_eye_deep_learning_x3_t_t-4_t-9.py | 1 + ..._eye_deep_learning_x3_t_t-4_t-9_70_km_h.py | 225 +++++++++++++ ...eye_deep_learning_x3_t_t-4_t-9_V_MAX_30.py | 2 +- ...deep_learning_x3_t_t-4_t-9_broken_input.py | 232 +++++++++++++ 12 files changed, 1870 insertions(+), 20 deletions(-) create mode 100644 behavior_metrics/brains/CARLA/brain_carla_bird_eye_deep_learning_70_km_h.py create mode 100644 behavior_metrics/brains/CARLA/brain_carla_bird_eye_deep_learning_broken_input.py create mode 100644 behavior_metrics/brains/CARLA/brain_carla_bird_eye_deep_learning_previous_v_70_km_h.py create mode 100644 behavior_metrics/brains/CARLA/brain_carla_bird_eye_deep_learning_previous_v_broken_input.py create mode 100644 behavior_metrics/brains/CARLA/brain_carla_bird_eye_deep_learning_x3_previous_v_t_t-4_t-9_70_km_h.py create mode 100644 behavior_metrics/brains/CARLA/brain_carla_bird_eye_deep_learning_x3_previous_v_t_t-4_t-9_broken_input.py create mode 100644 behavior_metrics/brains/CARLA/brain_carla_bird_eye_deep_learning_x3_t_t-4_t-9_70_km_h.py create mode 100644 behavior_metrics/brains/CARLA/brain_carla_bird_eye_deep_learning_x3_t_t-4_t-9_broken_input.py diff --git a/behavior_metrics/brains/CARLA/brain_carla_bird_eye_deep_learning.py b/behavior_metrics/brains/CARLA/brain_carla_bird_eye_deep_learning.py index 99db5063..23d0de1e 100644 --- a/behavior_metrics/brains/CARLA/brain_carla_bird_eye_deep_learning.py +++ b/behavior_metrics/brains/CARLA/brain_carla_bird_eye_deep_learning.py @@ -9,7 +9,7 @@ import carla from os import path from albumentations import ( - Compose, Normalize, RandomRain, RandomBrightness, RandomShadow, RandomSnow, RandomFog, RandomSunFlare + Compose, Normalize, RandomRain, RandomBrightness, RandomShadow, RandomSnow, RandomFog, RandomSunFlare, GridDropout ) from utils.constants import PRETRAINED_MODELS_DIR, ROOT_PATH from utils.logger import logger @@ -21,12 +21,13 @@ from tensorflow.python.framework.errors_impl import UnimplementedError import tensorflow as tf -import os -os.environ['CUDA_VISIBLE_DEVICES'] = '-1' -#gpus = tf.config.experimental.list_physical_devices('GPU') -#for gpu in gpus: -# tf.config.experimental.set_memory_growth(gpu, True) +#import os +#os.environ['CUDA_VISIBLE_DEVICES'] = '-1' + +gpus = tf.config.experimental.list_physical_devices('GPU') +for gpu in gpus: + tf.config.experimental.set_memory_growth(gpu, True) class Brain: @@ -156,21 +157,15 @@ def execute(self): speed = self.vehicle.get_velocity() vehicle_speed = 3.6 * math.sqrt(speed.x**2 + speed.y**2 + speed.z**2) - self.previous_speed = vehicle_speed - if vehicle_speed > 30: - self.motors.sendThrottle(0) + if vehicle_speed < 5: + self.motors.sendThrottle(1.0) + self.motors.sendSteer(0.0) + self.motors.sendBrake(0) + else: + self.motors.sendThrottle(throttle) self.motors.sendSteer(steer) self.motors.sendBrake(break_command) - 
else: - if vehicle_speed < 5: - self.motors.sendThrottle(1.0) - self.motors.sendSteer(0.0) - self.motors.sendBrake(0) - else: - self.motors.sendThrottle(throttle) - self.motors.sendSteer(steer) - self.motors.sendBrake(break_command) except NotFoundError as ex: logger.info('Error inside brain: NotFoundError!') logger.warning(type(ex).__name__) diff --git a/behavior_metrics/brains/CARLA/brain_carla_bird_eye_deep_learning_70_km_h.py b/behavior_metrics/brains/CARLA/brain_carla_bird_eye_deep_learning_70_km_h.py new file mode 100644 index 00000000..1075b947 --- /dev/null +++ b/behavior_metrics/brains/CARLA/brain_carla_bird_eye_deep_learning_70_km_h.py @@ -0,0 +1,191 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +import csv +import cv2 +import math +import numpy as np +import threading +import time +import carla +from os import path +from albumentations import ( + Compose, Normalize, RandomRain, RandomBrightness, RandomShadow, RandomSnow, RandomFog, RandomSunFlare +) +from utils.constants import PRETRAINED_MODELS_DIR, ROOT_PATH +from utils.logger import logger +from traceback import print_exc + +PRETRAINED_MODELS = ROOT_PATH + '/' + PRETRAINED_MODELS_DIR + 'carla_tf_models/' + +from tensorflow.python.framework.errors_impl import NotFoundError +from tensorflow.python.framework.errors_impl import UnimplementedError +import tensorflow as tf + + +#import os +#os.environ['CUDA_VISIBLE_DEVICES'] = '-1' + +gpus = tf.config.experimental.list_physical_devices('GPU') +for gpu in gpus: + tf.config.experimental.set_memory_growth(gpu, True) + +class Brain: + + def __init__(self, sensors, actuators, handler, model, config=None): + self.camera_0 = sensors.get_camera('camera_0') + self.camera_1 = sensors.get_camera('camera_1') + self.camera_2 = sensors.get_camera('camera_2') + self.camera_3 = sensors.get_camera('camera_3') + + self.cameras_first_images = [] + + self.pose = sensors.get_pose3d('pose3d_0') + + self.bird_eye_view = sensors.get_bird_eye_view('bird_eye_view_0') + + self.motors = actuators.get_motor('motors_0') + self.handler = handler + self.config = config + self.inference_times = [] + self.gpu_inference = True if tf.test.gpu_device_name() else False + + self.threshold_image = np.zeros((640, 360, 3), np.uint8) + self.color_image = np.zeros((640, 360, 3), np.uint8) + + client = carla.Client('localhost', 2000) + client.set_timeout(10.0) # seconds + world = client.get_world() + + time.sleep(5) + self.vehicle = world.get_actors().filter('vehicle.*')[0] + + if model: + if not path.exists(PRETRAINED_MODELS + model): + logger.info("File " + model + " cannot be found in " + PRETRAINED_MODELS) + logger.info("** Load TF model **") + self.net = tf.keras.models.load_model(PRETRAINED_MODELS + model, compile=False) + logger.info("** Loaded TF model **") + else: + logger.info("** Brain not loaded **") + logger.info("- Models path: " + PRETRAINED_MODELS) + logger.info("- Model: " + str(model)) + + self.previous_bird_eye_view_image = 0 + self.bird_eye_view_images = 0 + self.bird_eye_view_unique_images = 0 + + self.first_acceleration = True + + + def update_frame(self, frame_id, data): + """Update the information to be shown in one of the GUI's frames. + + Arguments: + frame_id {str} -- Id of the frame that will represent the data + data {*} -- Data to be shown in the frame. 
Depending on the type of frame (rgbimage, laser, pose3d, etc) + """ + if data.shape[0] != data.shape[1]: + if data.shape[0] > data.shape[1]: + difference = data.shape[0] - data.shape[1] + extra_left, extra_right = int(difference/2), int(difference/2) + extra_top, extra_bottom = 0, 0 + else: + difference = data.shape[1] - data.shape[0] + extra_left, extra_right = 0, 0 + extra_top, extra_bottom = int(difference/2), int(difference/2) + + + data = np.pad(data, ((extra_top, extra_bottom), (extra_left, extra_right), (0, 0)), mode='constant', constant_values=0) + + self.handler.update_frame(frame_id, data) + + def update_pose(self, pose_data): + self.handler.update_pose3d(pose_data) + + def execute(self): + image = self.camera_0.getImage().data + image_1 = self.camera_1.getImage().data + image_2 = self.camera_2.getImage().data + image_3 = self.camera_3.getImage().data + + bird_eye_view_1 = self.bird_eye_view.getImage(self.vehicle) + bird_eye_view_1 = cv2.cvtColor(bird_eye_view_1, cv2.COLOR_BGR2RGB) + + if self.cameras_first_images == []: + self.cameras_first_images.append(image) + self.cameras_first_images.append(image_1) + self.cameras_first_images.append(image_2) + self.cameras_first_images.append(image_3) + self.cameras_first_images.append(bird_eye_view_1) + + self.cameras_last_images = [ + image, + image_1, + image_2, + image_3, + bird_eye_view_1 + ] + + self.update_frame('frame_1', image_1) + self.update_frame('frame_2', image_2) + self.update_frame('frame_3', image_3) + + self.update_frame('frame_0', bird_eye_view_1) + + self.update_pose(self.pose.getPose3d()) + + image_shape=(50, 150) + img_base = cv2.resize(bird_eye_view_1, image_shape) + + AUGMENTATIONS_TEST = Compose([ + Normalize() + ]) + image = AUGMENTATIONS_TEST(image=img_base) + img = image["image"] + + self.bird_eye_view_images += 1 + if (self.previous_bird_eye_view_image==img).all() == False: + self.bird_eye_view_unique_images += 1 + self.previous_bird_eye_view_image = img + + img = np.expand_dims(img, axis=0) + start_time = time.time() + try: + prediction = self.net.predict(img, verbose=0) + self.inference_times.append(time.time() - start_time) + throttle = prediction[0][0] + steer = prediction[0][1] * (1 - (-1)) + (-1) + break_command = prediction[0][2] + + speed = self.vehicle.get_velocity() + vehicle_speed = 3.6 * math.sqrt(speed.x**2 + speed.y**2 + speed.z**2) + + if vehicle_speed < 70 and self.first_acceleration: + self.motors.sendThrottle(1.0) + self.motors.sendSteer(0.0) + self.motors.sendBrake(0) + else: + self.first_acceleration = False + self.motors.sendThrottle(throttle) + self.motors.sendSteer(steer) + self.motors.sendBrake(break_command) + except NotFoundError as ex: + logger.info('Error inside brain: NotFoundError!') + logger.warning(type(ex).__name__) + print_exc() + raise Exception(ex) + except UnimplementedError as ex: + logger.info('Error inside brain: UnimplementedError!') + logger.warning(type(ex).__name__) + print_exc() + raise Exception(ex) + except Exception as ex: + logger.info('Error inside brain: Exception!') + logger.warning(type(ex).__name__) + print_exc() + raise Exception(ex) + + + + + diff --git a/behavior_metrics/brains/CARLA/brain_carla_bird_eye_deep_learning_broken_input.py b/behavior_metrics/brains/CARLA/brain_carla_bird_eye_deep_learning_broken_input.py new file mode 100644 index 00000000..9a0926ff --- /dev/null +++ b/behavior_metrics/brains/CARLA/brain_carla_bird_eye_deep_learning_broken_input.py @@ -0,0 +1,195 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +import csv +import cv2 +import 
math +import numpy as np +import threading +import time +import carla +from os import path +from albumentations import ( + Compose, Normalize, RandomRain, RandomBrightness, RandomShadow, RandomSnow, RandomFog, RandomSunFlare, GridDropout +) +from utils.constants import PRETRAINED_MODELS_DIR, ROOT_PATH +from utils.logger import logger +from traceback import print_exc + +PRETRAINED_MODELS = ROOT_PATH + '/' + PRETRAINED_MODELS_DIR + 'carla_tf_models/' + +from tensorflow.python.framework.errors_impl import NotFoundError +from tensorflow.python.framework.errors_impl import UnimplementedError +import tensorflow as tf + + +#import os +#os.environ['CUDA_VISIBLE_DEVICES'] = '-1' + +gpus = tf.config.experimental.list_physical_devices('GPU') +for gpu in gpus: + tf.config.experimental.set_memory_growth(gpu, True) + +class Brain: + + def __init__(self, sensors, actuators, handler, model, config=None): + self.camera_0 = sensors.get_camera('camera_0') + self.camera_1 = sensors.get_camera('camera_1') + self.camera_2 = sensors.get_camera('camera_2') + self.camera_3 = sensors.get_camera('camera_3') + + self.cameras_first_images = [] + + self.pose = sensors.get_pose3d('pose3d_0') + + self.bird_eye_view = sensors.get_bird_eye_view('bird_eye_view_0') + + self.motors = actuators.get_motor('motors_0') + self.handler = handler + self.config = config + self.inference_times = [] + self.gpu_inference = True if tf.test.gpu_device_name() else False + + self.threshold_image = np.zeros((640, 360, 3), np.uint8) + self.color_image = np.zeros((640, 360, 3), np.uint8) + + client = carla.Client('localhost', 2000) + client.set_timeout(10.0) # seconds + world = client.get_world() + + time.sleep(5) + self.vehicle = world.get_actors().filter('vehicle.*')[0] + + if model: + if not path.exists(PRETRAINED_MODELS + model): + logger.info("File " + model + " cannot be found in " + PRETRAINED_MODELS) + logger.info("** Load TF model **") + self.net = tf.keras.models.load_model(PRETRAINED_MODELS + model, compile=False) + logger.info("** Loaded TF model **") + else: + logger.info("** Brain not loaded **") + logger.info("- Models path: " + PRETRAINED_MODELS) + logger.info("- Model: " + str(model)) + + self.previous_bird_eye_view_image = 0 + self.bird_eye_view_images = 0 + self.bird_eye_view_unique_images = 0 + + + def update_frame(self, frame_id, data): + """Update the information to be shown in one of the GUI's frames. + + Arguments: + frame_id {str} -- Id of the frame that will represent the data + data {*} -- Data to be shown in the frame. 
Depending on the type of frame (rgbimage, laser, pose3d, etc) + """ + if data.shape[0] != data.shape[1]: + if data.shape[0] > data.shape[1]: + difference = data.shape[0] - data.shape[1] + extra_left, extra_right = int(difference/2), int(difference/2) + extra_top, extra_bottom = 0, 0 + else: + difference = data.shape[1] - data.shape[0] + extra_left, extra_right = 0, 0 + extra_top, extra_bottom = int(difference/2), int(difference/2) + + + data = np.pad(data, ((extra_top, extra_bottom), (extra_left, extra_right), (0, 0)), mode='constant', constant_values=0) + + self.handler.update_frame(frame_id, data) + + def update_pose(self, pose_data): + self.handler.update_pose3d(pose_data) + + def execute(self): + image = self.camera_0.getImage().data + image_1 = self.camera_1.getImage().data + image_2 = self.camera_2.getImage().data + image_3 = self.camera_3.getImage().data + + bird_eye_view_1 = self.bird_eye_view.getImage(self.vehicle) + bird_eye_view_1 = cv2.cvtColor(bird_eye_view_1, cv2.COLOR_BGR2RGB) + + if self.cameras_first_images == []: + self.cameras_first_images.append(image) + self.cameras_first_images.append(image_1) + self.cameras_first_images.append(image_2) + self.cameras_first_images.append(image_3) + self.cameras_first_images.append(bird_eye_view_1) + + self.cameras_last_images = [ + image, + image_1, + image_2, + image_3, + bird_eye_view_1 + ] + + AUGMENTATIONS_TEST = Compose([ + GridDropout(p=1.0) + ]) + + bird_eye_view_1 = AUGMENTATIONS_TEST(image=bird_eye_view_1) + bird_eye_view_1 = bird_eye_view_1["image"] + + self.update_frame('frame_1', image_1) + self.update_frame('frame_2', image_2) + self.update_frame('frame_3', image_3) + + self.update_frame('frame_0', bird_eye_view_1) + + self.update_pose(self.pose.getPose3d()) + + image_shape=(50, 150) + img_base = cv2.resize(bird_eye_view_1, image_shape) + + AUGMENTATIONS_TEST = Compose([ + Normalize() + ]) + image = AUGMENTATIONS_TEST(image=img_base) + img = image["image"] + + self.bird_eye_view_images += 1 + if (self.previous_bird_eye_view_image==img).all() == False: + self.bird_eye_view_unique_images += 1 + self.previous_bird_eye_view_image = img + + img = np.expand_dims(img, axis=0) + start_time = time.time() + try: + prediction = self.net.predict(img, verbose=0) + self.inference_times.append(time.time() - start_time) + throttle = prediction[0][0] + steer = prediction[0][1] * (1 - (-1)) + (-1) + break_command = prediction[0][2] + + speed = self.vehicle.get_velocity() + vehicle_speed = 3.6 * math.sqrt(speed.x**2 + speed.y**2 + speed.z**2) + + if vehicle_speed < 5: + self.motors.sendThrottle(1.0) + self.motors.sendSteer(0.0) + self.motors.sendBrake(0) + else: + self.motors.sendThrottle(throttle) + self.motors.sendSteer(steer) + self.motors.sendBrake(break_command) + except NotFoundError as ex: + logger.info('Error inside brain: NotFoundError!') + logger.warning(type(ex).__name__) + print_exc() + raise Exception(ex) + except UnimplementedError as ex: + logger.info('Error inside brain: UnimplementedError!') + logger.warning(type(ex).__name__) + print_exc() + raise Exception(ex) + except Exception as ex: + logger.info('Error inside brain: Exception!') + logger.warning(type(ex).__name__) + print_exc() + raise Exception(ex) + + + + + diff --git a/behavior_metrics/brains/CARLA/brain_carla_bird_eye_deep_learning_previous_v_70_km_h.py b/behavior_metrics/brains/CARLA/brain_carla_bird_eye_deep_learning_previous_v_70_km_h.py new file mode 100644 index 00000000..0129a22f --- /dev/null +++ 
b/behavior_metrics/brains/CARLA/brain_carla_bird_eye_deep_learning_previous_v_70_km_h.py @@ -0,0 +1,197 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +import csv +import cv2 +import math +import numpy as np +import threading +import time +import carla +from os import path +from albumentations import ( + Compose, Normalize, RandomRain, RandomBrightness, RandomShadow, RandomSnow, RandomFog, RandomSunFlare +) +from utils.constants import PRETRAINED_MODELS_DIR, ROOT_PATH +from utils.logger import logger +from traceback import print_exc + +PRETRAINED_MODELS = ROOT_PATH + '/' + PRETRAINED_MODELS_DIR + 'carla_tf_models/' + +from tensorflow.python.framework.errors_impl import NotFoundError +from tensorflow.python.framework.errors_impl import UnimplementedError +import tensorflow as tf + +import os +os.environ['CUDA_VISIBLE_DEVICES'] = '-1' + +#gpus = tf.config.experimental.list_physical_devices('GPU') +#for gpu in gpus: +# tf.config.experimental.set_memory_growth(gpu, True) + + +class Brain: + + def __init__(self, sensors, actuators, handler, model, config=None): + self.camera_0 = sensors.get_camera('camera_0') + self.camera_1 = sensors.get_camera('camera_1') + self.camera_2 = sensors.get_camera('camera_2') + self.camera_3 = sensors.get_camera('camera_3') + + self.cameras_first_images = [] + + self.pose = sensors.get_pose3d('pose3d_0') + + self.bird_eye_view = sensors.get_bird_eye_view('bird_eye_view_0') + + self.motors = actuators.get_motor('motors_0') + self.handler = handler + self.config = config + self.inference_times = [] + self.gpu_inference = True if tf.test.gpu_device_name() else False + + self.threshold_image = np.zeros((640, 360, 3), np.uint8) + self.color_image = np.zeros((640, 360, 3), np.uint8) + + client = carla.Client('localhost', 2000) + client.set_timeout(10.0) # seconds + world = client.get_world() + + time.sleep(5) + self.vehicle = world.get_actors().filter('vehicle.*')[0] + + if model: + if not path.exists(PRETRAINED_MODELS + model): + logger.info("File " + model + " cannot be found in " + PRETRAINED_MODELS) + logger.info("** Load TF model **") + self.net = tf.keras.models.load_model(PRETRAINED_MODELS + model) + logger.info("** Loaded TF model **") + else: + logger.info("** Brain not loaded **") + logger.info("- Models path: " + PRETRAINED_MODELS) + logger.info("- Model: " + str(model)) + + self.previous_speed = 0 + self.previous_bird_eye_view_image = 0 + self.bird_eye_view_images = 0 + self.bird_eye_view_unique_images = 0 + + self.first_acceleration = True + + + def update_frame(self, frame_id, data): + """Update the information to be shown in one of the GUI's frames. + + Arguments: + frame_id {str} -- Id of the frame that will represent the data + data {*} -- Data to be shown in the frame. 
Depending on the type of frame (rgbimage, laser, pose3d, etc) + """ + if data.shape[0] != data.shape[1]: + if data.shape[0] > data.shape[1]: + difference = data.shape[0] - data.shape[1] + extra_left, extra_right = int(difference/2), int(difference/2) + extra_top, extra_bottom = 0, 0 + else: + difference = data.shape[1] - data.shape[0] + extra_left, extra_right = 0, 0 + extra_top, extra_bottom = int(difference/2), int(difference/2) + + + data = np.pad(data, ((extra_top, extra_bottom), (extra_left, extra_right), (0, 0)), mode='constant', constant_values=0) + + self.handler.update_frame(frame_id, data) + + def update_pose(self, pose_data): + self.handler.update_pose3d(pose_data) + + def execute(self): + image = self.camera_0.getImage().data + image_1 = self.camera_1.getImage().data + image_2 = self.camera_2.getImage().data + image_3 = self.camera_3.getImage().data + + bird_eye_view_1 = self.bird_eye_view.getImage(self.vehicle) + bird_eye_view_1 = cv2.cvtColor(bird_eye_view_1, cv2.COLOR_BGR2RGB) + + if self.cameras_first_images == []: + self.cameras_first_images.append(image) + self.cameras_first_images.append(image_1) + self.cameras_first_images.append(image_2) + self.cameras_first_images.append(image_3) + self.cameras_first_images.append(bird_eye_view_1) + + self.cameras_last_images = [ + image, + image_1, + image_2, + image_3, + bird_eye_view_1 + ] + + self.update_frame('frame_1', image_1) + self.update_frame('frame_2', image_2) + self.update_frame('frame_3', image_3) + + self.update_frame('frame_0', bird_eye_view_1) + + self.update_pose(self.pose.getPose3d()) + + image_shape=(50, 150) + img_base = cv2.resize(bird_eye_view_1, image_shape) + + AUGMENTATIONS_TEST = Compose([ + Normalize() + ]) + image = AUGMENTATIONS_TEST(image=img_base) + img = image["image"] + + self.bird_eye_view_images += 1 + if (self.previous_bird_eye_view_image==img).all() == False: + self.bird_eye_view_unique_images += 1 + self.previous_bird_eye_view_image = img + + velocity_dim = np.full((150, 50), self.previous_speed/30) + new_img_vel = np.dstack((img, velocity_dim)) + img = new_img_vel + + img = np.expand_dims(img, axis=0) + start_time = time.time() + try: + prediction = self.net.predict(img, verbose=0) + self.inference_times.append(time.time() - start_time) + throttle = prediction[0][0] + steer = prediction[0][1] * (1 - (-1)) + (-1) + break_command = prediction[0][2] + speed = self.vehicle.get_velocity() + vehicle_speed = 3.6 * math.sqrt(speed.x**2 + speed.y**2 + speed.z**2) + self.previous_speed = vehicle_speed + + if vehicle_speed < 70 and self.first_acceleration: + self.motors.sendThrottle(1.0) + self.motors.sendSteer(0.0) + self.motors.sendBrake(0) + else: + self.first_acceleration = False + self.motors.sendThrottle(throttle) + self.motors.sendSteer(steer) + self.motors.sendBrake(break_command) + + except NotFoundError as ex: + logger.info('Error inside brain: NotFoundError!') + logger.warning(type(ex).__name__) + print_exc() + raise Exception(ex) + except UnimplementedError as ex: + logger.info('Error inside brain: UnimplementedError!') + logger.warning(type(ex).__name__) + print_exc() + raise Exception(ex) + except Exception as ex: + logger.info('Error inside brain: Exception!') + logger.warning(type(ex).__name__) + print_exc() + raise Exception(ex) + + + + + diff --git a/behavior_metrics/brains/CARLA/brain_carla_bird_eye_deep_learning_previous_v_broken_input.py b/behavior_metrics/brains/CARLA/brain_carla_bird_eye_deep_learning_previous_v_broken_input.py new file mode 100644 index 00000000..793c5963 --- 
/dev/null +++ b/behavior_metrics/brains/CARLA/brain_carla_bird_eye_deep_learning_previous_v_broken_input.py @@ -0,0 +1,201 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +import csv +import cv2 +import math +import numpy as np +import threading +import time +import carla +from os import path +from albumentations import ( + Compose, Normalize, RandomRain, RandomBrightness, RandomShadow, RandomSnow, RandomFog, RandomSunFlare, GridDropout +) +from utils.constants import PRETRAINED_MODELS_DIR, ROOT_PATH +from utils.logger import logger +from traceback import print_exc + +PRETRAINED_MODELS = ROOT_PATH + '/' + PRETRAINED_MODELS_DIR + 'carla_tf_models/' + +from tensorflow.python.framework.errors_impl import NotFoundError +from tensorflow.python.framework.errors_impl import UnimplementedError +import tensorflow as tf + +import os +os.environ['CUDA_VISIBLE_DEVICES'] = '-1' + +#gpus = tf.config.experimental.list_physical_devices('GPU') +#for gpu in gpus: +# tf.config.experimental.set_memory_growth(gpu, True) + + +class Brain: + + def __init__(self, sensors, actuators, handler, model, config=None): + self.camera_0 = sensors.get_camera('camera_0') + self.camera_1 = sensors.get_camera('camera_1') + self.camera_2 = sensors.get_camera('camera_2') + self.camera_3 = sensors.get_camera('camera_3') + + self.cameras_first_images = [] + + self.pose = sensors.get_pose3d('pose3d_0') + + self.bird_eye_view = sensors.get_bird_eye_view('bird_eye_view_0') + + self.motors = actuators.get_motor('motors_0') + self.handler = handler + self.config = config + self.inference_times = [] + self.gpu_inference = True if tf.test.gpu_device_name() else False + + self.threshold_image = np.zeros((640, 360, 3), np.uint8) + self.color_image = np.zeros((640, 360, 3), np.uint8) + + client = carla.Client('localhost', 2000) + client.set_timeout(10.0) # seconds + world = client.get_world() + + time.sleep(5) + self.vehicle = world.get_actors().filter('vehicle.*')[0] + + if model: + if not path.exists(PRETRAINED_MODELS + model): + logger.info("File " + model + " cannot be found in " + PRETRAINED_MODELS) + logger.info("** Load TF model **") + self.net = tf.keras.models.load_model(PRETRAINED_MODELS + model) + logger.info("** Loaded TF model **") + else: + logger.info("** Brain not loaded **") + logger.info("- Models path: " + PRETRAINED_MODELS) + logger.info("- Model: " + str(model)) + + self.previous_speed = 0 + self.previous_bird_eye_view_image = 0 + self.bird_eye_view_images = 0 + self.bird_eye_view_unique_images = 0 + + + def update_frame(self, frame_id, data): + """Update the information to be shown in one of the GUI's frames. + + Arguments: + frame_id {str} -- Id of the frame that will represent the data + data {*} -- Data to be shown in the frame. 
Depending on the type of frame (rgbimage, laser, pose3d, etc) + """ + if data.shape[0] != data.shape[1]: + if data.shape[0] > data.shape[1]: + difference = data.shape[0] - data.shape[1] + extra_left, extra_right = int(difference/2), int(difference/2) + extra_top, extra_bottom = 0, 0 + else: + difference = data.shape[1] - data.shape[0] + extra_left, extra_right = 0, 0 + extra_top, extra_bottom = int(difference/2), int(difference/2) + + + data = np.pad(data, ((extra_top, extra_bottom), (extra_left, extra_right), (0, 0)), mode='constant', constant_values=0) + + self.handler.update_frame(frame_id, data) + + def update_pose(self, pose_data): + self.handler.update_pose3d(pose_data) + + def execute(self): + image = self.camera_0.getImage().data + image_1 = self.camera_1.getImage().data + image_2 = self.camera_2.getImage().data + image_3 = self.camera_3.getImage().data + + bird_eye_view_1 = self.bird_eye_view.getImage(self.vehicle) + bird_eye_view_1 = cv2.cvtColor(bird_eye_view_1, cv2.COLOR_BGR2RGB) + + if self.cameras_first_images == []: + self.cameras_first_images.append(image) + self.cameras_first_images.append(image_1) + self.cameras_first_images.append(image_2) + self.cameras_first_images.append(image_3) + self.cameras_first_images.append(bird_eye_view_1) + + self.cameras_last_images = [ + image, + image_1, + image_2, + image_3, + bird_eye_view_1 + ] + + AUGMENTATIONS_TEST = Compose([ + GridDropout(p=1.0) + ]) + + bird_eye_view_1 = AUGMENTATIONS_TEST(image=bird_eye_view_1) + bird_eye_view_1 = bird_eye_view_1["image"] + + self.update_frame('frame_1', image_1) + self.update_frame('frame_2', image_2) + self.update_frame('frame_3', image_3) + + self.update_frame('frame_0', bird_eye_view_1) + + self.update_pose(self.pose.getPose3d()) + + image_shape=(50, 150) + img_base = cv2.resize(bird_eye_view_1, image_shape) + + AUGMENTATIONS_TEST = Compose([ + Normalize() + ]) + image = AUGMENTATIONS_TEST(image=img_base) + img = image["image"] + + self.bird_eye_view_images += 1 + if (self.previous_bird_eye_view_image==img).all() == False: + self.bird_eye_view_unique_images += 1 + self.previous_bird_eye_view_image = img + + velocity_dim = np.full((150, 50), self.previous_speed/30) + new_img_vel = np.dstack((img, velocity_dim)) + img = new_img_vel + + img = np.expand_dims(img, axis=0) + start_time = time.time() + try: + prediction = self.net.predict(img, verbose=0) + self.inference_times.append(time.time() - start_time) + throttle = prediction[0][0] + steer = prediction[0][1] * (1 - (-1)) + (-1) + break_command = prediction[0][2] + speed = self.vehicle.get_velocity() + vehicle_speed = 3.6 * math.sqrt(speed.x**2 + speed.y**2 + speed.z**2) + self.previous_speed = vehicle_speed + + if vehicle_speed < 5: + self.motors.sendThrottle(1.0) + self.motors.sendSteer(0.0) + self.motors.sendBrake(0) + else: + self.motors.sendThrottle(throttle) + self.motors.sendSteer(steer) + self.motors.sendBrake(break_command) + + except NotFoundError as ex: + logger.info('Error inside brain: NotFoundError!') + logger.warning(type(ex).__name__) + print_exc() + raise Exception(ex) + except UnimplementedError as ex: + logger.info('Error inside brain: UnimplementedError!') + logger.warning(type(ex).__name__) + print_exc() + raise Exception(ex) + except Exception as ex: + logger.info('Error inside brain: Exception!') + logger.warning(type(ex).__name__) + print_exc() + raise Exception(ex) + + + + + diff --git a/behavior_metrics/brains/CARLA/brain_carla_bird_eye_deep_learning_x3_previous_v_t_t-4_t-9.py 
b/behavior_metrics/brains/CARLA/brain_carla_bird_eye_deep_learning_x3_previous_v_t_t-4_t-9.py index f62587e6..bce4b439 100644 --- a/behavior_metrics/brains/CARLA/brain_carla_bird_eye_deep_learning_x3_previous_v_t_t-4_t-9.py +++ b/behavior_metrics/brains/CARLA/brain_carla_bird_eye_deep_learning_x3_previous_v_t_t-4_t-9.py @@ -9,7 +9,7 @@ import carla from os import path from albumentations import ( - Compose, Normalize, RandomRain, RandomBrightness, RandomShadow, RandomSnow, RandomFog, RandomSunFlare + Compose, Normalize, RandomRain, RandomBrightness, RandomShadow, RandomSnow, RandomFog, RandomSunFlare, GridDropout ) from utils.constants import PRETRAINED_MODELS_DIR, ROOT_PATH from utils.logger import logger @@ -145,6 +145,15 @@ def execute(self): bird_eye_view_1 ] + + AUGMENTATIONS_TEST = Compose([ + GridDropout(p=1.0) + ]) + + bird_eye_view_1 = AUGMENTATIONS_TEST(image=bird_eye_view_1) + bird_eye_view_1 = bird_eye_view_1["image"] + + self.update_frame('frame_1', image_1) self.update_frame('frame_2', image_2) self.update_frame('frame_3', image_3) diff --git a/behavior_metrics/brains/CARLA/brain_carla_bird_eye_deep_learning_x3_previous_v_t_t-4_t-9_70_km_h.py b/behavior_metrics/brains/CARLA/brain_carla_bird_eye_deep_learning_x3_previous_v_t_t-4_t-9_70_km_h.py new file mode 100644 index 00000000..0ba8c00d --- /dev/null +++ b/behavior_metrics/brains/CARLA/brain_carla_bird_eye_deep_learning_x3_previous_v_t_t-4_t-9_70_km_h.py @@ -0,0 +1,299 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +import csv +import cv2 +import math +import numpy as np +import threading +import time +import carla +from os import path +from albumentations import ( + Compose, Normalize, RandomRain, RandomBrightness, RandomShadow, RandomSnow, RandomFog, RandomSunFlare +) +from utils.constants import PRETRAINED_MODELS_DIR, ROOT_PATH +from utils.logger import logger +from traceback import print_exc + +PRETRAINED_MODELS = ROOT_PATH + '/' + PRETRAINED_MODELS_DIR + 'carla_tf_models/' + +from tensorflow.python.framework.errors_impl import NotFoundError +from tensorflow.python.framework.errors_impl import UnimplementedError +import tensorflow as tf + +import os +os.environ['CUDA_VISIBLE_DEVICES'] = '-1' + +#gpus = tf.config.experimental.list_physical_devices('GPU') +#for gpu in gpus: +# tf.config.experimental.set_memory_growth(gpu, True) + + +class Brain: + + def __init__(self, sensors, actuators, handler, model, config=None): + self.camera_0 = sensors.get_camera('camera_0') + self.camera_1 = sensors.get_camera('camera_1') + self.camera_2 = sensors.get_camera('camera_2') + self.camera_3 = sensors.get_camera('camera_3') + + self.cameras_first_images = [] + + self.pose = sensors.get_pose3d('pose3d_0') + + self.bird_eye_view = sensors.get_bird_eye_view('bird_eye_view_0') + + self.motors = actuators.get_motor('motors_0') + self.handler = handler + self.config = config + self.inference_times = [] + self.gpu_inference = True if tf.test.gpu_device_name() else False + + self.threshold_image = np.zeros((640, 360, 3), np.uint8) + self.color_image = np.zeros((640, 360, 3), np.uint8) + + client = carla.Client('localhost', 2000) + client.set_timeout(10.0) # seconds + world = client.get_world() + + time.sleep(5) + self.vehicle = world.get_actors().filter('vehicle.*')[0] + + if model: + if not path.exists(PRETRAINED_MODELS + model): + logger.info("File " + model + " cannot be found in " + PRETRAINED_MODELS) + logger.info("** Load TF model **") + self.net = tf.keras.models.load_model(PRETRAINED_MODELS + model) + logger.info("** Loaded TF model **") 
+ else: + logger.info("** Brain not loaded **") + logger.info("- Models path: " + PRETRAINED_MODELS) + logger.info("- Model: " + str(model)) + + self.previous_speed = 0 + self.previous_bird_eye_view_image = 0 + self.bird_eye_view_images = 0 + self.bird_eye_view_unique_images = 0 + + self.image_1 = 0 + self.image_2 = 0 + self.image_3 = 0 + self.image_4 = 0 + self.image_5 = 0 + self.image_6 = 0 + self.image_7 = 0 + self.image_8 = 0 + self.image_9 = 0 + + self.image_1_V = 0 + self.image_2_V = 0 + self.image_3_V = 0 + self.image_4_V = 0 + self.image_5_V = 0 + self.image_6_V = 0 + self.image_7_V = 0 + self.image_8_V = 0 + self.image_9_V = 0 + + self.first_acceleration = True + + + def update_frame(self, frame_id, data): + """Update the information to be shown in one of the GUI's frames. + + Arguments: + frame_id {str} -- Id of the frame that will represent the data + data {*} -- Data to be shown in the frame. Depending on the type of frame (rgbimage, laser, pose3d, etc) + """ + if data.shape[0] != data.shape[1]: + if data.shape[0] > data.shape[1]: + difference = data.shape[0] - data.shape[1] + extra_left, extra_right = int(difference/2), int(difference/2) + extra_top, extra_bottom = 0, 0 + else: + difference = data.shape[1] - data.shape[0] + extra_left, extra_right = 0, 0 + extra_top, extra_bottom = int(difference/2), int(difference/2) + + + data = np.pad(data, ((extra_top, extra_bottom), (extra_left, extra_right), (0, 0)), mode='constant', constant_values=0) + + self.handler.update_frame(frame_id, data) + + def update_pose(self, pose_data): + self.handler.update_pose3d(pose_data) + + def execute(self): + image = self.camera_0.getImage().data + image_1 = self.camera_1.getImage().data + image_2 = self.camera_2.getImage().data + image_3 = self.camera_3.getImage().data + + bird_eye_view_1 = self.bird_eye_view.getImage(self.vehicle) + bird_eye_view_1 = cv2.cvtColor(bird_eye_view_1, cv2.COLOR_BGR2RGB) + + if self.cameras_first_images == []: + self.cameras_first_images.append(image) + self.cameras_first_images.append(image_1) + self.cameras_first_images.append(image_2) + self.cameras_first_images.append(image_3) + self.cameras_first_images.append(bird_eye_view_1) + + self.cameras_last_images = [ + image, + image_1, + image_2, + image_3, + bird_eye_view_1 + ] + + self.update_frame('frame_1', image_1) + self.update_frame('frame_2', image_2) + self.update_frame('frame_3', image_3) + + self.update_frame('frame_0', bird_eye_view_1) + + self.update_pose(self.pose.getPose3d()) + + image_shape=(50, 150) + img_base = cv2.resize(bird_eye_view_1, image_shape) + + AUGMENTATIONS_TEST = Compose([ + Normalize() + ]) + image = AUGMENTATIONS_TEST(image=img_base) + img = image["image"] + + self.bird_eye_view_images += 1 + if (self.previous_bird_eye_view_image==img).all() == False: + self.bird_eye_view_unique_images += 1 + self.previous_bird_eye_view_image = img + + if type(self.image_1) is int: + self.image_1 = img + self.image_1_V = 0 + speed = self.vehicle.get_velocity() + vehicle_speed = 3.6 * math.sqrt(speed.x**2 + speed.y**2 + speed.z**2) + self.previous_speed = vehicle_speed + elif type(self.image_2) is int: + self.image_2 = img + self.image_2_V = self.previous_speed + speed = self.vehicle.get_velocity() + vehicle_speed = 3.6 * math.sqrt(speed.x**2 + speed.y**2 + speed.z**2) + self.previous_speed = vehicle_speed + elif type(self.image_3) is int: + self.image_3 = img + self.image_3_V = self.previous_speed + speed = self.vehicle.get_velocity() + vehicle_speed = 3.6 * math.sqrt(speed.x**2 + speed.y**2 + speed.z**2) 
+ self.previous_speed = vehicle_speed + elif type(self.image_4) is int: + self.image_4 = img + self.image_4_V = self.previous_speed + speed = self.vehicle.get_velocity() + vehicle_speed = 3.6 * math.sqrt(speed.x**2 + speed.y**2 + speed.z**2) + self.previous_speed = vehicle_speed + elif type(self.image_5) is int: + self.image_5 = img + self.image_5_V = self.previous_speed + speed = self.vehicle.get_velocity() + vehicle_speed = 3.6 * math.sqrt(speed.x**2 + speed.y**2 + speed.z**2) + self.previous_speed = vehicle_speed + elif type(self.image_6) is int: + self.image_6 = img + self.image_6_V = self.previous_speed + speed = self.vehicle.get_velocity() + vehicle_speed = 3.6 * math.sqrt(speed.x**2 + speed.y**2 + speed.z**2) + self.previous_speed = vehicle_speed + elif type(self.image_7) is int: + self.image_7 = img + self.image_7_V = self.previous_speed + speed = self.vehicle.get_velocity() + vehicle_speed = 3.6 * math.sqrt(speed.x**2 + speed.y**2 + speed.z**2) + self.previous_speed = vehicle_speed + elif type(self.image_8) is int: + self.image_8 = img + self.image_8_V = self.previous_speed + speed = self.vehicle.get_velocity() + vehicle_speed = 3.6 * math.sqrt(speed.x**2 + speed.y**2 + speed.z**2) + self.previous_speed = vehicle_speed + elif type(self.image_9) is int: + self.image_9 = img + self.image_9_V = self.previous_speed + speed = self.vehicle.get_velocity() + vehicle_speed = 3.6 * math.sqrt(speed.x**2 + speed.y**2 + speed.z**2) + self.previous_speed = vehicle_speed + else: + self.image_1 = self.image_2 + self.image_2 = self.image_3 + self.image_3 = self.image_4 + self.image_4 = self.image_5 + self.image_5 = self.image_6 + self.image_6 = self.image_7 + self.image_7 = self.image_8 + self.image_8 = self.image_9 + self.image_9 = img + + self.image_1_V = self.image_2_V + self.image_2_V = self.image_3_V + self.image_3_V = self.image_4_V + self.image_4_V = self.image_5_V + self.image_5_V = self.image_6_V + self.image_6_V = self.image_7_V + self.image_7_V = self.image_8_V + self.image_8_V = self.image_9_V + self.image_9_V = self.previous_speed + + velocity_dim_1 = np.full((150, 50), self.image_1_V/30) + image_1 = np.dstack((self.image_1, velocity_dim_1)) + + velocity_dim_4 = np.full((150, 50), self.image_4_V/30) + image_4 = np.dstack((self.image_4, velocity_dim_4)) + + velocity_dim_9 = np.full((150, 50), self.image_9_V/30) + image_9 = np.dstack((self.image_9, velocity_dim_9)) + + img = [image_1, image_4 , image_9] + + img = np.expand_dims(img, axis=0) + + start_time = time.time() + try: + prediction = self.net.predict(img, verbose=0) + self.inference_times.append(time.time() - start_time) + throttle = prediction[0][0] + steer = prediction[0][1] * (1 - (-1)) + (-1) + break_command = prediction[0][2] + speed = self.vehicle.get_velocity() + vehicle_speed = 3.6 * math.sqrt(speed.x**2 + speed.y**2 + speed.z**2) + self.previous_speed = vehicle_speed + if vehicle_speed < 70 and self.first_acceleration: + self.motors.sendThrottle(1.0) + self.motors.sendSteer(0.0) + self.motors.sendBrake(0) + else: + self.first_acceleration = False + self.motors.sendThrottle(throttle) + self.motors.sendSteer(steer) + self.motors.sendBrake(break_command) + + except NotFoundError as ex: + logger.info('Error inside brain: NotFoundError!') + logger.warning(type(ex).__name__) + print_exc() + raise Exception(ex) + except UnimplementedError as ex: + logger.info('Error inside brain: UnimplementedError!') + logger.warning(type(ex).__name__) + print_exc() + raise Exception(ex) + except Exception as ex: + logger.info('Error inside 
brain: Exception!') + logger.warning(type(ex).__name__) + print_exc() + raise Exception(ex) + + + + + diff --git a/behavior_metrics/brains/CARLA/brain_carla_bird_eye_deep_learning_x3_previous_v_t_t-4_t-9_broken_input.py b/behavior_metrics/brains/CARLA/brain_carla_bird_eye_deep_learning_x3_previous_v_t_t-4_t-9_broken_input.py new file mode 100644 index 00000000..bce4b439 --- /dev/null +++ b/behavior_metrics/brains/CARLA/brain_carla_bird_eye_deep_learning_x3_previous_v_t_t-4_t-9_broken_input.py @@ -0,0 +1,305 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +import csv +import cv2 +import math +import numpy as np +import threading +import time +import carla +from os import path +from albumentations import ( + Compose, Normalize, RandomRain, RandomBrightness, RandomShadow, RandomSnow, RandomFog, RandomSunFlare, GridDropout +) +from utils.constants import PRETRAINED_MODELS_DIR, ROOT_PATH +from utils.logger import logger +from traceback import print_exc + +PRETRAINED_MODELS = ROOT_PATH + '/' + PRETRAINED_MODELS_DIR + 'carla_tf_models/' + +from tensorflow.python.framework.errors_impl import NotFoundError +from tensorflow.python.framework.errors_impl import UnimplementedError +import tensorflow as tf + +import os +os.environ['CUDA_VISIBLE_DEVICES'] = '-1' + +#gpus = tf.config.experimental.list_physical_devices('GPU') +#for gpu in gpus: +# tf.config.experimental.set_memory_growth(gpu, True) + + +class Brain: + + def __init__(self, sensors, actuators, handler, model, config=None): + self.camera_0 = sensors.get_camera('camera_0') + self.camera_1 = sensors.get_camera('camera_1') + self.camera_2 = sensors.get_camera('camera_2') + self.camera_3 = sensors.get_camera('camera_3') + + self.cameras_first_images = [] + + self.pose = sensors.get_pose3d('pose3d_0') + + self.bird_eye_view = sensors.get_bird_eye_view('bird_eye_view_0') + + self.motors = actuators.get_motor('motors_0') + self.handler = handler + self.config = config + self.inference_times = [] + self.gpu_inference = True if tf.test.gpu_device_name() else False + + self.threshold_image = np.zeros((640, 360, 3), np.uint8) + self.color_image = np.zeros((640, 360, 3), np.uint8) + + client = carla.Client('localhost', 2000) + client.set_timeout(10.0) # seconds + world = client.get_world() + + time.sleep(5) + self.vehicle = world.get_actors().filter('vehicle.*')[0] + + if model: + if not path.exists(PRETRAINED_MODELS + model): + logger.info("File " + model + " cannot be found in " + PRETRAINED_MODELS) + logger.info("** Load TF model **") + self.net = tf.keras.models.load_model(PRETRAINED_MODELS + model) + logger.info("** Loaded TF model **") + else: + logger.info("** Brain not loaded **") + logger.info("- Models path: " + PRETRAINED_MODELS) + logger.info("- Model: " + str(model)) + + self.previous_speed = 0 + self.previous_bird_eye_view_image = 0 + self.bird_eye_view_images = 0 + self.bird_eye_view_unique_images = 0 + + self.image_1 = 0 + self.image_2 = 0 + self.image_3 = 0 + self.image_4 = 0 + self.image_5 = 0 + self.image_6 = 0 + self.image_7 = 0 + self.image_8 = 0 + self.image_9 = 0 + + self.image_1_V = 0 + self.image_2_V = 0 + self.image_3_V = 0 + self.image_4_V = 0 + self.image_5_V = 0 + self.image_6_V = 0 + self.image_7_V = 0 + self.image_8_V = 0 + self.image_9_V = 0 + + + def update_frame(self, frame_id, data): + """Update the information to be shown in one of the GUI's frames. + + Arguments: + frame_id {str} -- Id of the frame that will represent the data + data {*} -- Data to be shown in the frame. 
Depending on the type of frame (rgbimage, laser, pose3d, etc) + """ + if data.shape[0] != data.shape[1]: + if data.shape[0] > data.shape[1]: + difference = data.shape[0] - data.shape[1] + extra_left, extra_right = int(difference/2), int(difference/2) + extra_top, extra_bottom = 0, 0 + else: + difference = data.shape[1] - data.shape[0] + extra_left, extra_right = 0, 0 + extra_top, extra_bottom = int(difference/2), int(difference/2) + + + data = np.pad(data, ((extra_top, extra_bottom), (extra_left, extra_right), (0, 0)), mode='constant', constant_values=0) + + self.handler.update_frame(frame_id, data) + + def update_pose(self, pose_data): + self.handler.update_pose3d(pose_data) + + def execute(self): + image = self.camera_0.getImage().data + image_1 = self.camera_1.getImage().data + image_2 = self.camera_2.getImage().data + image_3 = self.camera_3.getImage().data + + bird_eye_view_1 = self.bird_eye_view.getImage(self.vehicle) + bird_eye_view_1 = cv2.cvtColor(bird_eye_view_1, cv2.COLOR_BGR2RGB) + + if self.cameras_first_images == []: + self.cameras_first_images.append(image) + self.cameras_first_images.append(image_1) + self.cameras_first_images.append(image_2) + self.cameras_first_images.append(image_3) + self.cameras_first_images.append(bird_eye_view_1) + + self.cameras_last_images = [ + image, + image_1, + image_2, + image_3, + bird_eye_view_1 + ] + + + AUGMENTATIONS_TEST = Compose([ + GridDropout(p=1.0) + ]) + + bird_eye_view_1 = AUGMENTATIONS_TEST(image=bird_eye_view_1) + bird_eye_view_1 = bird_eye_view_1["image"] + + + self.update_frame('frame_1', image_1) + self.update_frame('frame_2', image_2) + self.update_frame('frame_3', image_3) + + self.update_frame('frame_0', bird_eye_view_1) + + self.update_pose(self.pose.getPose3d()) + + image_shape=(50, 150) + img_base = cv2.resize(bird_eye_view_1, image_shape) + + AUGMENTATIONS_TEST = Compose([ + Normalize() + ]) + image = AUGMENTATIONS_TEST(image=img_base) + img = image["image"] + + self.bird_eye_view_images += 1 + if (self.previous_bird_eye_view_image==img).all() == False: + self.bird_eye_view_unique_images += 1 + self.previous_bird_eye_view_image = img + + if type(self.image_1) is int: + self.image_1 = img + self.image_1_V = 0 + speed = self.vehicle.get_velocity() + vehicle_speed = 3.6 * math.sqrt(speed.x**2 + speed.y**2 + speed.z**2) + self.previous_speed = vehicle_speed + elif type(self.image_2) is int: + self.image_2 = img + self.image_2_V = self.previous_speed + speed = self.vehicle.get_velocity() + vehicle_speed = 3.6 * math.sqrt(speed.x**2 + speed.y**2 + speed.z**2) + self.previous_speed = vehicle_speed + elif type(self.image_3) is int: + self.image_3 = img + self.image_3_V = self.previous_speed + speed = self.vehicle.get_velocity() + vehicle_speed = 3.6 * math.sqrt(speed.x**2 + speed.y**2 + speed.z**2) + self.previous_speed = vehicle_speed + elif type(self.image_4) is int: + self.image_4 = img + self.image_4_V = self.previous_speed + speed = self.vehicle.get_velocity() + vehicle_speed = 3.6 * math.sqrt(speed.x**2 + speed.y**2 + speed.z**2) + self.previous_speed = vehicle_speed + elif type(self.image_5) is int: + self.image_5 = img + self.image_5_V = self.previous_speed + speed = self.vehicle.get_velocity() + vehicle_speed = 3.6 * math.sqrt(speed.x**2 + speed.y**2 + speed.z**2) + self.previous_speed = vehicle_speed + elif type(self.image_6) is int: + self.image_6 = img + self.image_6_V = self.previous_speed + speed = self.vehicle.get_velocity() + vehicle_speed = 3.6 * math.sqrt(speed.x**2 + speed.y**2 + speed.z**2) + 
self.previous_speed = vehicle_speed + elif type(self.image_7) is int: + self.image_7 = img + self.image_7_V = self.previous_speed + speed = self.vehicle.get_velocity() + vehicle_speed = 3.6 * math.sqrt(speed.x**2 + speed.y**2 + speed.z**2) + self.previous_speed = vehicle_speed + elif type(self.image_8) is int: + self.image_8 = img + self.image_8_V = self.previous_speed + speed = self.vehicle.get_velocity() + vehicle_speed = 3.6 * math.sqrt(speed.x**2 + speed.y**2 + speed.z**2) + self.previous_speed = vehicle_speed + elif type(self.image_9) is int: + self.image_9 = img + self.image_9_V = self.previous_speed + speed = self.vehicle.get_velocity() + vehicle_speed = 3.6 * math.sqrt(speed.x**2 + speed.y**2 + speed.z**2) + self.previous_speed = vehicle_speed + else: + self.image_1 = self.image_2 + self.image_2 = self.image_3 + self.image_3 = self.image_4 + self.image_4 = self.image_5 + self.image_5 = self.image_6 + self.image_6 = self.image_7 + self.image_7 = self.image_8 + self.image_8 = self.image_9 + self.image_9 = img + + self.image_1_V = self.image_2_V + self.image_2_V = self.image_3_V + self.image_3_V = self.image_4_V + self.image_4_V = self.image_5_V + self.image_5_V = self.image_6_V + self.image_6_V = self.image_7_V + self.image_7_V = self.image_8_V + self.image_8_V = self.image_9_V + self.image_9_V = self.previous_speed + + velocity_dim_1 = np.full((150, 50), self.image_1_V/30) + image_1 = np.dstack((self.image_1, velocity_dim_1)) + + velocity_dim_4 = np.full((150, 50), self.image_4_V/30) + image_4 = np.dstack((self.image_4, velocity_dim_4)) + + velocity_dim_9 = np.full((150, 50), self.image_9_V/30) + image_9 = np.dstack((self.image_9, velocity_dim_9)) + + img = [image_1, image_4 , image_9] + + img = np.expand_dims(img, axis=0) + + start_time = time.time() + try: + prediction = self.net.predict(img, verbose=0) + self.inference_times.append(time.time() - start_time) + throttle = prediction[0][0] + steer = prediction[0][1] * (1 - (-1)) + (-1) + break_command = prediction[0][2] + speed = self.vehicle.get_velocity() + vehicle_speed = 3.6 * math.sqrt(speed.x**2 + speed.y**2 + speed.z**2) + self.previous_speed = vehicle_speed + if vehicle_speed < 5: + self.motors.sendThrottle(1.0) + self.motors.sendSteer(0.0) + self.motors.sendBrake(0) + else: + self.motors.sendThrottle(throttle) + self.motors.sendSteer(steer) + self.motors.sendBrake(break_command) + + except NotFoundError as ex: + logger.info('Error inside brain: NotFoundError!') + logger.warning(type(ex).__name__) + print_exc() + raise Exception(ex) + except UnimplementedError as ex: + logger.info('Error inside brain: UnimplementedError!') + logger.warning(type(ex).__name__) + print_exc() + raise Exception(ex) + except Exception as ex: + logger.info('Error inside brain: Exception!') + logger.warning(type(ex).__name__) + print_exc() + raise Exception(ex) + + + + + diff --git a/behavior_metrics/brains/CARLA/brain_carla_bird_eye_deep_learning_x3_t_t-4_t-9.py b/behavior_metrics/brains/CARLA/brain_carla_bird_eye_deep_learning_x3_t_t-4_t-9.py index 4082712e..c6ad5c06 100644 --- a/behavior_metrics/brains/CARLA/brain_carla_bird_eye_deep_learning_x3_t_t-4_t-9.py +++ b/behavior_metrics/brains/CARLA/brain_carla_bird_eye_deep_learning_x3_t_t-4_t-9.py @@ -65,6 +65,7 @@ def __init__(self, sensors, actuators, handler, model, config=None): logger.info("File " + model + " cannot be found in " + PRETRAINED_MODELS) logger.info("** Load TF model **") self.net = tf.keras.models.load_model(PRETRAINED_MODELS + model) + print(self.net.summary()) logger.info("** 
Loaded TF model **") else: logger.info("** Brain not loaded **") diff --git a/behavior_metrics/brains/CARLA/brain_carla_bird_eye_deep_learning_x3_t_t-4_t-9_70_km_h.py b/behavior_metrics/brains/CARLA/brain_carla_bird_eye_deep_learning_x3_t_t-4_t-9_70_km_h.py new file mode 100644 index 00000000..f6662c13 --- /dev/null +++ b/behavior_metrics/brains/CARLA/brain_carla_bird_eye_deep_learning_x3_t_t-4_t-9_70_km_h.py @@ -0,0 +1,225 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +import csv +import cv2 +import math +import numpy as np +import threading +import time +import carla +from os import path +from albumentations import ( + Compose, Normalize, RandomRain, RandomBrightness, RandomShadow, RandomSnow, RandomFog, RandomSunFlare +) +from utils.constants import PRETRAINED_MODELS_DIR, ROOT_PATH +from utils.logger import logger +from traceback import print_exc + +PRETRAINED_MODELS = ROOT_PATH + '/' + PRETRAINED_MODELS_DIR + 'carla_tf_models/' + +from tensorflow.python.framework.errors_impl import NotFoundError +from tensorflow.python.framework.errors_impl import UnimplementedError +import tensorflow as tf + +import os +os.environ['CUDA_VISIBLE_DEVICES'] = '-1' + +#gpus = tf.config.experimental.list_physical_devices('GPU') +#for gpu in gpus: +# tf.config.experimental.set_memory_growth(gpu, True) + + + +class Brain: + + def __init__(self, sensors, actuators, handler, model, config=None): + self.camera_0 = sensors.get_camera('camera_0') + self.camera_1 = sensors.get_camera('camera_1') + self.camera_2 = sensors.get_camera('camera_2') + self.camera_3 = sensors.get_camera('camera_3') + + self.cameras_first_images = [] + + self.pose = sensors.get_pose3d('pose3d_0') + + self.bird_eye_view = sensors.get_bird_eye_view('bird_eye_view_0') + + self.motors = actuators.get_motor('motors_0') + self.handler = handler + self.config = config + self.inference_times = [] + self.gpu_inference = True if tf.test.gpu_device_name() else False + + self.threshold_image = np.zeros((640, 360, 3), np.uint8) + self.color_image = np.zeros((640, 360, 3), np.uint8) + + client = carla.Client('localhost', 2000) + client.set_timeout(10.0) # seconds + world = client.get_world() + + time.sleep(5) + self.vehicle = world.get_actors().filter('vehicle.*')[0] + + if model: + if not path.exists(PRETRAINED_MODELS + model): + logger.info("File " + model + " cannot be found in " + PRETRAINED_MODELS) + logger.info("** Load TF model **") + self.net = tf.keras.models.load_model(PRETRAINED_MODELS + model) + logger.info("** Loaded TF model **") + else: + logger.info("** Brain not loaded **") + logger.info("- Models path: " + PRETRAINED_MODELS) + logger.info("- Model: " + str(model)) + + self.image_1 = 0 + self.image_2 = 0 + self.image_3 = 0 + self.image_4 = 0 + self.image_5 = 0 + self.image_6 = 0 + self.image_7 = 0 + self.image_8 = 0 + self.image_9 = 0 + self.image_10 = 0 + + self.bird_eye_view_images = 0 + self.bird_eye_view_unique_images = 0 + + self.first_acceleration = True + + + def update_frame(self, frame_id, data): + """Update the information to be shown in one of the GUI's frames. + + Arguments: + frame_id {str} -- Id of the frame that will represent the data + data {*} -- Data to be shown in the frame. 
Depending on the type of frame (rgbimage, laser, pose3d, etc) + """ + if data.shape[0] != data.shape[1]: + if data.shape[0] > data.shape[1]: + difference = data.shape[0] - data.shape[1] + extra_left, extra_right = int(difference/2), int(difference/2) + extra_top, extra_bottom = 0, 0 + else: + difference = data.shape[1] - data.shape[0] + extra_left, extra_right = 0, 0 + extra_top, extra_bottom = int(difference/2), int(difference/2) + + + data = np.pad(data, ((extra_top, extra_bottom), (extra_left, extra_right), (0, 0)), mode='constant', constant_values=0) + + self.handler.update_frame(frame_id, data) + + def update_pose(self, pose_data): + self.handler.update_pose3d(pose_data) + + def execute(self): + image = self.camera_0.getImage().data + image_1 = self.camera_1.getImage().data + image_2 = self.camera_2.getImage().data + image_3 = self.camera_3.getImage().data + + bird_eye_view_1 = self.bird_eye_view.getImage(self.vehicle) + bird_eye_view_1 = cv2.cvtColor(bird_eye_view_1, cv2.COLOR_BGR2RGB) + + if self.cameras_first_images == []: + self.cameras_first_images.append(image) + self.cameras_first_images.append(image_1) + self.cameras_first_images.append(image_2) + self.cameras_first_images.append(image_3) + self.cameras_first_images.append(bird_eye_view_1) + + self.cameras_last_images = [ + image, + image_1, + image_2, + image_3, + bird_eye_view_1 + ] + + self.update_frame('frame_1', image_1) + self.update_frame('frame_2', image_2) + self.update_frame('frame_3', image_3) + + self.update_frame('frame_0', bird_eye_view_1) + + self.update_pose(self.pose.getPose3d()) + + image_shape=(50, 150) + img_base = cv2.resize(bird_eye_view_1, image_shape) + + AUGMENTATIONS_TEST = Compose([ + Normalize() + ]) + image = AUGMENTATIONS_TEST(image=img_base) + img = image["image"] + + if type(self.image_1) is int: + self.image_1 = img + elif type(self.image_2) is int: + self.image_2 = img + elif type(self.image_3) is int: + self.image_3 = img + elif type(self.image_4) is int: + self.image_4 = img + elif type(self.image_5) is int: + self.image_5 = img + elif type(self.image_6) is int: + self.image_6 = img + elif type(self.image_7) is int: + self.image_7 = img + elif type(self.image_8) is int: + self.image_8 = img + elif type(self.image_9) is int: + self.image_9 = img + else: + self.bird_eye_view_images += 1 + if (self.image_9==img).all() == False: + self.bird_eye_view_unique_images += 1 + self.image_1 = self.image_2 + self.image_2 = self.image_3 + self.image_3 = self.image_4 + self.image_4 = self.image_5 + self.image_5 = self.image_6 + self.image_6 = self.image_7 + self.image_7 = self.image_8 + self.image_8 = self.image_9 + self.image_9 = img + + img = [self.image_1, self.image_4, self.image_9] + img = np.expand_dims(img, axis=0) + + start_time = time.time() + try: + prediction = self.net.predict(img, verbose=0) + self.inference_times.append(time.time() - start_time) + throttle = prediction[0][0] + steer = prediction[0][1] * (1 - (-1)) + (-1) + break_command = prediction[0][2] + speed = self.vehicle.get_velocity() + vehicle_speed = 3.6 * math.sqrt(speed.x**2 + speed.y**2 + speed.z**2) + + if vehicle_speed < 70 and self.first_acceleration: + self.motors.sendThrottle(1.0) + self.motors.sendSteer(0.0) + self.motors.sendBrake(0) + else: + self.motors.sendThrottle(throttle) + self.motors.sendSteer(steer) + self.motors.sendBrake(break_command) + + except NotFoundError as ex: + logger.info('Error inside brain: NotFoundError!') + logger.warning(type(ex).__name__) + print_exc() + raise Exception(ex) + except 
UnimplementedError as ex: + logger.info('Error inside brain: UnimplementedError!') + logger.warning(type(ex).__name__) + print_exc() + raise Exception(ex) + except Exception as ex: + logger.info('Error inside brain: Exception!') + logger.warning(type(ex).__name__) + print_exc() + raise Exception(ex) \ No newline at end of file diff --git a/behavior_metrics/brains/CARLA/brain_carla_bird_eye_deep_learning_x3_t_t-4_t-9_V_MAX_30.py b/behavior_metrics/brains/CARLA/brain_carla_bird_eye_deep_learning_x3_t_t-4_t-9_V_MAX_30.py index 7451f324..fb2e0f90 100644 --- a/behavior_metrics/brains/CARLA/brain_carla_bird_eye_deep_learning_x3_t_t-4_t-9_V_MAX_30.py +++ b/behavior_metrics/brains/CARLA/brain_carla_bird_eye_deep_learning_x3_t_t-4_t-9_V_MAX_30.py @@ -239,7 +239,7 @@ def execute(self): self.motors.sendSteer(0.0) self.motors.sendBrake(0) else: - self.motors.sendThrottle(throttle) + self.motors.sendThrottle(0.75) self.motors.sendSteer(steer) self.motors.sendBrake(break_command) diff --git a/behavior_metrics/brains/CARLA/brain_carla_bird_eye_deep_learning_x3_t_t-4_t-9_broken_input.py b/behavior_metrics/brains/CARLA/brain_carla_bird_eye_deep_learning_x3_t_t-4_t-9_broken_input.py new file mode 100644 index 00000000..6da25b6d --- /dev/null +++ b/behavior_metrics/brains/CARLA/brain_carla_bird_eye_deep_learning_x3_t_t-4_t-9_broken_input.py @@ -0,0 +1,232 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +import csv +import cv2 +import math +import numpy as np +import threading +import time +import carla +from os import path +from albumentations import ( + Compose, Normalize, RandomRain, RandomBrightness, RandomShadow, RandomSnow, RandomFog, RandomSunFlare, GridDropout +) +from utils.constants import PRETRAINED_MODELS_DIR, ROOT_PATH +from utils.logger import logger +from traceback import print_exc + +PRETRAINED_MODELS = ROOT_PATH + '/' + PRETRAINED_MODELS_DIR + 'carla_tf_models/' + +from tensorflow.python.framework.errors_impl import NotFoundError +from tensorflow.python.framework.errors_impl import UnimplementedError +import tensorflow as tf + +import os +os.environ['CUDA_VISIBLE_DEVICES'] = '-1' + +#gpus = tf.config.experimental.list_physical_devices('GPU') +#for gpu in gpus: +# tf.config.experimental.set_memory_growth(gpu, True) + + + +class Brain: + + def __init__(self, sensors, actuators, handler, model, config=None): + self.camera_0 = sensors.get_camera('camera_0') + self.camera_1 = sensors.get_camera('camera_1') + self.camera_2 = sensors.get_camera('camera_2') + self.camera_3 = sensors.get_camera('camera_3') + + self.cameras_first_images = [] + + self.pose = sensors.get_pose3d('pose3d_0') + + self.bird_eye_view = sensors.get_bird_eye_view('bird_eye_view_0') + + self.motors = actuators.get_motor('motors_0') + self.handler = handler + self.config = config + self.inference_times = [] + self.gpu_inference = True if tf.test.gpu_device_name() else False + + self.threshold_image = np.zeros((640, 360, 3), np.uint8) + self.color_image = np.zeros((640, 360, 3), np.uint8) + + client = carla.Client('localhost', 2000) + client.set_timeout(10.0) # seconds + world = client.get_world() + + time.sleep(5) + self.vehicle = world.get_actors().filter('vehicle.*')[0] + + if model: + if not path.exists(PRETRAINED_MODELS + model): + logger.info("File " + model + " cannot be found in " + PRETRAINED_MODELS) + logger.info("** Load TF model **") + self.net = tf.keras.models.load_model(PRETRAINED_MODELS + model) + print(self.net.summary()) + logger.info("** Loaded TF model **") + else: + logger.info("** Brain not loaded **") + 
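            # The self.image_1 ... self.image_9 attributes set a few lines below
            # start out as the integer 0, which execute() treats as an "empty slot"
            # while the first nine bird's-eye-view frames are buffered; once the
            # buffer is full, frames t, t-4 and t-9 are stacked as the network input.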
logger.info("- Models path: " + PRETRAINED_MODELS) + logger.info("- Model: " + str(model)) + + self.image_1 = 0 + self.image_2 = 0 + self.image_3 = 0 + self.image_4 = 0 + self.image_5 = 0 + self.image_6 = 0 + self.image_7 = 0 + self.image_8 = 0 + self.image_9 = 0 + self.image_10 = 0 + + self.bird_eye_view_images = 0 + self.bird_eye_view_unique_images = 0 + + + def update_frame(self, frame_id, data): + """Update the information to be shown in one of the GUI's frames. + + Arguments: + frame_id {str} -- Id of the frame that will represent the data + data {*} -- Data to be shown in the frame. Depending on the type of frame (rgbimage, laser, pose3d, etc) + """ + if data.shape[0] != data.shape[1]: + if data.shape[0] > data.shape[1]: + difference = data.shape[0] - data.shape[1] + extra_left, extra_right = int(difference/2), int(difference/2) + extra_top, extra_bottom = 0, 0 + else: + difference = data.shape[1] - data.shape[0] + extra_left, extra_right = 0, 0 + extra_top, extra_bottom = int(difference/2), int(difference/2) + + + data = np.pad(data, ((extra_top, extra_bottom), (extra_left, extra_right), (0, 0)), mode='constant', constant_values=0) + + self.handler.update_frame(frame_id, data) + + def update_pose(self, pose_data): + self.handler.update_pose3d(pose_data) + + def execute(self): + image = self.camera_0.getImage().data + image_1 = self.camera_1.getImage().data + image_2 = self.camera_2.getImage().data + image_3 = self.camera_3.getImage().data + + bird_eye_view_1 = self.bird_eye_view.getImage(self.vehicle) + bird_eye_view_1 = cv2.cvtColor(bird_eye_view_1, cv2.COLOR_BGR2RGB) + + if self.cameras_first_images == []: + self.cameras_first_images.append(image) + self.cameras_first_images.append(image_1) + self.cameras_first_images.append(image_2) + self.cameras_first_images.append(image_3) + self.cameras_first_images.append(bird_eye_view_1) + + self.cameras_last_images = [ + image, + image_1, + image_2, + image_3, + bird_eye_view_1 + ] + + + AUGMENTATIONS_TEST = Compose([ + GridDropout(p=1.0) + ]) + + bird_eye_view_1 = AUGMENTATIONS_TEST(image=bird_eye_view_1) + bird_eye_view_1 = bird_eye_view_1["image"] + + self.update_frame('frame_1', image_1) + self.update_frame('frame_2', image_2) + self.update_frame('frame_3', image_3) + + self.update_frame('frame_0', bird_eye_view_1) + + self.update_pose(self.pose.getPose3d()) + + image_shape=(50, 150) + img_base = cv2.resize(bird_eye_view_1, image_shape) + + AUGMENTATIONS_TEST = Compose([ + Normalize() + ]) + image = AUGMENTATIONS_TEST(image=img_base) + img = image["image"] + + if type(self.image_1) is int: + self.image_1 = img + elif type(self.image_2) is int: + self.image_2 = img + elif type(self.image_3) is int: + self.image_3 = img + elif type(self.image_4) is int: + self.image_4 = img + elif type(self.image_5) is int: + self.image_5 = img + elif type(self.image_6) is int: + self.image_6 = img + elif type(self.image_7) is int: + self.image_7 = img + elif type(self.image_8) is int: + self.image_8 = img + elif type(self.image_9) is int: + self.image_9 = img + else: + self.bird_eye_view_images += 1 + if (self.image_9==img).all() == False: + self.bird_eye_view_unique_images += 1 + self.image_1 = self.image_2 + self.image_2 = self.image_3 + self.image_3 = self.image_4 + self.image_4 = self.image_5 + self.image_5 = self.image_6 + self.image_6 = self.image_7 + self.image_7 = self.image_8 + self.image_8 = self.image_9 + self.image_9 = img + + img = [self.image_1, self.image_4, self.image_9] + img = np.expand_dims(img, axis=0) + + start_time = time.time() + 
try: + prediction = self.net.predict(img, verbose=0) + self.inference_times.append(time.time() - start_time) + throttle = prediction[0][0] + steer = prediction[0][1] * (1 - (-1)) + (-1) + break_command = prediction[0][2] + speed = self.vehicle.get_velocity() + vehicle_speed = 3.6 * math.sqrt(speed.x**2 + speed.y**2 + speed.z**2) + + if vehicle_speed < 5: + self.motors.sendThrottle(1.0) + self.motors.sendSteer(0.0) + self.motors.sendBrake(0) + else: + self.motors.sendThrottle(throttle) + self.motors.sendSteer(steer) + self.motors.sendBrake(break_command) + + except NotFoundError as ex: + logger.info('Error inside brain: NotFoundError!') + logger.warning(type(ex).__name__) + print_exc() + raise Exception(ex) + except UnimplementedError as ex: + logger.info('Error inside brain: UnimplementedError!') + logger.warning(type(ex).__name__) + print_exc() + raise Exception(ex) + except Exception as ex: + logger.info('Error inside brain: Exception!') + logger.warning(type(ex).__name__) + print_exc() + raise Exception(ex) \ No newline at end of file From 831a65333323e782e4191fe84913eb3fa09c0b5e Mon Sep 17 00:00:00 2001 From: sergiopaniego Date: Mon, 24 Apr 2023 10:12:57 +0000 Subject: [PATCH 2/3] Added broken input brains --- ...rla_bird_eye_deep_learning_broken_input.py | 4 +- ...a_bird_eye_deep_learning_broken_input_2.py | 195 +++++++++++ ...e_deep_learning_previous_v_broken_input.py | 2 +- ...deep_learning_previous_v_broken_input_2.py | 201 ++++++++++++ ...ng_x3_previous_v_t_t-4_t-9_broken_input.py | 4 +- ..._x3_previous_v_t_t-4_t-9_broken_input_2.py | 306 ++++++++++++++++++ ...ep_learning_x3_t_t-4_t-9_broken_input_2.py | 232 +++++++++++++ 7 files changed, 939 insertions(+), 5 deletions(-) create mode 100644 behavior_metrics/brains/CARLA/brain_carla_bird_eye_deep_learning_broken_input_2.py create mode 100644 behavior_metrics/brains/CARLA/brain_carla_bird_eye_deep_learning_previous_v_broken_input_2.py create mode 100644 behavior_metrics/brains/CARLA/brain_carla_bird_eye_deep_learning_x3_previous_v_t_t-4_t-9_broken_input_2.py create mode 100644 behavior_metrics/brains/CARLA/brain_carla_bird_eye_deep_learning_x3_t_t-4_t-9_broken_input_2.py diff --git a/behavior_metrics/brains/CARLA/brain_carla_bird_eye_deep_learning_broken_input.py b/behavior_metrics/brains/CARLA/brain_carla_bird_eye_deep_learning_broken_input.py index 9a0926ff..57d794a4 100644 --- a/behavior_metrics/brains/CARLA/brain_carla_bird_eye_deep_learning_broken_input.py +++ b/behavior_metrics/brains/CARLA/brain_carla_bird_eye_deep_learning_broken_input.py @@ -9,7 +9,7 @@ import carla from os import path from albumentations import ( - Compose, Normalize, RandomRain, RandomBrightness, RandomShadow, RandomSnow, RandomFog, RandomSunFlare, GridDropout + Compose, Normalize, RandomRain, RandomBrightness, RandomShadow, RandomSnow, RandomFog, RandomSunFlare, GridDropout, ChannelDropout ) from utils.constants import PRETRAINED_MODELS_DIR, ROOT_PATH from utils.logger import logger @@ -125,7 +125,7 @@ def execute(self): ] AUGMENTATIONS_TEST = Compose([ - GridDropout(p=1.0) + GridDropout(p=1.0, ratio=0.9) ]) bird_eye_view_1 = AUGMENTATIONS_TEST(image=bird_eye_view_1) diff --git a/behavior_metrics/brains/CARLA/brain_carla_bird_eye_deep_learning_broken_input_2.py b/behavior_metrics/brains/CARLA/brain_carla_bird_eye_deep_learning_broken_input_2.py new file mode 100644 index 00000000..57d794a4 --- /dev/null +++ b/behavior_metrics/brains/CARLA/brain_carla_bird_eye_deep_learning_broken_input_2.py @@ -0,0 +1,195 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- 
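# This "_2" variant repeats the broken-input experiment with a harsher occlusion:
# GridDropout(p=1.0, ratio=0.9) is applied to every bird's-eye-view frame before
# inference. Unlike most of the other new brains it keeps the GPU visible (only
# enabling memory growth) and loads the Keras model with compile=False.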
+import csv +import cv2 +import math +import numpy as np +import threading +import time +import carla +from os import path +from albumentations import ( + Compose, Normalize, RandomRain, RandomBrightness, RandomShadow, RandomSnow, RandomFog, RandomSunFlare, GridDropout, ChannelDropout +) +from utils.constants import PRETRAINED_MODELS_DIR, ROOT_PATH +from utils.logger import logger +from traceback import print_exc + +PRETRAINED_MODELS = ROOT_PATH + '/' + PRETRAINED_MODELS_DIR + 'carla_tf_models/' + +from tensorflow.python.framework.errors_impl import NotFoundError +from tensorflow.python.framework.errors_impl import UnimplementedError +import tensorflow as tf + + +#import os +#os.environ['CUDA_VISIBLE_DEVICES'] = '-1' + +gpus = tf.config.experimental.list_physical_devices('GPU') +for gpu in gpus: + tf.config.experimental.set_memory_growth(gpu, True) + +class Brain: + + def __init__(self, sensors, actuators, handler, model, config=None): + self.camera_0 = sensors.get_camera('camera_0') + self.camera_1 = sensors.get_camera('camera_1') + self.camera_2 = sensors.get_camera('camera_2') + self.camera_3 = sensors.get_camera('camera_3') + + self.cameras_first_images = [] + + self.pose = sensors.get_pose3d('pose3d_0') + + self.bird_eye_view = sensors.get_bird_eye_view('bird_eye_view_0') + + self.motors = actuators.get_motor('motors_0') + self.handler = handler + self.config = config + self.inference_times = [] + self.gpu_inference = True if tf.test.gpu_device_name() else False + + self.threshold_image = np.zeros((640, 360, 3), np.uint8) + self.color_image = np.zeros((640, 360, 3), np.uint8) + + client = carla.Client('localhost', 2000) + client.set_timeout(10.0) # seconds + world = client.get_world() + + time.sleep(5) + self.vehicle = world.get_actors().filter('vehicle.*')[0] + + if model: + if not path.exists(PRETRAINED_MODELS + model): + logger.info("File " + model + " cannot be found in " + PRETRAINED_MODELS) + logger.info("** Load TF model **") + self.net = tf.keras.models.load_model(PRETRAINED_MODELS + model, compile=False) + logger.info("** Loaded TF model **") + else: + logger.info("** Brain not loaded **") + logger.info("- Models path: " + PRETRAINED_MODELS) + logger.info("- Model: " + str(model)) + + self.previous_bird_eye_view_image = 0 + self.bird_eye_view_images = 0 + self.bird_eye_view_unique_images = 0 + + + def update_frame(self, frame_id, data): + """Update the information to be shown in one of the GUI's frames. + + Arguments: + frame_id {str} -- Id of the frame that will represent the data + data {*} -- Data to be shown in the frame. 
Depending on the type of frame (rgbimage, laser, pose3d, etc) + """ + if data.shape[0] != data.shape[1]: + if data.shape[0] > data.shape[1]: + difference = data.shape[0] - data.shape[1] + extra_left, extra_right = int(difference/2), int(difference/2) + extra_top, extra_bottom = 0, 0 + else: + difference = data.shape[1] - data.shape[0] + extra_left, extra_right = 0, 0 + extra_top, extra_bottom = int(difference/2), int(difference/2) + + + data = np.pad(data, ((extra_top, extra_bottom), (extra_left, extra_right), (0, 0)), mode='constant', constant_values=0) + + self.handler.update_frame(frame_id, data) + + def update_pose(self, pose_data): + self.handler.update_pose3d(pose_data) + + def execute(self): + image = self.camera_0.getImage().data + image_1 = self.camera_1.getImage().data + image_2 = self.camera_2.getImage().data + image_3 = self.camera_3.getImage().data + + bird_eye_view_1 = self.bird_eye_view.getImage(self.vehicle) + bird_eye_view_1 = cv2.cvtColor(bird_eye_view_1, cv2.COLOR_BGR2RGB) + + if self.cameras_first_images == []: + self.cameras_first_images.append(image) + self.cameras_first_images.append(image_1) + self.cameras_first_images.append(image_2) + self.cameras_first_images.append(image_3) + self.cameras_first_images.append(bird_eye_view_1) + + self.cameras_last_images = [ + image, + image_1, + image_2, + image_3, + bird_eye_view_1 + ] + + AUGMENTATIONS_TEST = Compose([ + GridDropout(p=1.0, ratio=0.9) + ]) + + bird_eye_view_1 = AUGMENTATIONS_TEST(image=bird_eye_view_1) + bird_eye_view_1 = bird_eye_view_1["image"] + + self.update_frame('frame_1', image_1) + self.update_frame('frame_2', image_2) + self.update_frame('frame_3', image_3) + + self.update_frame('frame_0', bird_eye_view_1) + + self.update_pose(self.pose.getPose3d()) + + image_shape=(50, 150) + img_base = cv2.resize(bird_eye_view_1, image_shape) + + AUGMENTATIONS_TEST = Compose([ + Normalize() + ]) + image = AUGMENTATIONS_TEST(image=img_base) + img = image["image"] + + self.bird_eye_view_images += 1 + if (self.previous_bird_eye_view_image==img).all() == False: + self.bird_eye_view_unique_images += 1 + self.previous_bird_eye_view_image = img + + img = np.expand_dims(img, axis=0) + start_time = time.time() + try: + prediction = self.net.predict(img, verbose=0) + self.inference_times.append(time.time() - start_time) + throttle = prediction[0][0] + steer = prediction[0][1] * (1 - (-1)) + (-1) + break_command = prediction[0][2] + + speed = self.vehicle.get_velocity() + vehicle_speed = 3.6 * math.sqrt(speed.x**2 + speed.y**2 + speed.z**2) + + if vehicle_speed < 5: + self.motors.sendThrottle(1.0) + self.motors.sendSteer(0.0) + self.motors.sendBrake(0) + else: + self.motors.sendThrottle(throttle) + self.motors.sendSteer(steer) + self.motors.sendBrake(break_command) + except NotFoundError as ex: + logger.info('Error inside brain: NotFoundError!') + logger.warning(type(ex).__name__) + print_exc() + raise Exception(ex) + except UnimplementedError as ex: + logger.info('Error inside brain: UnimplementedError!') + logger.warning(type(ex).__name__) + print_exc() + raise Exception(ex) + except Exception as ex: + logger.info('Error inside brain: Exception!') + logger.warning(type(ex).__name__) + print_exc() + raise Exception(ex) + + + + + diff --git a/behavior_metrics/brains/CARLA/brain_carla_bird_eye_deep_learning_previous_v_broken_input.py b/behavior_metrics/brains/CARLA/brain_carla_bird_eye_deep_learning_previous_v_broken_input.py index 793c5963..09040cca 100644 --- 
a/behavior_metrics/brains/CARLA/brain_carla_bird_eye_deep_learning_previous_v_broken_input.py +++ b/behavior_metrics/brains/CARLA/brain_carla_bird_eye_deep_learning_previous_v_broken_input.py @@ -126,7 +126,7 @@ def execute(self): ] AUGMENTATIONS_TEST = Compose([ - GridDropout(p=1.0) + GridDropout(p=1.0, ratio=0.9) ]) bird_eye_view_1 = AUGMENTATIONS_TEST(image=bird_eye_view_1) diff --git a/behavior_metrics/brains/CARLA/brain_carla_bird_eye_deep_learning_previous_v_broken_input_2.py b/behavior_metrics/brains/CARLA/brain_carla_bird_eye_deep_learning_previous_v_broken_input_2.py new file mode 100644 index 00000000..09040cca --- /dev/null +++ b/behavior_metrics/brains/CARLA/brain_carla_bird_eye_deep_learning_previous_v_broken_input_2.py @@ -0,0 +1,201 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +import csv +import cv2 +import math +import numpy as np +import threading +import time +import carla +from os import path +from albumentations import ( + Compose, Normalize, RandomRain, RandomBrightness, RandomShadow, RandomSnow, RandomFog, RandomSunFlare, GridDropout +) +from utils.constants import PRETRAINED_MODELS_DIR, ROOT_PATH +from utils.logger import logger +from traceback import print_exc + +PRETRAINED_MODELS = ROOT_PATH + '/' + PRETRAINED_MODELS_DIR + 'carla_tf_models/' + +from tensorflow.python.framework.errors_impl import NotFoundError +from tensorflow.python.framework.errors_impl import UnimplementedError +import tensorflow as tf + +import os +os.environ['CUDA_VISIBLE_DEVICES'] = '-1' + +#gpus = tf.config.experimental.list_physical_devices('GPU') +#for gpu in gpus: +# tf.config.experimental.set_memory_growth(gpu, True) + + +class Brain: + + def __init__(self, sensors, actuators, handler, model, config=None): + self.camera_0 = sensors.get_camera('camera_0') + self.camera_1 = sensors.get_camera('camera_1') + self.camera_2 = sensors.get_camera('camera_2') + self.camera_3 = sensors.get_camera('camera_3') + + self.cameras_first_images = [] + + self.pose = sensors.get_pose3d('pose3d_0') + + self.bird_eye_view = sensors.get_bird_eye_view('bird_eye_view_0') + + self.motors = actuators.get_motor('motors_0') + self.handler = handler + self.config = config + self.inference_times = [] + self.gpu_inference = True if tf.test.gpu_device_name() else False + + self.threshold_image = np.zeros((640, 360, 3), np.uint8) + self.color_image = np.zeros((640, 360, 3), np.uint8) + + client = carla.Client('localhost', 2000) + client.set_timeout(10.0) # seconds + world = client.get_world() + + time.sleep(5) + self.vehicle = world.get_actors().filter('vehicle.*')[0] + + if model: + if not path.exists(PRETRAINED_MODELS + model): + logger.info("File " + model + " cannot be found in " + PRETRAINED_MODELS) + logger.info("** Load TF model **") + self.net = tf.keras.models.load_model(PRETRAINED_MODELS + model) + logger.info("** Loaded TF model **") + else: + logger.info("** Brain not loaded **") + logger.info("- Models path: " + PRETRAINED_MODELS) + logger.info("- Model: " + str(model)) + + self.previous_speed = 0 + self.previous_bird_eye_view_image = 0 + self.bird_eye_view_images = 0 + self.bird_eye_view_unique_images = 0 + + + def update_frame(self, frame_id, data): + """Update the information to be shown in one of the GUI's frames. + + Arguments: + frame_id {str} -- Id of the frame that will represent the data + data {*} -- Data to be shown in the frame. 
Depending on the type of frame (rgbimage, laser, pose3d, etc) + """ + if data.shape[0] != data.shape[1]: + if data.shape[0] > data.shape[1]: + difference = data.shape[0] - data.shape[1] + extra_left, extra_right = int(difference/2), int(difference/2) + extra_top, extra_bottom = 0, 0 + else: + difference = data.shape[1] - data.shape[0] + extra_left, extra_right = 0, 0 + extra_top, extra_bottom = int(difference/2), int(difference/2) + + + data = np.pad(data, ((extra_top, extra_bottom), (extra_left, extra_right), (0, 0)), mode='constant', constant_values=0) + + self.handler.update_frame(frame_id, data) + + def update_pose(self, pose_data): + self.handler.update_pose3d(pose_data) + + def execute(self): + image = self.camera_0.getImage().data + image_1 = self.camera_1.getImage().data + image_2 = self.camera_2.getImage().data + image_3 = self.camera_3.getImage().data + + bird_eye_view_1 = self.bird_eye_view.getImage(self.vehicle) + bird_eye_view_1 = cv2.cvtColor(bird_eye_view_1, cv2.COLOR_BGR2RGB) + + if self.cameras_first_images == []: + self.cameras_first_images.append(image) + self.cameras_first_images.append(image_1) + self.cameras_first_images.append(image_2) + self.cameras_first_images.append(image_3) + self.cameras_first_images.append(bird_eye_view_1) + + self.cameras_last_images = [ + image, + image_1, + image_2, + image_3, + bird_eye_view_1 + ] + + AUGMENTATIONS_TEST = Compose([ + GridDropout(p=1.0, ratio=0.9) + ]) + + bird_eye_view_1 = AUGMENTATIONS_TEST(image=bird_eye_view_1) + bird_eye_view_1 = bird_eye_view_1["image"] + + self.update_frame('frame_1', image_1) + self.update_frame('frame_2', image_2) + self.update_frame('frame_3', image_3) + + self.update_frame('frame_0', bird_eye_view_1) + + self.update_pose(self.pose.getPose3d()) + + image_shape=(50, 150) + img_base = cv2.resize(bird_eye_view_1, image_shape) + + AUGMENTATIONS_TEST = Compose([ + Normalize() + ]) + image = AUGMENTATIONS_TEST(image=img_base) + img = image["image"] + + self.bird_eye_view_images += 1 + if (self.previous_bird_eye_view_image==img).all() == False: + self.bird_eye_view_unique_images += 1 + self.previous_bird_eye_view_image = img + + velocity_dim = np.full((150, 50), self.previous_speed/30) + new_img_vel = np.dstack((img, velocity_dim)) + img = new_img_vel + + img = np.expand_dims(img, axis=0) + start_time = time.time() + try: + prediction = self.net.predict(img, verbose=0) + self.inference_times.append(time.time() - start_time) + throttle = prediction[0][0] + steer = prediction[0][1] * (1 - (-1)) + (-1) + break_command = prediction[0][2] + speed = self.vehicle.get_velocity() + vehicle_speed = 3.6 * math.sqrt(speed.x**2 + speed.y**2 + speed.z**2) + self.previous_speed = vehicle_speed + + if vehicle_speed < 5: + self.motors.sendThrottle(1.0) + self.motors.sendSteer(0.0) + self.motors.sendBrake(0) + else: + self.motors.sendThrottle(throttle) + self.motors.sendSteer(steer) + self.motors.sendBrake(break_command) + + except NotFoundError as ex: + logger.info('Error inside brain: NotFoundError!') + logger.warning(type(ex).__name__) + print_exc() + raise Exception(ex) + except UnimplementedError as ex: + logger.info('Error inside brain: UnimplementedError!') + logger.warning(type(ex).__name__) + print_exc() + raise Exception(ex) + except Exception as ex: + logger.info('Error inside brain: Exception!') + logger.warning(type(ex).__name__) + print_exc() + raise Exception(ex) + + + + + diff --git a/behavior_metrics/brains/CARLA/brain_carla_bird_eye_deep_learning_x3_previous_v_t_t-4_t-9_broken_input.py 
b/behavior_metrics/brains/CARLA/brain_carla_bird_eye_deep_learning_x3_previous_v_t_t-4_t-9_broken_input.py index bce4b439..79d50fc5 100644 --- a/behavior_metrics/brains/CARLA/brain_carla_bird_eye_deep_learning_x3_previous_v_t_t-4_t-9_broken_input.py +++ b/behavior_metrics/brains/CARLA/brain_carla_bird_eye_deep_learning_x3_previous_v_t_t-4_t-9_broken_input.py @@ -9,7 +9,7 @@ import carla from os import path from albumentations import ( - Compose, Normalize, RandomRain, RandomBrightness, RandomShadow, RandomSnow, RandomFog, RandomSunFlare, GridDropout + Compose, Normalize, RandomRain, RandomBrightness, RandomShadow, RandomSnow, RandomFog, RandomSunFlare, GridDropout, ChannelDropout ) from utils.constants import PRETRAINED_MODELS_DIR, ROOT_PATH from utils.logger import logger @@ -147,7 +147,7 @@ def execute(self): AUGMENTATIONS_TEST = Compose([ - GridDropout(p=1.0) + GridDropout(p=1.0, ratio=0.9) ]) bird_eye_view_1 = AUGMENTATIONS_TEST(image=bird_eye_view_1) diff --git a/behavior_metrics/brains/CARLA/brain_carla_bird_eye_deep_learning_x3_previous_v_t_t-4_t-9_broken_input_2.py b/behavior_metrics/brains/CARLA/brain_carla_bird_eye_deep_learning_x3_previous_v_t_t-4_t-9_broken_input_2.py new file mode 100644 index 00000000..e6d78d51 --- /dev/null +++ b/behavior_metrics/brains/CARLA/brain_carla_bird_eye_deep_learning_x3_previous_v_t_t-4_t-9_broken_input_2.py @@ -0,0 +1,306 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +import csv +import cv2 +import math +import numpy as np +import threading +import time +import carla +from os import path +from albumentations import ( + Compose, Normalize, RandomRain, RandomBrightness, RandomShadow, RandomSnow, RandomFog, RandomSunFlare, GridDropout, ChannelDropout, GaussNoise +) +from utils.constants import PRETRAINED_MODELS_DIR, ROOT_PATH +from utils.logger import logger +from traceback import print_exc + +PRETRAINED_MODELS = ROOT_PATH + '/' + PRETRAINED_MODELS_DIR + 'carla_tf_models/' + +from tensorflow.python.framework.errors_impl import NotFoundError +from tensorflow.python.framework.errors_impl import UnimplementedError +import tensorflow as tf + +import os +os.environ['CUDA_VISIBLE_DEVICES'] = '-1' + +#gpus = tf.config.experimental.list_physical_devices('GPU') +#for gpu in gpus: +# tf.config.experimental.set_memory_growth(gpu, True) + + +class Brain: + + def __init__(self, sensors, actuators, handler, model, config=None): + self.camera_0 = sensors.get_camera('camera_0') + self.camera_1 = sensors.get_camera('camera_1') + self.camera_2 = sensors.get_camera('camera_2') + self.camera_3 = sensors.get_camera('camera_3') + + self.cameras_first_images = [] + + self.pose = sensors.get_pose3d('pose3d_0') + + self.bird_eye_view = sensors.get_bird_eye_view('bird_eye_view_0') + + self.motors = actuators.get_motor('motors_0') + self.handler = handler + self.config = config + self.inference_times = [] + self.gpu_inference = True if tf.test.gpu_device_name() else False + + self.threshold_image = np.zeros((640, 360, 3), np.uint8) + self.color_image = np.zeros((640, 360, 3), np.uint8) + + client = carla.Client('localhost', 2000) + client.set_timeout(10.0) # seconds + world = client.get_world() + + time.sleep(5) + self.vehicle = world.get_actors().filter('vehicle.*')[0] + + if model: + if not path.exists(PRETRAINED_MODELS + model): + logger.info("File " + model + " cannot be found in " + PRETRAINED_MODELS) + logger.info("** Load TF model **") + self.net = tf.keras.models.load_model(PRETRAINED_MODELS + model) + logger.info("** Loaded TF model **") + else: + logger.info("** 
Brain not loaded **") + logger.info("- Models path: " + PRETRAINED_MODELS) + logger.info("- Model: " + str(model)) + + self.previous_speed = 0 + self.previous_bird_eye_view_image = 0 + self.bird_eye_view_images = 0 + self.bird_eye_view_unique_images = 0 + + self.image_1 = 0 + self.image_2 = 0 + self.image_3 = 0 + self.image_4 = 0 + self.image_5 = 0 + self.image_6 = 0 + self.image_7 = 0 + self.image_8 = 0 + self.image_9 = 0 + + self.image_1_V = 0 + self.image_2_V = 0 + self.image_3_V = 0 + self.image_4_V = 0 + self.image_5_V = 0 + self.image_6_V = 0 + self.image_7_V = 0 + self.image_8_V = 0 + self.image_9_V = 0 + + + def update_frame(self, frame_id, data): + """Update the information to be shown in one of the GUI's frames. + + Arguments: + frame_id {str} -- Id of the frame that will represent the data + data {*} -- Data to be shown in the frame. Depending on the type of frame (rgbimage, laser, pose3d, etc) + """ + if data.shape[0] != data.shape[1]: + if data.shape[0] > data.shape[1]: + difference = data.shape[0] - data.shape[1] + extra_left, extra_right = int(difference/2), int(difference/2) + extra_top, extra_bottom = 0, 0 + else: + difference = data.shape[1] - data.shape[0] + extra_left, extra_right = 0, 0 + extra_top, extra_bottom = int(difference/2), int(difference/2) + + + data = np.pad(data, ((extra_top, extra_bottom), (extra_left, extra_right), (0, 0)), mode='constant', constant_values=0) + + self.handler.update_frame(frame_id, data) + + def update_pose(self, pose_data): + self.handler.update_pose3d(pose_data) + + def execute(self): + image = self.camera_0.getImage().data + image_1 = self.camera_1.getImage().data + image_2 = self.camera_2.getImage().data + image_3 = self.camera_3.getImage().data + + bird_eye_view_1 = self.bird_eye_view.getImage(self.vehicle) + bird_eye_view_1 = cv2.cvtColor(bird_eye_view_1, cv2.COLOR_BGR2RGB) + + if self.cameras_first_images == []: + self.cameras_first_images.append(image) + self.cameras_first_images.append(image_1) + self.cameras_first_images.append(image_2) + self.cameras_first_images.append(image_3) + self.cameras_first_images.append(bird_eye_view_1) + + self.cameras_last_images = [ + image, + image_1, + image_2, + image_3, + bird_eye_view_1 + ] + + + AUGMENTATIONS_TEST = Compose([ + #GridDropout(p=1.0, ratio=0.5), + GaussNoise(p=1.0, var_limit=(500.0, 1500.0)) + ]) + + bird_eye_view_1 = AUGMENTATIONS_TEST(image=bird_eye_view_1) + bird_eye_view_1 = bird_eye_view_1["image"] + + + self.update_frame('frame_1', image_1) + self.update_frame('frame_2', image_2) + self.update_frame('frame_3', image_3) + + self.update_frame('frame_0', bird_eye_view_1) + + self.update_pose(self.pose.getPose3d()) + + image_shape=(50, 150) + img_base = cv2.resize(bird_eye_view_1, image_shape) + + AUGMENTATIONS_TEST = Compose([ + Normalize() + ]) + image = AUGMENTATIONS_TEST(image=img_base) + img = image["image"] + + self.bird_eye_view_images += 1 + if (self.previous_bird_eye_view_image==img).all() == False: + self.bird_eye_view_unique_images += 1 + self.previous_bird_eye_view_image = img + + if type(self.image_1) is int: + self.image_1 = img + self.image_1_V = 0 + speed = self.vehicle.get_velocity() + vehicle_speed = 3.6 * math.sqrt(speed.x**2 + speed.y**2 + speed.z**2) + self.previous_speed = vehicle_speed + elif type(self.image_2) is int: + self.image_2 = img + self.image_2_V = self.previous_speed + speed = self.vehicle.get_velocity() + vehicle_speed = 3.6 * math.sqrt(speed.x**2 + speed.y**2 + speed.z**2) + self.previous_speed = vehicle_speed + elif type(self.image_3) is 
int: + self.image_3 = img + self.image_3_V = self.previous_speed + speed = self.vehicle.get_velocity() + vehicle_speed = 3.6 * math.sqrt(speed.x**2 + speed.y**2 + speed.z**2) + self.previous_speed = vehicle_speed + elif type(self.image_4) is int: + self.image_4 = img + self.image_4_V = self.previous_speed + speed = self.vehicle.get_velocity() + vehicle_speed = 3.6 * math.sqrt(speed.x**2 + speed.y**2 + speed.z**2) + self.previous_speed = vehicle_speed + elif type(self.image_5) is int: + self.image_5 = img + self.image_5_V = self.previous_speed + speed = self.vehicle.get_velocity() + vehicle_speed = 3.6 * math.sqrt(speed.x**2 + speed.y**2 + speed.z**2) + self.previous_speed = vehicle_speed + elif type(self.image_6) is int: + self.image_6 = img + self.image_6_V = self.previous_speed + speed = self.vehicle.get_velocity() + vehicle_speed = 3.6 * math.sqrt(speed.x**2 + speed.y**2 + speed.z**2) + self.previous_speed = vehicle_speed + elif type(self.image_7) is int: + self.image_7 = img + self.image_7_V = self.previous_speed + speed = self.vehicle.get_velocity() + vehicle_speed = 3.6 * math.sqrt(speed.x**2 + speed.y**2 + speed.z**2) + self.previous_speed = vehicle_speed + elif type(self.image_8) is int: + self.image_8 = img + self.image_8_V = self.previous_speed + speed = self.vehicle.get_velocity() + vehicle_speed = 3.6 * math.sqrt(speed.x**2 + speed.y**2 + speed.z**2) + self.previous_speed = vehicle_speed + elif type(self.image_9) is int: + self.image_9 = img + self.image_9_V = self.previous_speed + speed = self.vehicle.get_velocity() + vehicle_speed = 3.6 * math.sqrt(speed.x**2 + speed.y**2 + speed.z**2) + self.previous_speed = vehicle_speed + else: + self.image_1 = self.image_2 + self.image_2 = self.image_3 + self.image_3 = self.image_4 + self.image_4 = self.image_5 + self.image_5 = self.image_6 + self.image_6 = self.image_7 + self.image_7 = self.image_8 + self.image_8 = self.image_9 + self.image_9 = img + + self.image_1_V = self.image_2_V + self.image_2_V = self.image_3_V + self.image_3_V = self.image_4_V + self.image_4_V = self.image_5_V + self.image_5_V = self.image_6_V + self.image_6_V = self.image_7_V + self.image_7_V = self.image_8_V + self.image_8_V = self.image_9_V + self.image_9_V = self.previous_speed + + velocity_dim_1 = np.full((150, 50), self.image_1_V/30) + image_1 = np.dstack((self.image_1, velocity_dim_1)) + + velocity_dim_4 = np.full((150, 50), self.image_4_V/30) + image_4 = np.dstack((self.image_4, velocity_dim_4)) + + velocity_dim_9 = np.full((150, 50), self.image_9_V/30) + image_9 = np.dstack((self.image_9, velocity_dim_9)) + + img = [image_1, image_4 , image_9] + + img = np.expand_dims(img, axis=0) + + start_time = time.time() + try: + prediction = self.net.predict(img, verbose=0) + self.inference_times.append(time.time() - start_time) + throttle = prediction[0][0] + steer = prediction[0][1] * (1 - (-1)) + (-1) + break_command = prediction[0][2] + speed = self.vehicle.get_velocity() + vehicle_speed = 3.6 * math.sqrt(speed.x**2 + speed.y**2 + speed.z**2) + self.previous_speed = vehicle_speed + if vehicle_speed < 5: + self.motors.sendThrottle(1.0) + self.motors.sendSteer(0.0) + self.motors.sendBrake(0) + else: + self.motors.sendThrottle(throttle) + self.motors.sendSteer(steer) + self.motors.sendBrake(break_command) + + except NotFoundError as ex: + logger.info('Error inside brain: NotFoundError!') + logger.warning(type(ex).__name__) + print_exc() + raise Exception(ex) + except UnimplementedError as ex: + logger.info('Error inside brain: UnimplementedError!') + 
logger.warning(type(ex).__name__) + print_exc() + raise Exception(ex) + except Exception as ex: + logger.info('Error inside brain: Exception!') + logger.warning(type(ex).__name__) + print_exc() + raise Exception(ex) + + + + + diff --git a/behavior_metrics/brains/CARLA/brain_carla_bird_eye_deep_learning_x3_t_t-4_t-9_broken_input_2.py b/behavior_metrics/brains/CARLA/brain_carla_bird_eye_deep_learning_x3_t_t-4_t-9_broken_input_2.py new file mode 100644 index 00000000..4bd27d0c --- /dev/null +++ b/behavior_metrics/brains/CARLA/brain_carla_bird_eye_deep_learning_x3_t_t-4_t-9_broken_input_2.py @@ -0,0 +1,232 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +import csv +import cv2 +import math +import numpy as np +import threading +import time +import carla +from os import path +from albumentations import ( + Compose, Normalize, RandomRain, RandomBrightness, RandomShadow, RandomSnow, RandomFog, RandomSunFlare, GridDropout +) +from utils.constants import PRETRAINED_MODELS_DIR, ROOT_PATH +from utils.logger import logger +from traceback import print_exc + +PRETRAINED_MODELS = ROOT_PATH + '/' + PRETRAINED_MODELS_DIR + 'carla_tf_models/' + +from tensorflow.python.framework.errors_impl import NotFoundError +from tensorflow.python.framework.errors_impl import UnimplementedError +import tensorflow as tf + +import os +os.environ['CUDA_VISIBLE_DEVICES'] = '-1' + +#gpus = tf.config.experimental.list_physical_devices('GPU') +#for gpu in gpus: +# tf.config.experimental.set_memory_growth(gpu, True) + + + +class Brain: + + def __init__(self, sensors, actuators, handler, model, config=None): + self.camera_0 = sensors.get_camera('camera_0') + self.camera_1 = sensors.get_camera('camera_1') + self.camera_2 = sensors.get_camera('camera_2') + self.camera_3 = sensors.get_camera('camera_3') + + self.cameras_first_images = [] + + self.pose = sensors.get_pose3d('pose3d_0') + + self.bird_eye_view = sensors.get_bird_eye_view('bird_eye_view_0') + + self.motors = actuators.get_motor('motors_0') + self.handler = handler + self.config = config + self.inference_times = [] + self.gpu_inference = True if tf.test.gpu_device_name() else False + + self.threshold_image = np.zeros((640, 360, 3), np.uint8) + self.color_image = np.zeros((640, 360, 3), np.uint8) + + client = carla.Client('localhost', 2000) + client.set_timeout(10.0) # seconds + world = client.get_world() + + time.sleep(5) + self.vehicle = world.get_actors().filter('vehicle.*')[0] + + if model: + if not path.exists(PRETRAINED_MODELS + model): + logger.info("File " + model + " cannot be found in " + PRETRAINED_MODELS) + logger.info("** Load TF model **") + self.net = tf.keras.models.load_model(PRETRAINED_MODELS + model) + print(self.net.summary()) + logger.info("** Loaded TF model **") + else: + logger.info("** Brain not loaded **") + logger.info("- Models path: " + PRETRAINED_MODELS) + logger.info("- Model: " + str(model)) + + self.image_1 = 0 + self.image_2 = 0 + self.image_3 = 0 + self.image_4 = 0 + self.image_5 = 0 + self.image_6 = 0 + self.image_7 = 0 + self.image_8 = 0 + self.image_9 = 0 + self.image_10 = 0 + + self.bird_eye_view_images = 0 + self.bird_eye_view_unique_images = 0 + + + def update_frame(self, frame_id, data): + """Update the information to be shown in one of the GUI's frames. + + Arguments: + frame_id {str} -- Id of the frame that will represent the data + data {*} -- Data to be shown in the frame. 
Depending on the type of frame (rgbimage, laser, pose3d, etc) + """ + if data.shape[0] != data.shape[1]: + if data.shape[0] > data.shape[1]: + difference = data.shape[0] - data.shape[1] + extra_left, extra_right = int(difference/2), int(difference/2) + extra_top, extra_bottom = 0, 0 + else: + difference = data.shape[1] - data.shape[0] + extra_left, extra_right = 0, 0 + extra_top, extra_bottom = int(difference/2), int(difference/2) + + + data = np.pad(data, ((extra_top, extra_bottom), (extra_left, extra_right), (0, 0)), mode='constant', constant_values=0) + + self.handler.update_frame(frame_id, data) + + def update_pose(self, pose_data): + self.handler.update_pose3d(pose_data) + + def execute(self): + image = self.camera_0.getImage().data + image_1 = self.camera_1.getImage().data + image_2 = self.camera_2.getImage().data + image_3 = self.camera_3.getImage().data + + bird_eye_view_1 = self.bird_eye_view.getImage(self.vehicle) + bird_eye_view_1 = cv2.cvtColor(bird_eye_view_1, cv2.COLOR_BGR2RGB) + + if self.cameras_first_images == []: + self.cameras_first_images.append(image) + self.cameras_first_images.append(image_1) + self.cameras_first_images.append(image_2) + self.cameras_first_images.append(image_3) + self.cameras_first_images.append(bird_eye_view_1) + + self.cameras_last_images = [ + image, + image_1, + image_2, + image_3, + bird_eye_view_1 + ] + + + AUGMENTATIONS_TEST = Compose([ + GridDropout(p=1.0, ratio=0.9) + ]) + + bird_eye_view_1 = AUGMENTATIONS_TEST(image=bird_eye_view_1) + bird_eye_view_1 = bird_eye_view_1["image"] + + self.update_frame('frame_1', image_1) + self.update_frame('frame_2', image_2) + self.update_frame('frame_3', image_3) + + self.update_frame('frame_0', bird_eye_view_1) + + self.update_pose(self.pose.getPose3d()) + + image_shape=(50, 150) + img_base = cv2.resize(bird_eye_view_1, image_shape) + + AUGMENTATIONS_TEST = Compose([ + Normalize() + ]) + image = AUGMENTATIONS_TEST(image=img_base) + img = image["image"] + + if type(self.image_1) is int: + self.image_1 = img + elif type(self.image_2) is int: + self.image_2 = img + elif type(self.image_3) is int: + self.image_3 = img + elif type(self.image_4) is int: + self.image_4 = img + elif type(self.image_5) is int: + self.image_5 = img + elif type(self.image_6) is int: + self.image_6 = img + elif type(self.image_7) is int: + self.image_7 = img + elif type(self.image_8) is int: + self.image_8 = img + elif type(self.image_9) is int: + self.image_9 = img + else: + self.bird_eye_view_images += 1 + if (self.image_9==img).all() == False: + self.bird_eye_view_unique_images += 1 + self.image_1 = self.image_2 + self.image_2 = self.image_3 + self.image_3 = self.image_4 + self.image_4 = self.image_5 + self.image_5 = self.image_6 + self.image_6 = self.image_7 + self.image_7 = self.image_8 + self.image_8 = self.image_9 + self.image_9 = img + + img = [self.image_1, self.image_4, self.image_9] + img = np.expand_dims(img, axis=0) + + start_time = time.time() + try: + prediction = self.net.predict(img, verbose=0) + self.inference_times.append(time.time() - start_time) + throttle = prediction[0][0] + steer = prediction[0][1] * (1 - (-1)) + (-1) + break_command = prediction[0][2] + speed = self.vehicle.get_velocity() + vehicle_speed = 3.6 * math.sqrt(speed.x**2 + speed.y**2 + speed.z**2) + + if vehicle_speed < 5: + self.motors.sendThrottle(1.0) + self.motors.sendSteer(0.0) + self.motors.sendBrake(0) + else: + self.motors.sendThrottle(throttle) + self.motors.sendSteer(steer) + self.motors.sendBrake(break_command) + + except NotFoundError 
as ex: + logger.info('Error inside brain: NotFoundError!') + logger.warning(type(ex).__name__) + print_exc() + raise Exception(ex) + except UnimplementedError as ex: + logger.info('Error inside brain: UnimplementedError!') + logger.warning(type(ex).__name__) + print_exc() + raise Exception(ex) + except Exception as ex: + logger.info('Error inside brain: Exception!') + logger.warning(type(ex).__name__) + print_exc() + raise Exception(ex) \ No newline at end of file From 43e23091e85f1e5a96767577d992c3db42f06cf2 Mon Sep 17 00:00:00 2001 From: sergiopaniego Date: Wed, 6 Sep 2023 11:29:00 +0200 Subject: [PATCH 3/3] Updated Tensorflow brains --- .../brain_carla_bird_eye_deep_learning_70_km_h.py | 0 ...brain_carla_bird_eye_deep_learning_broken_input.py | 0 ...ain_carla_bird_eye_deep_learning_broken_input_2.py | 0 ...carla_bird_eye_deep_learning_previous_v_70_km_h.py | 0 ..._bird_eye_deep_learning_previous_v_broken_input.py | 0 ...ird_eye_deep_learning_previous_v_broken_input_2.py | 0 ..._bird_eye_deep_learning_x3_previous_v_t_t-4_t-9.py | 11 +---------- ...e_deep_learning_x3_previous_v_t_t-4_t-9_70_km_h.py | 0 ...p_learning_x3_previous_v_t_t-4_t-9_broken_input.py | 0 ...learning_x3_previous_v_t_t-4_t-9_broken_input_2.py | 0 ...brain_carla_bird_eye_deep_learning_x3_t_t-4_t-9.py | 1 - ...rla_bird_eye_deep_learning_x3_t_t-4_t-9_70_km_h.py | 0 ...ird_eye_deep_learning_x3_t_t-4_t-9_broken_input.py | 0 ...d_eye_deep_learning_x3_t_t-4_t-9_broken_input_2.py | 0 14 files changed, 1 insertion(+), 11 deletions(-) rename behavior_metrics/brains/CARLA/{ => tensorflow}/brain_carla_bird_eye_deep_learning_70_km_h.py (100%) rename behavior_metrics/brains/CARLA/{ => tensorflow}/brain_carla_bird_eye_deep_learning_broken_input.py (100%) rename behavior_metrics/brains/CARLA/{ => tensorflow}/brain_carla_bird_eye_deep_learning_broken_input_2.py (100%) rename behavior_metrics/brains/CARLA/{ => tensorflow}/brain_carla_bird_eye_deep_learning_previous_v_70_km_h.py (100%) rename behavior_metrics/brains/CARLA/{ => tensorflow}/brain_carla_bird_eye_deep_learning_previous_v_broken_input.py (100%) rename behavior_metrics/brains/CARLA/{ => tensorflow}/brain_carla_bird_eye_deep_learning_previous_v_broken_input_2.py (100%) rename behavior_metrics/brains/CARLA/{ => tensorflow}/brain_carla_bird_eye_deep_learning_x3_previous_v_t_t-4_t-9_70_km_h.py (100%) rename behavior_metrics/brains/CARLA/{ => tensorflow}/brain_carla_bird_eye_deep_learning_x3_previous_v_t_t-4_t-9_broken_input.py (100%) rename behavior_metrics/brains/CARLA/{ => tensorflow}/brain_carla_bird_eye_deep_learning_x3_previous_v_t_t-4_t-9_broken_input_2.py (100%) rename behavior_metrics/brains/CARLA/{ => tensorflow}/brain_carla_bird_eye_deep_learning_x3_t_t-4_t-9_70_km_h.py (100%) rename behavior_metrics/brains/CARLA/{ => tensorflow}/brain_carla_bird_eye_deep_learning_x3_t_t-4_t-9_broken_input.py (100%) rename behavior_metrics/brains/CARLA/{ => tensorflow}/brain_carla_bird_eye_deep_learning_x3_t_t-4_t-9_broken_input_2.py (100%) diff --git a/behavior_metrics/brains/CARLA/brain_carla_bird_eye_deep_learning_70_km_h.py b/behavior_metrics/brains/CARLA/tensorflow/brain_carla_bird_eye_deep_learning_70_km_h.py similarity index 100% rename from behavior_metrics/brains/CARLA/brain_carla_bird_eye_deep_learning_70_km_h.py rename to behavior_metrics/brains/CARLA/tensorflow/brain_carla_bird_eye_deep_learning_70_km_h.py diff --git a/behavior_metrics/brains/CARLA/brain_carla_bird_eye_deep_learning_broken_input.py 
b/behavior_metrics/brains/CARLA/tensorflow/brain_carla_bird_eye_deep_learning_broken_input.py similarity index 100% rename from behavior_metrics/brains/CARLA/brain_carla_bird_eye_deep_learning_broken_input.py rename to behavior_metrics/brains/CARLA/tensorflow/brain_carla_bird_eye_deep_learning_broken_input.py diff --git a/behavior_metrics/brains/CARLA/brain_carla_bird_eye_deep_learning_broken_input_2.py b/behavior_metrics/brains/CARLA/tensorflow/brain_carla_bird_eye_deep_learning_broken_input_2.py similarity index 100% rename from behavior_metrics/brains/CARLA/brain_carla_bird_eye_deep_learning_broken_input_2.py rename to behavior_metrics/brains/CARLA/tensorflow/brain_carla_bird_eye_deep_learning_broken_input_2.py diff --git a/behavior_metrics/brains/CARLA/brain_carla_bird_eye_deep_learning_previous_v_70_km_h.py b/behavior_metrics/brains/CARLA/tensorflow/brain_carla_bird_eye_deep_learning_previous_v_70_km_h.py similarity index 100% rename from behavior_metrics/brains/CARLA/brain_carla_bird_eye_deep_learning_previous_v_70_km_h.py rename to behavior_metrics/brains/CARLA/tensorflow/brain_carla_bird_eye_deep_learning_previous_v_70_km_h.py diff --git a/behavior_metrics/brains/CARLA/brain_carla_bird_eye_deep_learning_previous_v_broken_input.py b/behavior_metrics/brains/CARLA/tensorflow/brain_carla_bird_eye_deep_learning_previous_v_broken_input.py similarity index 100% rename from behavior_metrics/brains/CARLA/brain_carla_bird_eye_deep_learning_previous_v_broken_input.py rename to behavior_metrics/brains/CARLA/tensorflow/brain_carla_bird_eye_deep_learning_previous_v_broken_input.py diff --git a/behavior_metrics/brains/CARLA/brain_carla_bird_eye_deep_learning_previous_v_broken_input_2.py b/behavior_metrics/brains/CARLA/tensorflow/brain_carla_bird_eye_deep_learning_previous_v_broken_input_2.py similarity index 100% rename from behavior_metrics/brains/CARLA/brain_carla_bird_eye_deep_learning_previous_v_broken_input_2.py rename to behavior_metrics/brains/CARLA/tensorflow/brain_carla_bird_eye_deep_learning_previous_v_broken_input_2.py diff --git a/behavior_metrics/brains/CARLA/tensorflow/brain_carla_bird_eye_deep_learning_x3_previous_v_t_t-4_t-9.py b/behavior_metrics/brains/CARLA/tensorflow/brain_carla_bird_eye_deep_learning_x3_previous_v_t_t-4_t-9.py index bce4b439..f62587e6 100644 --- a/behavior_metrics/brains/CARLA/tensorflow/brain_carla_bird_eye_deep_learning_x3_previous_v_t_t-4_t-9.py +++ b/behavior_metrics/brains/CARLA/tensorflow/brain_carla_bird_eye_deep_learning_x3_previous_v_t_t-4_t-9.py @@ -9,7 +9,7 @@ import carla from os import path from albumentations import ( - Compose, Normalize, RandomRain, RandomBrightness, RandomShadow, RandomSnow, RandomFog, RandomSunFlare, GridDropout + Compose, Normalize, RandomRain, RandomBrightness, RandomShadow, RandomSnow, RandomFog, RandomSunFlare ) from utils.constants import PRETRAINED_MODELS_DIR, ROOT_PATH from utils.logger import logger @@ -145,15 +145,6 @@ def execute(self): bird_eye_view_1 ] - - AUGMENTATIONS_TEST = Compose([ - GridDropout(p=1.0) - ]) - - bird_eye_view_1 = AUGMENTATIONS_TEST(image=bird_eye_view_1) - bird_eye_view_1 = bird_eye_view_1["image"] - - self.update_frame('frame_1', image_1) self.update_frame('frame_2', image_2) self.update_frame('frame_3', image_3) diff --git a/behavior_metrics/brains/CARLA/brain_carla_bird_eye_deep_learning_x3_previous_v_t_t-4_t-9_70_km_h.py b/behavior_metrics/brains/CARLA/tensorflow/brain_carla_bird_eye_deep_learning_x3_previous_v_t_t-4_t-9_70_km_h.py similarity index 100% rename from 
behavior_metrics/brains/CARLA/brain_carla_bird_eye_deep_learning_x3_previous_v_t_t-4_t-9_70_km_h.py rename to behavior_metrics/brains/CARLA/tensorflow/brain_carla_bird_eye_deep_learning_x3_previous_v_t_t-4_t-9_70_km_h.py diff --git a/behavior_metrics/brains/CARLA/brain_carla_bird_eye_deep_learning_x3_previous_v_t_t-4_t-9_broken_input.py b/behavior_metrics/brains/CARLA/tensorflow/brain_carla_bird_eye_deep_learning_x3_previous_v_t_t-4_t-9_broken_input.py similarity index 100% rename from behavior_metrics/brains/CARLA/brain_carla_bird_eye_deep_learning_x3_previous_v_t_t-4_t-9_broken_input.py rename to behavior_metrics/brains/CARLA/tensorflow/brain_carla_bird_eye_deep_learning_x3_previous_v_t_t-4_t-9_broken_input.py diff --git a/behavior_metrics/brains/CARLA/brain_carla_bird_eye_deep_learning_x3_previous_v_t_t-4_t-9_broken_input_2.py b/behavior_metrics/brains/CARLA/tensorflow/brain_carla_bird_eye_deep_learning_x3_previous_v_t_t-4_t-9_broken_input_2.py similarity index 100% rename from behavior_metrics/brains/CARLA/brain_carla_bird_eye_deep_learning_x3_previous_v_t_t-4_t-9_broken_input_2.py rename to behavior_metrics/brains/CARLA/tensorflow/brain_carla_bird_eye_deep_learning_x3_previous_v_t_t-4_t-9_broken_input_2.py diff --git a/behavior_metrics/brains/CARLA/tensorflow/brain_carla_bird_eye_deep_learning_x3_t_t-4_t-9.py b/behavior_metrics/brains/CARLA/tensorflow/brain_carla_bird_eye_deep_learning_x3_t_t-4_t-9.py index c6ad5c06..4082712e 100644 --- a/behavior_metrics/brains/CARLA/tensorflow/brain_carla_bird_eye_deep_learning_x3_t_t-4_t-9.py +++ b/behavior_metrics/brains/CARLA/tensorflow/brain_carla_bird_eye_deep_learning_x3_t_t-4_t-9.py @@ -65,7 +65,6 @@ def __init__(self, sensors, actuators, handler, model, config=None): logger.info("File " + model + " cannot be found in " + PRETRAINED_MODELS) logger.info("** Load TF model **") self.net = tf.keras.models.load_model(PRETRAINED_MODELS + model) - print(self.net.summary()) logger.info("** Loaded TF model **") else: logger.info("** Brain not loaded **") diff --git a/behavior_metrics/brains/CARLA/brain_carla_bird_eye_deep_learning_x3_t_t-4_t-9_70_km_h.py b/behavior_metrics/brains/CARLA/tensorflow/brain_carla_bird_eye_deep_learning_x3_t_t-4_t-9_70_km_h.py similarity index 100% rename from behavior_metrics/brains/CARLA/brain_carla_bird_eye_deep_learning_x3_t_t-4_t-9_70_km_h.py rename to behavior_metrics/brains/CARLA/tensorflow/brain_carla_bird_eye_deep_learning_x3_t_t-4_t-9_70_km_h.py diff --git a/behavior_metrics/brains/CARLA/brain_carla_bird_eye_deep_learning_x3_t_t-4_t-9_broken_input.py b/behavior_metrics/brains/CARLA/tensorflow/brain_carla_bird_eye_deep_learning_x3_t_t-4_t-9_broken_input.py similarity index 100% rename from behavior_metrics/brains/CARLA/brain_carla_bird_eye_deep_learning_x3_t_t-4_t-9_broken_input.py rename to behavior_metrics/brains/CARLA/tensorflow/brain_carla_bird_eye_deep_learning_x3_t_t-4_t-9_broken_input.py diff --git a/behavior_metrics/brains/CARLA/brain_carla_bird_eye_deep_learning_x3_t_t-4_t-9_broken_input_2.py b/behavior_metrics/brains/CARLA/tensorflow/brain_carla_bird_eye_deep_learning_x3_t_t-4_t-9_broken_input_2.py similarity index 100% rename from behavior_metrics/brains/CARLA/brain_carla_bird_eye_deep_learning_x3_t_t-4_t-9_broken_input_2.py rename to behavior_metrics/brains/CARLA/tensorflow/brain_carla_bird_eye_deep_learning_x3_t_t-4_t-9_broken_input_2.py
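
For reference, a minimal sketch of the input-building scheme the previous_v brains above share: the last measured speed is normalized by 30 km/h and stacked as a fourth plane onto the resized bird's-eye-view frame before being batched for the network. Shapes follow the patch (cv2.resize to (50, 150) yields a 150x50x3 array); the helper name is illustrative, and albumentations' Normalize() is approximated here by a plain /255 scaling.

import numpy as np

def build_previous_v_input(bird_eye_view_rgb, previous_speed_kmh, max_speed_kmh=30.0):
    # bird_eye_view_rgb: uint8 array of shape (150, 50, 3), already resized.
    img = bird_eye_view_rgb.astype(np.float32) / 255.0       # stand-in for Normalize()
    velocity_dim = np.full((150, 50), previous_speed_kmh / max_speed_kmh)
    stacked = np.dstack((img, velocity_dim))                  # shape (150, 50, 4)
    return np.expand_dims(stacked, axis=0)                    # batch of 1 for net.predict()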