diff --git a/behavior_metrics/brains/CARLA/brain_carla_meiqi_no_segmentation_based_imitation_learning.py b/behavior_metrics/brains/CARLA/brain_carla_meiqi_no_segmentation_based_imitation_learning.py
new file mode 100644
index 00000000..c30e4dbc
--- /dev/null
+++ b/behavior_metrics/brains/CARLA/brain_carla_meiqi_no_segmentation_based_imitation_learning.py
@@ -0,0 +1,195 @@
+from brains.CARLA.utils.pilotnet_onehot import PilotNetOneHot
+from brains.CARLA.utils.test_utils import traffic_light_to_int, model_control, calculate_delta_yaw
+from utils.constants import PRETRAINED_MODELS_DIR, ROOT_PATH
+from brains.CARLA.utils.high_level_command import HighLevelCommandLoader
+from os import path
+
+import numpy as np
+
+import torch
+import time
+import math
+import carla
+
+PRETRAINED_MODELS = ROOT_PATH + '/' + PRETRAINED_MODELS_DIR + 'CARLA/'
+
+class Brain:
+
+ def __init__(self, sensors, actuators, model=None, handler=None, config=None):
+ self.motors = actuators.get_motor('motors_0')
+ self.camera_rgb = sensors.get_camera('camera_0') # rgb front view camera
+ self.camera_seg = sensors.get_camera('camera_2') # segmentation camera
+ self.handler = handler
+ self.inference_times = []
+ self.gpu_inference = config['GPU']
+ self.device = torch.device('cuda' if (torch.cuda.is_available() and self.gpu_inference) else 'cpu')
+ self.red_light_counter = 0
+ self.running_light = False
+
+ client = carla.Client('localhost', 2000)
+ client.set_timeout(100.0)
+ world = client.get_world()
+ self.map = world.get_map()
+
+ weather = carla.WeatherParameters.ClearNoon
+ world.set_weather(weather)
+
+ self.vehicle = None
+ while self.vehicle is None:
+ for vehicle in world.get_actors().filter('vehicle.*'):
+ if vehicle.attributes.get('role_name') == 'ego_vehicle':
+ self.vehicle = vehicle
+ break
+ if self.vehicle is None:
+ print("Waiting for vehicle with role_name 'ego_vehicle'")
+ time.sleep(1) # sleep for 1 second before checking again
+
+ if model:
+ if not path.exists(PRETRAINED_MODELS + model):
+ print("File " + model + " cannot be found in " + PRETRAINED_MODELS)
+
+ if config['UseOptimized']:
+ self.net = torch.jit.load(PRETRAINED_MODELS + model).to(self.device)
+ else:
+ #self.net = PilotNetOneHot((288, 200, 6), 3, 4, 4).to(self.device)
+ #self.net = PilotNetOneHot((288, 200, 3), 3, 4, 4).to(self.device)
+ self.net = PilotNetOneHot((288, 200, 3), 2, 4, 4).to(self.device) # combined control
+ self.net.load_state_dict(torch.load(PRETRAINED_MODELS + model, map_location=self.device))
+ self.net.eval()
+
+ if 'Route' in config:
+ route = config['Route']
+ print('route: ', route)
+ else:
+ route = None
+
+ self.hlc_loader = HighLevelCommandLoader(self.vehicle, self.map, route=route)
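+ # the loader replays the configured route's commands, or picks a random
+ # direction at junctions when no route is given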
+ self.prev_hlc = 0
+ self.prev_yaw = None
+ self.delta_yaw = 0
+
+ self.target_point = None
+ self.termination_code = 0 # 0: not terminated; 1: arrived at target; 2: wrong turn
+
+ def update_frame(self, frame_id, data):
+ """Update the information to be shown in one of the GUI's frames.
+
+ Arguments:
+ frame_id {str} -- Id of the frame that will represent the data
+ data {*} -- Data to be shown in the frame. Depending on the type of frame (rgbimage, laser, pose3d, etc)
+ """
+ if data.shape[0] != data.shape[1]:
+ if data.shape[0] > data.shape[1]:
+ difference = data.shape[0] - data.shape[1]
+ extra_left, extra_right = int(difference/2), int(difference/2)
+ extra_top, extra_bottom = 0, 0
+ else:
+ difference = data.shape[1] - data.shape[0]
+ extra_left, extra_right = 0, 0
+ extra_top, extra_bottom = int(difference/2), int(difference/2)
+
+
+ data = np.pad(data, ((extra_top, extra_bottom), (extra_left, extra_right), (0, 0)), mode='constant', constant_values=0)
+
+ self.handler.update_frame(frame_id, data)
+
+ def execute(self):
+ """Main loop of the brain. This will be called iteratively each TIME_CYCLE (see pilot.py)"""
+
+ rgb_image = self.camera_rgb.getImage().data
+ seg_image = self.camera_seg.getImage().data
+
+ self.update_frame('frame_0', rgb_image)
+ self.update_frame('frame_1', seg_image)
+
+ start_time = time.time()
+ try:
+ # calculate speed
+ velocity = self.vehicle.get_velocity()
+ speed_m_s = np.sqrt(velocity.x**2 + velocity.y**2 + velocity.z**2)
+ speed = 3.6 * speed_m_s #m/s to km/h
+
+ # read next high-level command or choose a random direction
+ hlc = self.hlc_loader.get_next_hlc()
+ if hlc != 0:
+ if self.prev_hlc == 0:
+ self.prev_yaw = self.vehicle.get_transform().rotation.yaw
+ else:
+ cur_yaw = self.vehicle.get_transform().rotation.yaw
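+ # calculate_delta_yaw presumably wraps the raw yaw difference into
+ # (-180, 180] so crossing the +/-180 degree boundary does not inflate
+ # the accumulated turn angle, e.g. roughly:
+ # delta = cur_yaw - prev_yaw
+ # if delta > 180: delta -= 360
+ # elif delta < -180: delta += 360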
+ self.delta_yaw += calculate_delta_yaw(self.prev_yaw, cur_yaw)
+ self.prev_yaw = cur_yaw
+
+ # detect whether the vehicle made the correct turn
+ turning_infraction = False
+ if self.prev_hlc != 0 and hlc == 0:
+ print(f'turned {self.delta_yaw} degrees')
+ if 45 < np.abs(self.delta_yaw) < 180:
+ if self.delta_yaw < 0 and self.prev_hlc != 1:
+ turning_infraction = True
+ elif self.delta_yaw > 0 and self.prev_hlc != 2:
+ turning_infraction = True
+ elif self.prev_hlc != 3:
+ turning_infraction = True
+ if turning_infraction:
+ print('Wrong Turn!!!')
+ self.termination_code = 2
+ self.delta_yaw = 0
+
+ self.prev_hlc = hlc
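+ # the checks above imply the HLC encoding: 0 = lane-follow, 1 = turn
+ # with negative accumulated yaw, 2 = turn with positive accumulated
+ # yaw, 3 = go straight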
+
+ # get traffic light status
+ light_status = -1
+ vehicle_location = self.vehicle.get_transform().location
+ if self.vehicle.is_at_traffic_light():
+ traffic_light = self.vehicle.get_traffic_light()
+ light_status = traffic_light.get_state()
+ traffic_light_location = traffic_light.get_transform().location
+ distance_to_traffic_light = np.sqrt((vehicle_location.x - traffic_light_location.x)**2 + (vehicle_location.y - traffic_light_location.y)**2)
+ if light_status == carla.libcarla.TrafficLightState.Red and distance_to_traffic_light < 6 and speed_m_s > 5:
+ if not self.running_light:
+ self.running_light = True
+ self.red_light_counter += 1
+ else:
+ self.running_light = False
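+ # NOTE: hlc is reset to 0 below, so the network always receives the
+ # lane-follow command; the loader's commands only feed the turn
+ # bookkeeping above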
+ hlc = 0
+ print(f'high-level command: {hlc}')
+ print(f'light: {light_status}')
+ frame_data = {
+ 'hlc': hlc,
+ 'measurements': speed,
+ 'rgb': np.copy(rgb_image),
+ 'segmentation': np.copy(seg_image),
+ 'light': np.array([traffic_light_to_int(light_status)])
+ }
+
+
+ throttle, steer, brake = model_control(self.net,
+ frame_data,
+ ignore_traffic_light=False,
+ device=self.device,
+ combined_control=True)
+ #combined_control=False)
+
+
+ self.inference_times.append(time.time() - start_time)
+
+ print(throttle, steer, brake)
+
+ self.motors.sendThrottle(throttle)
+ #self.motors.sendThrottle(0.25)
+ self.motors.sendSteer(steer)
+ self.motors.sendBrake(brake)
+ #self.motors.sendBrake(0.0)
+
+ # calculate distance to target point
+ # print(f'vehicle location: ({vehicle_location.x}, {-vehicle_location.y})')
+ # print(f'target point: ({self.target_point[0]}, {self.target_point[1]})')
+ # the target point appears to be expressed with an inverted y-axis,
+ # hence -vehicle_location.y in the distance below
+ if self.target_point is not None:
+ distance_to_target = np.sqrt((self.target_point[0] - vehicle_location.x)**2 + (self.target_point[1] - (-vehicle_location.y))**2)
+ print(f'Euclidean distance to target: {distance_to_target}')
+ if distance_to_target < 1.5:
+ self.termination_code = 1
+
+
+ except Exception as err:
+ print(err)
\ No newline at end of file
diff --git a/behavior_metrics/brains/CARLA/brain_carla_segmentation_based_imitation_learning.py b/behavior_metrics/brains/CARLA/brain_carla_segmentation_based_imitation_learning.py
index d45c06d5..dcd60bee 100644
--- a/behavior_metrics/brains/CARLA/brain_carla_segmentation_based_imitation_learning.py
+++ b/behavior_metrics/brains/CARLA/brain_carla_segmentation_based_imitation_learning.py
@@ -52,6 +52,8 @@ def __init__(self, sensors, actuators, model=None, handler=None, config=None):
self.net = torch.jit.load(PRETRAINED_MODELS + model).to(self.device)
else:
self.net = PilotNetOneHot((288, 200, 6), 3, 4, 4).to(self.device)
+ #self.net = PilotNetOneHot((288, 200, 3), 3, 4, 4).to(self.device)
self.net.load_state_dict(torch.load(PRETRAINED_MODELS + model,map_location=self.device))
self.net.eval()
diff --git a/behavior_metrics/brains/CARLA/tensorflow/brain_carla_bird_eye_deep_learning_previous_v_generate_dataset.py b/behavior_metrics/brains/CARLA/tensorflow/brain_carla_bird_eye_deep_learning_previous_v_generate_dataset.py
new file mode 100644
index 00000000..e03a568f
--- /dev/null
+++ b/behavior_metrics/brains/CARLA/tensorflow/brain_carla_bird_eye_deep_learning_previous_v_generate_dataset.py
@@ -0,0 +1,234 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+import csv
+import cv2
+import math
+import numpy as np
+import threading
+import time
+import carla
+from os import path
+from albumentations import (
+ Compose, Normalize, RandomRain, RandomBrightness, RandomShadow, RandomSnow, RandomFog, RandomSunFlare
+)
+from utils.constants import PRETRAINED_MODELS_DIR, ROOT_PATH
+from utils.logger import logger
+from traceback import print_exc
+
+PRETRAINED_MODELS = ROOT_PATH + '/' + PRETRAINED_MODELS_DIR + 'CARLA/'
+
+from tensorflow.python.framework.errors_impl import NotFoundError
+from tensorflow.python.framework.errors_impl import UnimplementedError
+import tensorflow as tf
+import pandas as pd
+
+import os
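+# hide all GPUs so TensorFlow runs this dataset-collection brain on the CPU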
+os.environ['CUDA_VISIBLE_DEVICES'] = '-1'
+
+#gpus = tf.config.experimental.list_physical_devices('GPU')
+#for gpu in gpus:
+# tf.config.experimental.set_memory_growth(gpu, True)
+
+
+class Brain:
+
+ def __init__(self, sensors, actuators, handler, model, config=None):
+ self.camera_0 = sensors.get_camera('camera_0')
+ self.camera_1 = sensors.get_camera('camera_1')
+ self.camera_2 = sensors.get_camera('camera_2')
+ self.camera_3 = sensors.get_camera('camera_3')
+
+ self.cameras_first_images = []
+
+ self.pose = sensors.get_pose3d('pose3d_0')
+
+ self.bird_eye_view = sensors.get_bird_eye_view('bird_eye_view_0')
+
+ self.motors = actuators.get_motor('motors_0')
+ self.handler = handler
+ self.config = config
+ self.inference_times = []
+ self.gpu_inference = bool(tf.test.gpu_device_name())
+
+ self.threshold_image = np.zeros((640, 360, 3), np.uint8)
+ self.color_image = np.zeros((640, 360, 3), np.uint8)
+
+ client = carla.Client('localhost', 2000)
+ client.set_timeout(10.0) # seconds
+ world = client.get_world()
+
+ time.sleep(5)
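+ # assumes the first vehicle actor in the world is the ego vehicle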
+ self.vehicle = world.get_actors().filter('vehicle.*')[0]
+
+ if model:
+ if not path.exists(PRETRAINED_MODELS + model):
+ logger.info("File " + model + " cannot be found in " + PRETRAINED_MODELS)
+ logger.info("** Load TF model **")
+ self.net = tf.keras.models.load_model(PRETRAINED_MODELS + model)
+ logger.info("** Loaded TF model **")
+ else:
+ logger.info("** Brain not loaded **")
+ logger.info("- Models path: " + PRETRAINED_MODELS)
+ logger.info("- Model: " + str(model))
+
+ self.previous_speed = 0
+ self.previous_bird_eye_view_image = 0
+ self.bird_eye_view_images = 0
+ self.bird_eye_view_unique_images = 0
+
+ self.folder_name = '../../carla_dataset_ATLAS/18_03_anticlockwise_town_02/'
+
+ # open the file in the write mode
+ self.f = open(self.folder_name + 'dataset.csv', 'a')
+ # create the csv writer
+ self.writer = csv.writer(self.f)
+
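+ # resume batch numbering from the last row of an existing dataset.csv so
+ # repeated runs append new batches instead of reusing image ids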
+ try:
+ df = pd.read_csv(self.folder_name + 'dataset.csv')
+ self.batch_number = int(df.iloc[-1]['batch']) + 1
+ except Exception as ex:
+ # dataset.csv is missing or empty: start at batch 0 and write the header
+ self.batch_number = 0
+ header = ['batch', 'image_id', 'timestamp', 'throttle', 'steer', 'brake', 'location_x', 'location_y', 'previous_velocity', 'current_velocity']
+ self.writer.writerow(header)
+
+ print(self.batch_number)
+ self.frame_number = 0
+
+
+ def update_frame(self, frame_id, data):
+ """Update the information to be shown in one of the GUI's frames.
+
+ Arguments:
+ frame_id {str} -- Id of the frame that will represent the data
+ data {*} -- Data to be shown in the frame. Depending on the type of frame (rgbimage, laser, pose3d, etc)
+ """
+ if data.shape[0] != data.shape[1]:
+ if data.shape[0] > data.shape[1]:
+ difference = data.shape[0] - data.shape[1]
+ extra_left, extra_right = int(difference/2), int(difference/2)
+ extra_top, extra_bottom = 0, 0
+ else:
+ difference = data.shape[1] - data.shape[0]
+ extra_left, extra_right = 0, 0
+ extra_top, extra_bottom = int(difference/2), int(difference/2)
+
+
+ data = np.pad(data, ((extra_top, extra_bottom), (extra_left, extra_right), (0, 0)), mode='constant', constant_values=0)
+
+ self.handler.update_frame(frame_id, data)
+
+ def update_pose(self, pose_data):
+ self.handler.update_pose3d(pose_data)
+
+ def execute(self):
+ image = self.camera_0.getImage().data
+ image_1 = self.camera_1.getImage().data
+ image_2 = self.camera_2.getImage().data
+ image_3 = self.camera_3.getImage().data
+
+ base_img_copy = image.copy()
+
+ bird_eye_view_1 = self.bird_eye_view.getImage(self.vehicle)
+ bird_eye_view_1 = cv2.cvtColor(bird_eye_view_1, cv2.COLOR_BGR2RGB)
+
+ if self.cameras_first_images == []:
+ self.cameras_first_images.append(image)
+ self.cameras_first_images.append(image_1)
+ self.cameras_first_images.append(image_2)
+ self.cameras_first_images.append(image_3)
+ self.cameras_first_images.append(bird_eye_view_1)
+
+ self.cameras_last_images = [
+ image,
+ image_1,
+ image_2,
+ image_3,
+ bird_eye_view_1
+ ]
+
+ self.update_frame('frame_1', image_1)
+ self.update_frame('frame_2', image_2)
+ self.update_frame('frame_3', image_3)
+
+ self.update_frame('frame_0', bird_eye_view_1)
+
+ self.update_pose(self.pose.getPose3d())
+
+ image_shape=(66, 200)
+ img_base = cv2.resize(bird_eye_view_1, image_shape)
+
+ AUGMENTATIONS_TEST = Compose([
+ Normalize()
+ ])
+ image = AUGMENTATIONS_TEST(image=img_base)
+ img = image["image"]
+
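+ # count how many bird's-eye frames are actually new between control
+ # cycles, presumably to measure how often the simulator delivers a
+ # fresh image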
+ self.bird_eye_view_images += 1
+ if not (self.previous_bird_eye_view_image == img).all():
+ self.bird_eye_view_unique_images += 1
+ self.previous_bird_eye_view_image = img
+
+ # broadcast the normalized previous speed (previous_speed / 30) into a
+ # constant fourth channel so the network can condition on kinematics
+ velocity_dim = np.full((200, 66), self.previous_speed/30)
+ img = np.dstack((img, velocity_dim))
+
+ img = np.expand_dims(img, axis=0)
+ start_time = time.time()
+ try:
+ prediction = self.net.predict(img, verbose=0)
+ self.inference_times.append(time.time() - start_time)
+ throttle = prediction[0][0]
+ # steer is predicted in [0, 1]; rescale it to the [-1, 1] steering range
+ steer = prediction[0][1] * (1 - (-1)) + (-1)
+ brake_command = prediction[0][2]
+ speed = self.vehicle.get_velocity()
+ vehicle_speed = 3.6 * math.sqrt(speed.x**2 + speed.y**2 + speed.z**2)
+
+ '''
+ Save dataset
+ '''
+ from PIL import Image
+ pil_image = Image.fromarray(base_img_copy)
+ pil_image.save(self.folder_name + str(self.batch_number) + '_' + str(self.frame_number) + '.png')
+ #cv2.imwrite('../../carla_dataset_ATLAS/18_03_anticlockwise_town_02/' + str(self.batch_number) + '_' + str(self.frame_number) + '.png', img)
+ vehicle_location = self.vehicle.get_location()
+ row = [self.batch_number, str(self.batch_number) + '_' + str(self.frame_number) + '.png', time.time(), throttle, steer, brake_command, vehicle_location.x, vehicle_location.y, self.previous_speed, vehicle_speed]
+
+ print(str(self.batch_number) + '_' + str(self.frame_number) + '.png')
+ print(row)
+
+ self.writer.writerow(row)
+ self.frame_number += 1
+ self.previous_speed = vehicle_speed
+
+ if vehicle_speed < 5:
+ self.motors.sendThrottle(1.0)
+ self.motors.sendSteer(0.0)
+ self.motors.sendBrake(0)
+ else:
+ self.motors.sendThrottle(throttle)
+ self.motors.sendSteer(steer)
+ self.motors.sendBrake(brake_command)
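+ # NOTE: the low-speed branch above force-accelerates the car below
+ # 5 km/h, presumably to kick-start it so the dataset is not dominated
+ # by standstill frames; the network's predictions are recorded either way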
+
+
+
+ except NotFoundError as ex:
+ logger.info('Error inside brain: NotFoundError!')
+ logger.warning(type(ex).__name__)
+ print_exc()
+ raise Exception(ex)
+ except UnimplementedError as ex:
+ logger.info('Error inside brain: UnimplementedError!')
+ logger.warning(type(ex).__name__)
+ print_exc()
+ raise Exception(ex)
+ except Exception as ex:
+ logger.info('Error inside brain: Exception!')
+ logger.warning(type(ex).__name__)
+ print_exc()
+ raise Exception(ex)
+
+
+
+
+
diff --git a/behavior_metrics/brains/CARLA/tensorflow/brain_carla_bird_eye_deep_lerning_atlas_uc3m.py b/behavior_metrics/brains/CARLA/tensorflow/brain_carla_bird_eye_deep_lerning_atlas_uc3m.py
new file mode 100644
index 00000000..7cacca82
--- /dev/null
+++ b/behavior_metrics/brains/CARLA/tensorflow/brain_carla_bird_eye_deep_lerning_atlas_uc3m.py
@@ -0,0 +1,203 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+import csv
+import cv2
+import math
+import numpy as np
+import threading
+import time
+import carla
+from os import path
+from albumentations import (
+ Compose, Normalize, RandomRain, RandomBrightness, RandomShadow, RandomSnow, RandomFog, RandomSunFlare, GridDropout
+)
+from utils.constants import PRETRAINED_MODELS_DIR, ROOT_PATH
+from utils.logger import logger
+from traceback import print_exc
+
+PRETRAINED_MODELS = ROOT_PATH + '/' + PRETRAINED_MODELS_DIR + 'CARLA/'
+
+from tensorflow.python.framework.errors_impl import NotFoundError
+from tensorflow.python.framework.errors_impl import UnimplementedError
+import tensorflow as tf
+
+
+#import os
+#os.environ['CUDA_VISIBLE_DEVICES'] = '-1'
+
+gpus = tf.config.experimental.list_physical_devices('GPU')
+for gpu in gpus:
+ tf.config.experimental.set_memory_growth(gpu, True)
+
+class Brain:
+
+ def __init__(self, sensors, actuators, handler, model, config=None):
+ self.camera_0 = sensors.get_camera('camera_0')
+ self.camera_1 = sensors.get_camera('camera_1')
+ self.camera_2 = sensors.get_camera('camera_2')
+ self.camera_3 = sensors.get_camera('camera_3')
+
+ self.cameras_first_images = []
+
+ self.pose = sensors.get_pose3d('pose3d_0')
+
+ self.bird_eye_view = sensors.get_bird_eye_view('bird_eye_view_0')
+
+ self.motors = actuators.get_motor('motors_0')
+ self.handler = handler
+ self.config = config
+ self.inference_times = []
+ self.gpu_inference = bool(tf.test.gpu_device_name())
+
+ self.threshold_image = np.zeros((640, 360, 3), np.uint8)
+ self.color_image = np.zeros((640, 360, 3), np.uint8)
+
+ client = carla.Client('localhost', 2000)
+ client.set_timeout(30.0) # seconds
+ world = client.get_world()
+
+ time.sleep(5)
+ self.vehicle = None
+ while self.vehicle is None:
+ for vehicle in world.get_actors().filter('vehicle.*'):
+ if vehicle.attributes.get('role_name') == 'ego_vehicle':
+ self.vehicle = vehicle
+ break
+ if self.vehicle is None:
+ print("Waiting for vehicle with role_name 'ego_vehicle'")
+ time.sleep(1)
+
+ if model:
+ if not path.exists(PRETRAINED_MODELS + model):
+ logger.info("File " + model + " cannot be found in " + PRETRAINED_MODELS)
+ logger.info("** Load TF model **")
+ self.net = tf.keras.models.load_model(PRETRAINED_MODELS + model, compile=False)
+ logger.info("** Loaded TF model **")
+ else:
+ logger.info("** Brain not loaded **")
+ logger.info("- Models path: " + PRETRAINED_MODELS)
+ logger.info("- Model: " + str(model))
+
+ self.previous_bird_eye_view_image = 0
+ self.bird_eye_view_images = 0
+ self.bird_eye_view_unique_images = 0
+
+
+ def update_frame(self, frame_id, data):
+ """Update the information to be shown in one of the GUI's frames.
+
+ Arguments:
+ frame_id {str} -- Id of the frame that will represent the data
+ data {*} -- Data to be shown in the frame. Depending on the type of frame (rgbimage, laser, pose3d, etc)
+ """
+ if data.shape[0] != data.shape[1]:
+ if data.shape[0] > data.shape[1]:
+ difference = data.shape[0] - data.shape[1]
+ extra_left, extra_right = int(difference/2), int(difference/2)
+ extra_top, extra_bottom = 0, 0
+ else:
+ difference = data.shape[1] - data.shape[0]
+ extra_left, extra_right = 0, 0
+ extra_top, extra_bottom = int(difference/2), int(difference/2)
+
+
+ data = np.pad(data, ((extra_top, extra_bottom), (extra_left, extra_right), (0, 0)), mode='constant', constant_values=0)
+
+ self.handler.update_frame(frame_id, data)
+
+ def update_pose(self, pose_data):
+ self.handler.update_pose3d(pose_data)
+
+ def execute(self):
+ image = self.camera_0.getImage().data
+ image_1 = self.camera_1.getImage().data
+ image_2 = self.camera_2.getImage().data
+ image_3 = self.camera_3.getImage().data
+
+ bird_eye_view_1 = self.bird_eye_view.getImage(self.vehicle)
+ bird_eye_view_1 = cv2.cvtColor(bird_eye_view_1, cv2.COLOR_BGR2RGB)
+
+ if self.cameras_first_images == []:
+ self.cameras_first_images.append(image)
+ self.cameras_first_images.append(image_1)
+ self.cameras_first_images.append(image_2)
+ self.cameras_first_images.append(image_3)
+ self.cameras_first_images.append(bird_eye_view_1)
+
+ self.cameras_last_images = [
+ image,
+ image_1,
+ image_2,
+ image_3,
+ bird_eye_view_1
+ ]
+
+ self.update_frame('frame_0', image)
+ self.update_frame('frame_1', image_1)
+ self.update_frame('frame_2', image_2)
+ self.update_frame('frame_3', image_3)
+
+ '''
+ nombre_archivo = "imagen_guardada.png"
+ imagen_rgb = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
+ cv2.imwrite(nombre_archivo, imagen_rgb)
+ nombre_archivo = "image_1_guardada.png"
+ imagen_rgb = cv2.cvtColor(image_1, cv2.COLOR_BGR2RGB)
+ cv2.imwrite(nombre_archivo, imagen_rgb)
+ nombre_archivo = "image_2_guardada.png"
+ imagen_rgb = cv2.cvtColor(image_2, cv2.COLOR_BGR2RGB)
+ cv2.imwrite(nombre_archivo, imagen_rgb)
+ nombre_archivo = "image_3_guardada.png"
+ imagen_rgb = cv2.cvtColor(image_3, cv2.COLOR_BGR2RGB)
+ cv2.imwrite(nombre_archivo, imagen_rgb)
+ '''
+ #self.update_frame('frame_0', bird_eye_view_1)
+
+ self.update_pose(self.pose.getPose3d())
+
+ image_shape=(66, 200)
+ #image_shape=(50, 150)
+ img_base = cv2.resize(bird_eye_view_1, image_shape)
+
+ AUGMENTATIONS_TEST = Compose([
+ Normalize()
+ ])
+ image = AUGMENTATIONS_TEST(image=img_base)
+ img = image["image"]
+
+ self.bird_eye_view_images += 1
+ if not (self.previous_bird_eye_view_image == img).all():
+ self.bird_eye_view_unique_images += 1
+ self.previous_bird_eye_view_image = img
+
+ img = np.expand_dims(img, axis=0)
+ start_time = time.time()
+ try:
+ prediction = self.net.predict(img, verbose=0)
+ self.inference_times.append(time.time() - start_time)
+ throttle = prediction[0][0]
+ # rescale the [0, 1] network output to the [-1, 1] steering range
+ steer = prediction[0][1] * (1 - (-1)) + (-1)
+ brake_command = prediction[0][2]
+
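+ # velocity vector magnitude in m/s converted to km/h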
+ speed = self.vehicle.get_velocity()
+ vehicle_speed = 3.6 * math.sqrt(speed.x**2 + speed.y**2 + speed.z**2)
+
+ if vehicle_speed < 5:
+ self.motors.sendThrottle(1.0)
+ self.motors.sendSteer(0.0)
+ self.motors.sendBrake(0)
+ else:
+ self.motors.sendThrottle(throttle)
+ self.motors.sendSteer(steer)
+ self.motors.sendBrake(brake_command)
+ except NotFoundError as ex:
+ logger.info('Error inside brain: NotFoundError!')
+ logger.warning(type(ex).__name__)
+ print_exc()
+ raise Exception(ex)
+ except UnimplementedError as ex:
+ logger.info('Error inside brain: UnimplementedError!')
+ logger.warning(type(ex).__name__)
+ print_exc()
+ raise Exception(ex)
+ except Exception as ex:
+ logger.info('Error inside brain: Exception!')
+ logger.warning(type(ex).__name__)
+ print_exc()
+ raise Exception(ex)
+
\ No newline at end of file
diff --git a/behavior_metrics/configs/CARLA/CARLA_launch_files/carla_new_ada_bridge.launch b/behavior_metrics/configs/CARLA/CARLA_launch_files/carla_new_ada_bridge.launch
new file mode 100644
index 00000000..5cf5b766
--- /dev/null
+++ b/behavior_metrics/configs/CARLA/CARLA_launch_files/carla_new_ada_bridge.launch
@@ -0,0 +1,100 @@
+<!-- XML contents of this launch file are not preserved in this diff -->
\ No newline at end of file
diff --git a/behavior_metrics/configs/CARLA/CARLA_launch_files/carla_new_ada_bridge_updated.launch b/behavior_metrics/configs/CARLA/CARLA_launch_files/carla_new_ada_bridge_updated.launch
new file mode 100644
index 00000000..67c8b5f0
--- /dev/null
+++ b/behavior_metrics/configs/CARLA/CARLA_launch_files/carla_new_ada_bridge_updated.launch
@@ -0,0 +1,85 @@
+<!-- XML contents of this launch file are not preserved in this diff -->
\ No newline at end of file
diff --git a/behavior_metrics/configs/CARLA/CARLA_launch_files/carla_new_ada_bridge_updated_autopilot.launch b/behavior_metrics/configs/CARLA/CARLA_launch_files/carla_new_ada_bridge_updated_autopilot.launch
new file mode 100644
index 00000000..15c389d2
--- /dev/null
+++ b/behavior_metrics/configs/CARLA/CARLA_launch_files/carla_new_ada_bridge_updated_autopilot.launch
@@ -0,0 +1,83 @@
+<!-- XML contents of this launch file are not preserved in this diff -->
\ No newline at end of file
diff --git a/behavior_metrics/configs/CARLA/CARLA_launch_files/carla_new_ada_bridge_updated_parque01.launch b/behavior_metrics/configs/CARLA/CARLA_launch_files/carla_new_ada_bridge_updated_parque01.launch
new file mode 100644
index 00000000..1125d783
--- /dev/null
+++ b/behavior_metrics/configs/CARLA/CARLA_launch_files/carla_new_ada_bridge_updated_parque01.launch
@@ -0,0 +1,90 @@
+<!-- XML contents of this launch file are not preserved in this diff -->
\ No newline at end of file
diff --git a/behavior_metrics/configs/CARLA/CARLA_launch_files/image_transform.py b/behavior_metrics/configs/CARLA/CARLA_launch_files/image_transform.py
new file mode 100644
index 00000000..293f1348
--- /dev/null
+++ b/behavior_metrics/configs/CARLA/CARLA_launch_files/image_transform.py
@@ -0,0 +1,60 @@
+#!/usr/bin/env python
+# license removed for brevity
+
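+# Republishes the CARLA ego-vehicle front camera under the /ada topic names,
+# restamping messages with the pylon_camera frame id and a fixed calibration,
+# presumably so nodes written for the real ATLAS camera work unchanged.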
+import rospy
+from sensor_msgs.msg import PointCloud2 # only needed if the Lidar republisher below is re-enabled
+from sensor_msgs.msg import Image
+from sensor_msgs.msg import CameraInfo
+from cv_bridge import CvBridge
+
+pub = None # set to the real Publisher in __main__
+
+def image_update(msg):
+ bridge = CvBridge()
+ new_img_cv = bridge.imgmsg_to_cv2(msg, "bgr8")
+ new_img = bridge.cv2_to_imgmsg(new_img_cv, "bgr8")
+ new_img.header = msg.header
+ new_img.header.frame_id = "pylon_camera"
+ pub.publish(new_img)
+
+def image_info_update(msg):
+ new_img_info = msg
+ new_img_info.header.frame_id = "pylon_camera"
+ new_img_info.distortion_model = "plumb_bob"
+
+ # copy the camera info of atlas here
+ new_img_info.D = [-0.288902, 0.085422, 0.0013, -6.3e-05, 0.0]
+ new_img_info.K = [664.9933754342509, 0.0, 1024.0, 0.0, 664.9933754342509, 768.0, 0.0, 0.0, 1.0]
+ new_img_info.R = [1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 1.0]
+ new_img_info.P = [664.9933754342509, 0.0, 1024.0, 0.0, 0.0, 664.9933754342509, 768.0, 0.0, 0.0, 0.0, 1.0, 0.0]
+
+ pub_2.publish(new_img_info)
+
+if __name__ == '__main__':
+ rospy.init_node('camera_rename')
+ # pub = rospy.Publisher('/ada/Lidar32/point_cloud', PointCloud2, queue_size=1)
+ pub = rospy.Publisher('/ada/rgb_front/image_color', Image, queue_size=1)
+ pub_2 = rospy.Publisher('/ada/rgb_front/camera_info', CameraInfo, queue_size=1)
+
+ #img_sub = rospy.Subscriber('/carla/ego_vehicle/camera/rgb/rgb_front/image_color', Image, image_update)
+ img_sub = rospy.Subscriber('/carla/ego_vehicle/rgb_front/image', Image, image_update)
+ #img_inf_sub = rospy.Subscriber('/carla/ego_vehicle/camera/rgb/rgb_front/camera_info', CameraInfo, image_info_update)
+ img_inf_sub = rospy.Subscriber('/carla/ego_vehicle/rgb_front/camera_info', CameraInfo, image_info_update)
+
+
+ rospy.spin()
diff --git a/behavior_metrics/configs/CARLA/CARLA_launch_files/town_02_anticlockwise_meiqi.launch b/behavior_metrics/configs/CARLA/CARLA_launch_files/town_02_anticlockwise_meiqi.launch
new file mode 100644
index 00000000..cc7405d8
--- /dev/null
+++ b/behavior_metrics/configs/CARLA/CARLA_launch_files/town_02_anticlockwise_meiqi.launch
@@ -0,0 +1,78 @@
+<!-- XML contents of this launch file are not preserved in this diff -->
\ No newline at end of file
diff --git a/behavior_metrics/configs/CARLA/default_carla.yml b/behavior_metrics/configs/CARLA/default_carla.yml
index ddc3427c..c4466576 100644
--- a/behavior_metrics/configs/CARLA/default_carla.yml
+++ b/behavior_metrics/configs/CARLA/default_carla.yml
@@ -33,9 +33,9 @@ Behaviors:
Topic: '/carla/ego_vehicle/vehicle_control_cmd'
MaxV: 3
MaxW: 0.3
- BrainPath: 'brains/CARLA/brain_carla_slow_and_turn.py'
+ BrainPath: 'brains/CARLA/brain_carla_autopilot.py'
PilotTimeCycle: 50 # Turn up to reduce number of control decisions
- AsyncMode: True # Set to False to control simulator time
+ AsyncMode: False # Set to False to control simulator time
Parameters:
Model: ''
ImageCropped: True
@@ -47,7 +47,7 @@ Behaviors:
ImageTranform: ''
Type: 'CARLA'
Simulation:
- World: configs/CARLA/CARLA_launch_files/town_02_anticlockwise.launch
+ World: configs/CARLA/CARLA_launch_files/carla_new_ada_bridge_updated_autopilot.launch
RandomSpawnPoint: False
Dataset:
In: '/tmp/my_bag.bag'
diff --git a/behavior_metrics/configs/CARLA/default_carla_autopilot.yml b/behavior_metrics/configs/CARLA/default_carla_autopilot.yml
index 2eeeb996..374e8036 100644
--- a/behavior_metrics/configs/CARLA/default_carla_autopilot.yml
+++ b/behavior_metrics/configs/CARLA/default_carla_autopilot.yml
@@ -36,6 +36,7 @@ Behaviors:
BrainPath: 'brains/CARLA/brain_carla_autopilot.py'
PilotTimeCycle: 300
+ AsyncMode: False # Set to False to control simulator time
Parameters:
ImageCropped: True
ImageSize: [ 100,50 ]
@@ -47,7 +48,8 @@ Behaviors:
Type: 'CARLA'
RandomSpawnPoint: False
Simulation:
- World: configs/CARLA/CARLA_launch_files/town_01_anticlockwise.launch
+ World: configs/CARLA/CARLA_launch_files/carla_new_ada_bridge_updated_autopilot.launch
+ RandomSpawnPoint: False
Dataset:
In: '/tmp/my_bag.bag'
Out: ''
diff --git a/behavior_metrics/configs/CARLA/default_carla_tensorflow.yml b/behavior_metrics/configs/CARLA/default_carla_tensorflow.yml
index 46fae066..bcd84b52 100644
--- a/behavior_metrics/configs/CARLA/default_carla_tensorflow.yml
+++ b/behavior_metrics/configs/CARLA/default_carla_tensorflow.yml
@@ -33,11 +33,11 @@ Behaviors:
Topic: '/carla/ego_vehicle/vehicle_control_cmd'
MaxV: 3
MaxW: 0.3
- BrainPath: 'brains/CARLA/tensorflow/brain_carla_bird_eye_deep_learning.py'
+ BrainPath: 'brains/CARLA/brain_carla_subjective_vision_deep_learning_previous_v.py'
PilotTimeCycle: 50 # Turn up to reduce number of control decisions
AsyncMode: True # Set to False to control simulator time
Parameters:
- Model: 'pilotnet.h5'
+ Model: 'subjective_vision_pilotnet/pilotnet_starstar_traffic_6.h5'
ImageCropped: True
ImageSize: [ 200,66 ]
ImageNormalized: True
diff --git a/behavior_metrics/configs/CARLA/default_carla_tensorflow_updated.yml b/behavior_metrics/configs/CARLA/default_carla_tensorflow_updated.yml
new file mode 100644
index 00000000..07944ed0
--- /dev/null
+++ b/behavior_metrics/configs/CARLA/default_carla_tensorflow_updated.yml
@@ -0,0 +1,80 @@
+Behaviors:
+ Robot:
+ Sensors:
+ Cameras:
+ Camera_0:
+ Name: 'camera_0'
+ Topic: '/carla/ego_vehicle/rgb_front/image'
+ Camera_1:
+ Name: 'camera_1'
+ Topic: '/carla/ego_vehicle/rgb_view/image'
+ Camera_2:
+ Name: 'camera_2'
+ Topic: '/ada/rgb_front/image_color'
+ Camera_3:
+ Name: 'camera_3'
+ Topic: '/ada/rgb_front/image_rect_color'
+ #Camera_2:
+ # Name: 'camera_2'
+ # Topic: '/carla/ego_vehicle/semantic_segmentation_front/image'
+ #Camera_3:
+ # Name: 'camera_3'
+ # Topic: '/carla/ego_vehicle/dvs_front/image'
+ Pose3D:
+ Pose3D_0:
+ Name: 'pose3d_0'
+ Topic: '/carla/ego_vehicle/odometry'
+ BirdEyeView:
+ BirdEyeView_0:
+ Name: 'bird_eye_view_0'
+ Topic: ''
+ Speedometer:
+ Speedometer_0:
+ Name: 'speedometer_0'
+ Topic: '/carla/ego_vehicle/speedometer'
+ Actuators:
+ CARLA_Motors:
+ Motors_0:
+ Name: 'motors_0'
+ Topic: '/carla/ego_vehicle/vehicle_control_cmd'
+ MaxV: 3
+ MaxW: 0.3
+ BrainPath: 'brains/CARLA/tensorflow/brain_carla_bird_eye_deep_learning_previous_v_generate_dataset.py'
+ PilotTimeCycle: 50 # Turn up to reduce number of control decisions
+ AsyncMode: True # Set to False to control simulator time
+ Parameters:
+ Model: 'pilotnet_no_visual_kinematic.h5'
+ ImageCropped: True
+ ImageSize: [ 200,66 ]
+ ImageNormalized: True
+ PredictionsNormalized: True
+ GPU: True
+ UseOptimized: False
+ ImageTranform: ''
+ Type: 'CARLA'
+ Simulation:
+ World: configs/CARLA/CARLA_launch_files/carla_new_ada_bridge_updated.launch
+ RandomSpawnPoint: False
+ Dataset:
+ In: '/tmp/my_bag.bag'
+ Out: ''
+ Stats:
+ Out: './'
+ PerfectLap: './perfect_bags/lap-simple-circuit.bag'
+ Layout:
+ Frame_0:
+ Name: frame_0
+ Geometry: [1, 1, 1, 1]
+ Data: rgbimage
+ Frame_1:
+ Name: frame_1
+ Geometry: [0, 1, 1, 1]
+ Data: rgbimage
+ Frame_2:
+ Name: frame_2
+ Geometry: [0, 2, 1, 1]
+ Data: rgbimage
+ Frame_3:
+ Name: frame_3
+ Geometry: [1, 2, 1, 1]
+ Data: rgbimage
diff --git a/behavior_metrics/robot/interfaces/speedometer.py b/behavior_metrics/robot/interfaces/speedometer.py
index 4a75a079..f0723659 100644
--- a/behavior_metrics/robot/interfaces/speedometer.py
+++ b/behavior_metrics/robot/interfaces/speedometer.py
@@ -22,7 +22,7 @@ def __init__(self):
self.timeStamp = 0 # Time stamp [s]
def __str__(self):
- s = "Speedometer: {\n x: " + str(self.x) + "\n timeStamp: " + str(self.timeStamp) + "\n}"
+ s = "Speedometer: {\n data: " + str(self.data) + "\n timeStamp: " + str(self.timeStamp) + "\n}"
return s
diff --git a/behavior_metrics/utils/environment.py b/behavior_metrics/utils/environment.py
index 8225c697..e400b708 100644
--- a/behavior_metrics/utils/environment.py
+++ b/behavior_metrics/utils/environment.py
@@ -73,9 +73,13 @@ def launch_env(launch_file, random_spawn_point=False, carla_simulator=False, con
tree = ET.parse(ROOT_PATH + '/' + launch_file)
root = tree.getroot()
quality = root.find(".//*[@name=\"quality\"]")
+
if quality == None:
- subprocess.Popen([os.environ["CARLA_ROOT"] + "CarlaUE4.sh", "-RenderOffScreen"], stdout=out, stderr=err)
+ print('>>>>>>>> HIGH QUALITY RENDERING!')
+ subprocess.Popen([os.environ["CARLA_ROOT"] + "CarlaUE4.sh", "-RenderOffScreen"], stdout=out, stderr=err)
+ #subprocess.Popen([os.environ["CARLA_ROOT"] + "CarlaUE4.sh", "-RenderOffScreen", "-quality-level=Low"], stdout=out, stderr=err)
elif quality.attrib['default'] == 'Low':
+ print('>>>>>>>> LOW QUALITY RENDERING!')
subprocess.Popen([os.environ["CARLA_ROOT"] + "CarlaUE4.sh", "-RenderOffScreen", "-quality-level=Low"], stdout=out, stderr=err)
#subprocess.Popen(["/home/jderobot/Documents/Projects/carla_simulator_0_9_13/CarlaUE4.sh", "-RenderOffScreen", "-quality-level=Low"], stdout=out, stderr=err)
logger.info("SimulatorEnv: launching simulator server.")