Commit: Brains separated between CARLA and gazebo

sergiopaniego committed Sep 20, 2023
1 parent 7cc2564 · commit f81c845
Showing 129 changed files with 379 additions and 21 deletions.
@@ -1,5 +1,5 @@
 from PIL import Image
-from brains.f1.torch_utils.pilotnet import PilotNet
+from brains.CARLA.pytorch.utils.pilotnet import PilotNet
 from utils.constants import PRETRAINED_MODELS_DIR, ROOT_PATH
 from os import path
 from albumentations import (
The same import update is applied in three more CARLA brain files.
File renamed without changes.
@@ -136,6 +136,7 @@ def execute(self):
         self.update_pose(self.pose.getPose3d())

         image_shape=(66, 200)
+        #image_shape=(50, 150)
         img_base = cv2.resize(bird_eye_view_1, image_shape)

         AUGMENTATIONS_TEST = Compose([
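For context, a minimal sketch of how a frame flows through this preprocessing step. The transform list inside Compose is truncated above, so the Normalize() contents here are an assumption, and the stand-in frame size is arbitrary:

import cv2
import numpy as np
import torch
from albumentations import Compose, Normalize

image_shape = (66, 200)                       # dsize for cv2.resize is (width, height)
bird_eye_view_1 = np.zeros((480, 640, 3), dtype=np.uint8)   # stand-in BEV frame
img_base = cv2.resize(bird_eye_view_1, image_shape)

AUGMENTATIONS_TEST = Compose([Normalize()])   # assumed transform; the commit truncates the list
img = AUGMENTATIONS_TEST(image=img_base)["image"]
tensor = torch.from_numpy(img).permute(2, 0, 1).unsqueeze(0).float()  # (1, C, H, W)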
13 files renamed without changes.
@@ -154,7 +154,7 @@ def __init__(self, sensors, actuators, handler, config=None):
             'algorithm': 'dqn',
             'environment': 'simple',
             'agent': 'f1',
-            'filename': 'brains/f1/config/config_inference_followline_dqn_f1_gazebo.yaml'
+            'filename': 'brains/gazebo/f1/config/config_inference_followline_dqn_f1_gazebo.yaml'
         }

         f = open(args['filename'], "r")
@@ -44,7 +44,7 @@ def __init__(self, sensors, actuators, handler, config=None):
             'algorithm': 'qlearn',
             'environment': 'simple',
             'agent': 'f1',
-            'filename': 'brains/f1/config/config_f1_qlearn.yaml'
+            'filename': 'brains/gazebo/f1/config/config_f1_qlearn.yaml'
         }

         f = open(args['filename'], "r")
@@ -17,7 +17,7 @@
 import time
 import os
 from PIL import Image
-from brains.f1.torch_utils.pilotnet import PilotNet
+from brains.gazebo.f1.torch_utils.pilotnet import PilotNet
 from utils.constants import PRETRAINED_MODELS_DIR, ROOT_PATH
 from os import path
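This brain presumably restores the imported PilotNet from a pretrained checkpoint. A minimal sketch under assumptions: the checkpoint file name, input shape, and label count below are hypothetical, not taken from this commit:

import torch
from os import path
from brains.gazebo.f1.torch_utils.pilotnet import PilotNet
from utils.constants import PRETRAINED_MODELS_DIR

# Hypothetical shape, label count, and file name; the real values live in the brain's config.
model = PilotNet((66, 200, 3), 3)
state_dict = torch.load(path.join(PRETRAINED_MODELS_DIR, 'pilotnet_model.checkpoint'),
                        map_location='cpu')
model.load_state_dict(state_dict)
model.eval()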
File renamed without changes.
195 changes: 195 additions & 0 deletions behavior_metrics/brains/gazebo/f1/torch_utils/convlstm.py
@@ -0,0 +1,195 @@
"""
This implementation of Convolutional LSTM has been adapted from https://github.com/ndrplz/ConvLSTM_pytorch.
"""

import torch.nn as nn
import torch


class ConvLSTMCell(nn.Module):

def __init__(self, input_dim, hidden_dim, kernel_size, bias):
"""
Initialize ConvLSTM cell.
Parameters
----------
input_dim: int
Number of channels of input tensor.
hidden_dim: int
Number of channels of hidden state.
kernel_size: (int, int)
Size of the convolutional kernel.
bias: bool
Whether or not to add the bias.
"""

super(ConvLSTMCell, self).__init__()

self.input_dim = input_dim
self.hidden_dim = hidden_dim

self.kernel_size = kernel_size
self.padding = kernel_size[0] // 2, kernel_size[1] // 2
self.bias = bias

self.conv = nn.Conv2d(in_channels=self.input_dim + self.hidden_dim,
out_channels=4 * self.hidden_dim,
kernel_size=self.kernel_size,
padding=self.padding,
bias=self.bias)

def forward(self, input_tensor, cur_state):
h_cur, c_cur = cur_state

combined = torch.cat([input_tensor, h_cur], dim=1) # concatenate along channel axis

combined_conv = self.conv(combined)
cc_i, cc_f, cc_o, cc_g = torch.split(combined_conv, self.hidden_dim, dim=1)
i = torch.sigmoid(cc_i)
f = torch.sigmoid(cc_f)
o = torch.sigmoid(cc_o)
g = torch.tanh(cc_g)

c_next = f * c_cur + i * g
h_next = o * torch.tanh(c_next)

return h_next, c_next

def init_hidden(self, batch_size, image_size):
height, width = image_size
return (torch.zeros(batch_size, self.hidden_dim, height, width, device=self.conv.weight.device),
torch.zeros(batch_size, self.hidden_dim, height, width, device=self.conv.weight.device))


class ConvLSTM(nn.Module):

"""
Parameters:
input_dim: Number of channels in input
hidden_dim: Number of hidden channels
kernel_size: Size of kernel in convolutions
num_layers: Number of LSTM layers stacked on each other
        batch_first: Whether dimension 0 of the input tensor is the batch
bias: Bias or no bias in Convolution
return_all_layers: Return the list of computations for all layers
    Note: convolutions use 'same' padding.
Input:
A tensor of size B, T, C, H, W or T, B, C, H, W
Output:
A tuple of two lists of length num_layers (or length 1 if return_all_layers is False).
0 - layer_output_list is the list of lists of length T of each output
1 - last_state_list is the list of last states
each element of the list is a tuple (h, c) for hidden state and memory
Example:
        >>> x = torch.rand((32, 10, 64, 128, 128))
        >>> convlstm = ConvLSTM(64, 16, (3, 3), 1, True, True, False)
        >>> _, last_states = convlstm(x)
        >>> h = last_states[0][0]  # 0 for layer index, 0 for h index
"""

def __init__(self, input_dim, hidden_dim, kernel_size, num_layers,
batch_first=False, bias=True, return_all_layers=False):
super(ConvLSTM, self).__init__()

self._check_kernel_size_consistency(kernel_size)

# Make sure that both `kernel_size` and `hidden_dim` are lists having len == num_layers
kernel_size = self._extend_for_multilayer(kernel_size, num_layers)
hidden_dim = self._extend_for_multilayer(hidden_dim, num_layers)
if not len(kernel_size) == len(hidden_dim) == num_layers:
raise ValueError('Inconsistent list length.')

self.input_dim = input_dim
self.hidden_dim = hidden_dim
self.kernel_size = kernel_size
self.num_layers = num_layers
self.batch_first = batch_first
self.bias = bias
self.return_all_layers = return_all_layers

cell_list = []
for i in range(0, self.num_layers):
cur_input_dim = self.input_dim if i == 0 else self.hidden_dim[i - 1]

cell_list.append(ConvLSTMCell(input_dim=cur_input_dim,
hidden_dim=self.hidden_dim[i],
kernel_size=self.kernel_size[i],
bias=self.bias))

self.cell_list = nn.ModuleList(cell_list)

def forward(self, input_tensor, hidden_state=None):
"""
Parameters
----------
        input_tensor: torch.Tensor
            5-D tensor of shape (t, b, c, h, w) or (b, t, c, h, w).
        hidden_state: None
            Stateful operation is not implemented yet; must be None.
Returns
-------
last_state_list, layer_output
"""
if not self.batch_first:
# (t, b, c, h, w) -> (b, t, c, h, w)
input_tensor = input_tensor.permute(1, 0, 2, 3, 4)

b, _, _, h, w = input_tensor.size()

        # TODO: implement stateful ConvLSTM (passing a hidden state is not supported yet)
if hidden_state is not None:
raise NotImplementedError()
else:
            # Hidden state is initialized here in forward, where the image size is known
hidden_state = self._init_hidden(batch_size=b,
image_size=(h, w))

layer_output_list = []
last_state_list = []

seq_len = input_tensor.size(1)
cur_layer_input = input_tensor

for layer_idx in range(self.num_layers):

h, c = hidden_state[layer_idx]
output_inner = []
for t in range(seq_len):
h, c = self.cell_list[layer_idx](input_tensor=cur_layer_input[:, t, :, :, :],
cur_state=[h, c])
output_inner.append(h)

layer_output = torch.stack(output_inner, dim=1)
cur_layer_input = layer_output

layer_output_list.append(layer_output)
last_state_list.append([h, c])

if not self.return_all_layers:
layer_output_list = layer_output_list[-1:]
last_state_list = last_state_list[-1:]

return layer_output_list, last_state_list

def _init_hidden(self, batch_size, image_size):
init_states = []
for i in range(self.num_layers):
init_states.append(self.cell_list[i].init_hidden(batch_size, image_size))
return init_states

@staticmethod
def _check_kernel_size_consistency(kernel_size):
if not (isinstance(kernel_size, tuple) or
(isinstance(kernel_size, list) and all([isinstance(elem, tuple) for elem in kernel_size]))):
raise ValueError('`kernel_size` must be tuple or list of tuples')

@staticmethod
def _extend_for_multilayer(param, num_layers):
if not isinstance(param, list):
param = [param] * num_layers
return param
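A runnable version of the docstring example, for reference; the shapes follow directly from the module above:

import torch

# Batch of 32 sequences, 10 timesteps, 64 input channels, 128x128 frames.
x = torch.rand((32, 10, 64, 128, 128))
convlstm = ConvLSTM(input_dim=64, hidden_dim=16, kernel_size=(3, 3),
                    num_layers=1, batch_first=True, bias=True,
                    return_all_layers=False)
layer_outputs, last_states = convlstm(x)
h = last_states[0][0]   # hidden state of the last (only) layer
print(h.shape)          # torch.Size([32, 16, 128, 128])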
@@ -0,0 +1,45 @@
import torch
import torch.nn as nn
from .convlstm import ConvLSTM

class DeepestLSTMTinyPilotNet(nn.Module):
def __init__(self, image_shape, num_labels):
super(DeepestLSTMTinyPilotNet, self).__init__()
self.num_channels = image_shape[2]
self.cn_1 = nn.Conv2d(self.num_channels, 8, kernel_size=3, stride=2)
self.relu_1 = nn.ReLU()
self.cn_2 = nn.Conv2d(8, 8, kernel_size=3, stride=2)
self.relu_2 = nn.ReLU()
self.cn_3 = nn.Conv2d(8, 8, kernel_size=3, stride=2)
self.relu_3 = nn.ReLU()
self.dropout_1 = nn.Dropout(0.2)

self.clstm_n = ConvLSTM(8, 8, (5, 5), 3, batch_first=True, bias=True, return_all_layers=False)

self.fc_1 = nn.Linear(8*11*5, 50)
self.relu_fc_1 = nn.ReLU()
self.fc_2 = nn.Linear(50, 10)
self.relu_fc_2 = nn.ReLU()
self.fc_3 = nn.Linear(10, num_labels)

def forward(self, img):
out = self.cn_1(img)
out = self.relu_1(out)
out = self.cn_2(out)
out = self.relu_2(out)
out = self.cn_3(out)
out = self.relu_3(out)
out = self.dropout_1(out)
        # add a time dimension of length 1 so the ConvLSTM sees a one-step sequence
out = out.unsqueeze(1)

_, last_states = self.clstm_n(out)
out = last_states[0][0] # 0 for layer index, 0 for h index
# flatten
out = out.reshape(out.size(0), -1)
out = self.fc_1(out)
out = self.relu_fc_1(out)
out = self.fc_2(out)
out = self.relu_fc_2(out)
out = self.fc_3(out)
return out
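A minimal usage sketch. The fc_1 input size (8*11*5) implies a 100x50 spatial input, since three stride-2 3x3 convolutions map 100x50 -> 49x24 -> 24x11 -> 11x5; the input shape is deduced here, not stated in the commit:

import torch

model = DeepestLSTMTinyPilotNet(image_shape=(100, 50, 3), num_labels=2)  # num_labels chosen for illustration
img = torch.rand(4, 3, 100, 50)   # (batch, channels, height, width)
out = model(img)
print(out.shape)                  # torch.Size([4, 2])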
58 changes: 58 additions & 0 deletions behavior_metrics/brains/gazebo/f1/torch_utils/pilotnet.py
@@ -0,0 +1,58 @@
import torch
import torch.nn as nn


class PilotNet(nn.Module):
def __init__(self,
image_shape,
num_labels):
super(PilotNet, self).__init__()

self.img_height = image_shape[0]
self.img_width = image_shape[1]
self.num_channels = image_shape[2]

self.output_size = num_labels

self.ln_1 = nn.BatchNorm2d(self.num_channels, eps=1e-03)

self.cn_1 = nn.Conv2d(self.num_channels, 24, kernel_size=5, stride=2)
self.cn_2 = nn.Conv2d(24, 36, kernel_size=5, stride=2)
self.cn_3 = nn.Conv2d(36, 48, kernel_size=5, stride=2)
self.cn_4 = nn.Conv2d(48, 64, kernel_size=3, stride=1)
self.cn_5 = nn.Conv2d(64, 64, kernel_size=3, stride=1)

self.fc_1 = nn.Linear(1 * 18 * 64, 1164)
self.fc_2 = nn.Linear(1164, 100)
self.fc_3 = nn.Linear(100, 50)
self.fc_4 = nn.Linear(50, 10)
self.fc_5 = nn.Linear(10, self.output_size)

def forward(self, img):

out = self.ln_1(img)

out = self.cn_1(out)
out = torch.relu(out)
out = self.cn_2(out)
out = torch.relu(out)
out = self.cn_3(out)
out = torch.relu(out)
out = self.cn_4(out)
out = torch.relu(out)
out = self.cn_5(out)
out = torch.relu(out)

out = out.reshape(out.size(0), -1)

out = self.fc_1(out)
out = torch.relu(out)
out = self.fc_2(out)
out = torch.relu(out)
out = self.fc_3(out)
out = torch.relu(out)
out = self.fc_4(out)
out = torch.relu(out)
out = self.fc_5(out)

return out
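A matching sketch for PilotNet. fc_1's input size (1*18*64) corresponds to the classic 66x200 PilotNet input, since the conv stack maps 66x200 -> 31x98 -> 14x47 -> 5x22 -> 3x20 -> 1x18:

import torch

model = PilotNet(image_shape=(66, 200, 3), num_labels=2)  # num_labels chosen for illustration
img = torch.rand(4, 3, 66, 200)   # (batch, channels, height, width)
out = model(img)
print(out.shape)                  # torch.Size([4, 2])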