diff --git a/ezyrb/approximation/ann.py b/ezyrb/approximation/ann.py
index 21d2541..2d0802d 100755
--- a/ezyrb/approximation/ann.py
+++ b/ezyrb/approximation/ann.py
@@ -60,6 +60,13 @@ def __init__(self, layers, function, stop_training, loss=None,
         if not isinstance(stop_training, list):
             stop_training = [stop_training]
 
+        if torch.cuda.is_available(): # Check if GPU is available
+            print("Using cuda device")
+            torch.cuda.empty_cache()
+            self.use_cuda = True
+        else:
+            self.use_cuda = False
+
         self.layers = layers
         self.function = function
         self.loss = loss
@@ -153,13 +160,19 @@ def fit(self, points, values):
 
         """
         self._build_model(points, values)
+
+        if self.use_cuda:
+            self.model = self.model.cuda()
+            points = self._convert_numpy_to_torch(points).cuda()
+            values = self._convert_numpy_to_torch(values).cuda()
+        else:
+            points = self._convert_numpy_to_torch(points)
+            values = self._convert_numpy_to_torch(values)
+
         optimizer = self.optimizer(
             self.model.parameters(),
             lr=self.lr, weight_decay=self.l2_regularization)
 
-        points = self._convert_numpy_to_torch(points)
-        values = self._convert_numpy_to_torch(values)
-
         n_epoch = 1
         flag = True
         while flag:
@@ -198,6 +211,12 @@ def predict(self, new_point):
         :return: the predicted values via the ANN.
         :rtype: numpy.ndarray
         """
-        new_point = self._convert_numpy_to_torch(np.array(new_point))
-        y_new = self.model(new_point)
-        return self._convert_torch_to_numpy(y_new)
+        if self.use_cuda :
+            new_point = self._convert_numpy_to_torch(new_point).cuda()
+            new_point = self._convert_numpy_to_torch(
+                np.array(new_point.cpu())).cuda()
+            y_new = self._convert_torch_to_numpy(self.model(new_point).cpu())
+        else:
+            new_point = self._convert_numpy_to_torch(np.array(new_point))
+            y_new = self._convert_torch_to_numpy(self.model(new_point))
+        return y_new
diff --git a/ezyrb/reduction/ae.py b/ezyrb/reduction/ae.py
index fdca933..713f8f3 100644
--- a/ezyrb/reduction/ae.py
+++ b/ezyrb/reduction/ae.py
@@ -3,6 +3,7 @@
 """
 
 import torch
+import numpy as np
 
 from .reduction import Reduction
 from ..approximation import ANN
@@ -90,6 +91,13 @@ def __init__(self,
         if not isinstance(stop_training, list):
             stop_training = [stop_training]
 
+        if torch.cuda.is_available(): # Check if GPU is available
+            print("Using cuda device")
+            torch.cuda.empty_cache()
+            self.use_cuda = True
+        else:
+            self.use_cuda = False
+
         self.layers_encoder = layers_encoder
         self.layers_decoder = layers_decoder
         self.function_encoder = function_encoder
@@ -153,7 +161,12 @@ def fit(self, values):
             list(self.encoder.parameters()) + list(self.decoder.parameters()),
             lr=self.lr, weight_decay=self.l2_regularization)
 
-        values = self._convert_numpy_to_torch(values)
+        if self.use_cuda:
+            self.encoder = self.encoder.cuda()
+            self.decoder = self.decoder.cuda()
+            values = self._convert_numpy_to_torch(values).cuda()
+        else:
+            values = self._convert_numpy_to_torch(values)
 
         n_epoch = 1
         flag = True
@@ -191,7 +204,11 @@ def transform(self, X):
 
         :param numpy.ndarray X: the input snapshots matrix (stored by column).
         """
-        X = self._convert_numpy_to_torch(X).T
+        if self.use_cuda:
+            X = self._convert_numpy_to_torch(X).T.cuda()
+            X = self._convert_numpy_to_torch(np.array(X.cpu())).cuda()
+        else:
+            X = self._convert_numpy_to_torch(X).T
         g = self.encoder(X)
         return g.cpu().detach().numpy().T
 
@@ -201,7 +218,11 @@ def inverse_transform(self, g):
 
         :param: numpy.ndarray g the latent variables.
         """
-        g = self._convert_numpy_to_torch(g).T
+        if self.use_cuda:
+            g = self._convert_numpy_to_torch(g).T.cuda()
+            g = self._convert_numpy_to_torch(np.array(g.cpu())).cuda()
+        else:
+            g = self._convert_numpy_to_torch(g).T
         u = self.decoder(g)
         return u.cpu().detach().numpy().T