diff --git a/pina/__init__.py b/pina/__init__.py
index 3bc28ae6..e9ce7061 100644
--- a/pina/__init__.py
+++ b/pina/__init__.py
@@ -1,6 +1,6 @@
 __all__ = [
-    "Trainer", "LabelTensor", "Plotter", "Condition", "SamplePointDataset",
-    "PinaDataModule", "PinaDataLoader", 'TorchOptimizer', 'Graph'
+    "Trainer", "LabelTensor", "Plotter", "Condition",
+    "PinaDataModule", 'TorchOptimizer', 'Graph',
 ]
 
 from .meta import *
@@ -9,9 +9,9 @@
 from .trainer import Trainer
 from .plotter import Plotter
 from .condition.condition import Condition
-from .data import SamplePointDataset
+
 from .data import PinaDataModule
-from .data import PinaDataLoader
+
 from .optim import TorchOptimizer
 from .optim import TorchScheduler
 from .graph import Graph
diff --git a/pina/collector.py b/pina/collector.py
index 3219b2b6..1f0fb41d 100644
--- a/pina/collector.py
+++ b/pina/collector.py
@@ -1,3 +1,4 @@
+from .label_tensor import LabelTensor
 from .utils import check_consistency, merge_tensors
 
@@ -66,9 +67,12 @@ def store_sample_domains(self, n, mode, variables, sample_locations):
         for loc in sample_locations:
             # get condition
             condition = self.problem.conditions[loc]
+            condition_domain = condition.domain
+            if isinstance(condition_domain, str):
+                condition_domain = self.problem.domains[condition_domain]
             keys = ["input_points", "equation"]
             # if the condition is not ready, we get and store the data
-            if (not self._is_conditions_ready[loc]):
+            if not self._is_conditions_ready[loc]:
                 # if it is the first time we sample
                 if not self.data_collections[loc]:
                     already_sampled = []
@@ -84,10 +88,11 @@ def store_sample_domains(self, n, mode, variables, sample_locations):
                 # get the samples
                 samples = [
-                    condition.domain.sample(n=n, mode=mode, variables=variables)
-                ] + already_sampled
+                    condition_domain.sample(n=n, mode=mode,
+                                            variables=variables)
+                ] + already_sampled
                 pts = merge_tensors(samples)
-                if (set(pts.labels).issubset(sorted(self.problem.input_variables))):
+                if set(pts.labels).issubset(sorted(self.problem.input_variables)):
                     pts = pts.sort_labels()
                     if sorted(pts.labels) == sorted(self.problem.input_variables):
                         self._is_conditions_ready[loc] = True
@@ -110,5 +115,6 @@ def add_points(self, new_points_dict):
             if not self._is_conditions_ready[k]:
                 raise RuntimeError(
                     'Cannot add points on a non sampled condition')
-            self.data_collections[k]['input_points'] = self.data_collections[k][
-                'input_points'].vstack(v)
+            self.data_collections[k]['input_points'] = LabelTensor.vstack(
+                [self.data_collections[k]['input_points'], v])
diff --git a/pina/condition/data_condition.py b/pina/condition/data_condition.py
index c6777231..255c329f 100644
--- a/pina/condition/data_condition.py
+++ b/pina/condition/data_condition.py
@@ -18,12 +18,11 @@ class DataConditionInterface(ConditionInterface):
 
     def __init__(self, input_points, conditional_variables=None):
         """
-        TODO
+        TODO : add docstring
        """
         super().__init__()
         self.input_points = input_points
         self.conditional_variables = conditional_variables
-        self._condition_type = 'unsupervised'
 
     def __setattr__(self, key, value):
         if (key == 'input_points') or (key == 'conditional_variables'):
diff --git a/pina/condition/domain_equation_condition.py b/pina/condition/domain_equation_condition.py
index 58dca70b..9fb0dcb2 100644
--- a/pina/condition/domain_equation_condition.py
+++ b/pina/condition/domain_equation_condition.py
@@ -16,16 +16,15 @@ class DomainEquationCondition(ConditionInterface):
 
     def __init__(self, domain, equation):
         """
-        TODO
+        TODO : add docstring
        """
         super().__init__()
         self.domain = domain
         self.equation = 
equation - self._condition_type = 'physics' def __setattr__(self, key, value): if key == 'domain': - check_consistency(value, (DomainInterface)) + check_consistency(value, (DomainInterface, str)) DomainEquationCondition.__dict__[key].__set__(self, value) elif key == 'equation': check_consistency(value, (EquationInterface)) diff --git a/pina/condition/input_equation_condition.py b/pina/condition/input_equation_condition.py index bf05130c..a5971114 100644 --- a/pina/condition/input_equation_condition.py +++ b/pina/condition/input_equation_condition.py @@ -17,12 +17,11 @@ class InputPointsEquationCondition(ConditionInterface): def __init__(self, input_points, equation): """ - TODO + TODO : add docstring """ super().__init__() self.input_points = input_points self.equation = equation - self._condition_type = 'physics' def __setattr__(self, key, value): if key == 'input_points': diff --git a/pina/condition/input_output_condition.py b/pina/condition/input_output_condition.py index 08ed21d9..5cf5518e 100644 --- a/pina/condition/input_output_condition.py +++ b/pina/condition/input_output_condition.py @@ -1,4 +1,5 @@ import torch +import torch_geometric from .condition_interface import ConditionInterface from ..label_tensor import LabelTensor @@ -16,16 +17,15 @@ class InputOutputPointsCondition(ConditionInterface): def __init__(self, input_points, output_points): """ - TODO + TODO : add docstring """ super().__init__() self.input_points = input_points self.output_points = output_points - self._condition_type = ['supervised', 'physics'] def __setattr__(self, key, value): if (key == 'input_points') or (key == 'output_points'): - check_consistency(value, (LabelTensor, Graph, torch.Tensor)) + check_consistency(value, (LabelTensor, Graph, torch.Tensor, torch_geometric.data.Data)) InputOutputPointsCondition.__dict__[key].__set__(self, value) elif key in ('_problem', '_condition_type'): super().__setattr__(key, value) diff --git a/pina/data/__init__.py b/pina/data/__init__.py index 2b3a126a..292c9ed1 100644 --- a/pina/data/__init__.py +++ b/pina/data/__init__.py @@ -2,14 +2,11 @@ Import data classes """ __all__ = [ - 'PinaDataLoader', 'SupervisedDataset', 'SamplePointDataset', - 'UnsupervisedDataset', 'Batch', 'PinaDataModule', 'BaseDataset' + 'PinaDataModule', + 'PinaDataset' ] -from .pina_dataloader import PinaDataLoader -from .supervised_dataset import SupervisedDataset -from .sample_dataset import SamplePointDataset -from .unsupervised_dataset import UnsupervisedDataset -from .pina_batch import Batch + + from .data_module import PinaDataModule -from .base_dataset import BaseDataset +from .dataset import PinaDataset diff --git a/pina/data/base_dataset.py b/pina/data/base_dataset.py deleted file mode 100644 index 2c28ba30..00000000 --- a/pina/data/base_dataset.py +++ /dev/null @@ -1,157 +0,0 @@ -""" -Basic data module implementation -""" -import torch -import logging - -from torch.utils.data import Dataset - -from ..label_tensor import LabelTensor - - -class BaseDataset(Dataset): - """ - BaseDataset class, which handle initialization and data retrieval - :var condition_indices: List of indices - :var device: torch.device - """ - - def __new__(cls, problem=None, device=torch.device('cpu')): - """ - Ensure correct definition of __slots__ before initialization - :param AbstractProblem problem: The formulation of the problem. - :param torch.device device: The device on which the - dataset will be loaded. - """ - if cls is BaseDataset: - raise TypeError( - 'BaseDataset cannot be instantiated directly. 
Use a subclass.') - if not hasattr(cls, '__slots__'): - raise TypeError( - 'Something is wrong, __slots__ must be defined in subclasses.') - return object.__new__(cls) - - def __init__(self, problem=None, device=torch.device('cpu')): - """" - Initialize the object based on __slots__ - :param AbstractProblem problem: The formulation of the problem. - :param torch.device device: The device on which the - dataset will be loaded. - """ - super().__init__() - self.empty = True - self.problem = problem - self.device = device - self.condition_indices = None - for slot in self.__slots__: - setattr(self, slot, []) - self.num_el_per_condition = [] - self.conditions_idx = [] - if self.problem is not None: - self._init_from_problem(self.problem.collector.data_collections) - self.initialized = False - - def _init_from_problem(self, collector_dict): - """ - TODO - """ - for name, data in collector_dict.items(): - keys = list(data.keys()) - if set(self.__slots__) == set(keys): - self._populate_init_list(data) - idx = [ - key for key, val in - self.problem.collector.conditions_name.items() - if val == name - ] - self.conditions_idx.append(idx) - self.initialize() - - def add_points(self, data_dict, condition_idx, batching_dim=0): - """ - This method filled internal lists of data points - :param data_dict: dictionary containing data points - :param condition_idx: index of the condition to which the data points - belong to - :param batching_dim: dimension of the batching - :raises: ValueError if the dataset has already been initialized - """ - if not self.initialized: - self._populate_init_list(data_dict, batching_dim) - self.conditions_idx.append(condition_idx) - self.empty = False - else: - raise ValueError('Dataset already initialized') - - def _populate_init_list(self, data_dict, batching_dim=0): - current_cond_num_el = None - for slot in data_dict.keys(): - slot_data = data_dict[slot] - if batching_dim != 0: - if isinstance(slot_data, (LabelTensor, torch.Tensor)): - dims = len(slot_data.size()) - slot_data = slot_data.permute( - [batching_dim] + - [dim for dim in range(dims) if dim != batching_dim]) - if current_cond_num_el is None: - current_cond_num_el = len(slot_data) - elif current_cond_num_el != len(slot_data): - raise ValueError('Different dimension in same condition') - current_list = getattr(self, slot) - current_list += [ - slot_data - ] if not (isinstance(slot_data, list)) else slot_data - self.num_el_per_condition.append(current_cond_num_el) - - def initialize(self): - """ - Initialize the datasets tensors/LabelTensors/lists given the lists - already filled - """ - logging.debug(f'Initialize dataset {self.__class__.__name__}') - - if self.num_el_per_condition: - self.condition_indices = torch.cat([ - torch.tensor([i] * self.num_el_per_condition[i], - dtype=torch.uint8) - for i in range(len(self.num_el_per_condition)) - ], - dim=0) - for slot in self.__slots__: - current_attribute = getattr(self, slot) - if all(isinstance(a, LabelTensor) for a in current_attribute): - setattr(self, slot, LabelTensor.vstack(current_attribute)) - self.initialized = True - - def __len__(self): - """ - :return: Number of elements in the dataset - """ - return len(getattr(self, self.__slots__[0])) - - def __getitem__(self, idx): - """ - :param idx: - :return: - """ - if not isinstance(idx, (tuple, list, slice, int)): - raise IndexError("Invalid index") - tensors = [] - for attribute in self.__slots__: - tensor = getattr(self, attribute) - if isinstance(attribute, (LabelTensor, torch.Tensor)): - 
tensors.append(tensor.__getitem__(idx)) - elif isinstance(attribute, list): - if isinstance(idx, (list, tuple)): - tensor = [tensor[i] for i in idx] - tensors.append(tensor) - return tensors - - def apply_shuffle(self, indices): - for slot in self.__slots__: - if slot != 'equation': - attribute = getattr(self, slot) - if isinstance(attribute, (LabelTensor, torch.Tensor)): - setattr(self, 'slot', attribute[[indices]]) - if isinstance(attribute, list): - setattr(self, 'slot', [attribute[i] for i in indices]) diff --git a/pina/data/data_module.py b/pina/data/data_module.py index bd117b54..c9af8aeb 100644 --- a/pina/data/data_module.py +++ b/pina/data/data_module.py @@ -1,17 +1,71 @@ -""" -This module provide basic data management functionalities -""" - +import logging +from lightning.pytorch import LightningDataModule import math import torch -import logging -from pytorch_lightning import LightningDataModule -from .sample_dataset import SamplePointDataset -from .supervised_dataset import SupervisedDataset -from .unsupervised_dataset import UnsupervisedDataset -from .pina_dataloader import PinaDataLoader -from .pina_subset import PinaSubset +from ..label_tensor import LabelTensor +from torch.utils.data import DataLoader, BatchSampler, SequentialSampler, \ + RandomSampler +from torch.utils.data.distributed import DistributedSampler +from .dataset import PinaDatasetFactory + +class Collator: + def __init__(self, max_conditions_lengths, ): + self.max_conditions_lengths = max_conditions_lengths + self.callable_function = self._collate_custom_dataloader if \ + max_conditions_lengths is None else ( + self._collate_standard_dataloader) + @staticmethod + def _collate_custom_dataloader(batch): + return batch[0] + + def _collate_standard_dataloader(self, batch): + """ + Function used to collate the batch + """ + batch_dict = {} + if isinstance(batch, dict): + return batch + conditions_names = batch[0].keys() + + # Condition names + for condition_name in conditions_names: + single_cond_dict = {} + condition_args = batch[0][condition_name].keys() + for arg in condition_args: + data_list = [batch[idx][condition_name][arg] for idx in range( + min(len(batch), + self.max_conditions_lengths[condition_name]))] + if isinstance(data_list[0], LabelTensor): + single_cond_dict[arg] = LabelTensor.stack(data_list) + elif isinstance(data_list[0], torch.Tensor): + single_cond_dict[arg] = torch.stack(data_list) + else: + raise NotImplementedError( + f"Data type {type(data_list[0])} not supported") + batch_dict[condition_name] = single_cond_dict + return batch_dict + + def __call__(self, batch): + return self.callable_function(batch) + + +class PinaBatchSampler(BatchSampler): + def __init__(self, dataset, batch_size, shuffle, sampler=None): + if sampler is None: + if (torch.distributed.is_available() and + torch.distributed.is_initialized()): + rank = torch.distributed.get_rank() + world_size = torch.distributed.get_world_size() + sampler = DistributedSampler(dataset, shuffle=shuffle, + rank=rank, num_replicas=world_size) + else: + if shuffle: + sampler = RandomSampler(dataset) + else: + sampler = SequentialSampler(dataset) + super().__init__(sampler=sampler, batch_size=batch_size, + drop_last=False) class PinaDataModule(LightningDataModule): """ @@ -20,160 +74,218 @@ class PinaDataModule(LightningDataModule): """ def __init__(self, - problem, - device, + collector, train_size=.7, - test_size=.1, - val_size=.2, + test_size=.2, + val_size=.1, predict_size=0., batch_size=None, shuffle=True, - datasets=None): + repeat=False, 
+                 automatic_batching=False
+                 ):
         """
         Initialize the object, creating dataset based on input problem
-        :param AbstractProblem problem: PINA problem
-        :param device: Device used for training and testing
+        :param Collector collector: collector holding the data of the
+            problem conditions
         :param train_size: number/percentage of elements in train split
         :param test_size: number/percentage of elements in test split
-        :param eval_size: number/percentage of elements in evaluation split
+        :param val_size: number/percentage of elements in evaluation split
         :param batch_size: batch size used for training
-        :param datasets: list of datasets objects
         """
         logging.debug('Start initialization of Pina DataModule')
         logging.info('Start initialization of Pina DataModule')
         super().__init__()
-        self.problem = problem
-        self.device = device
-        self.dataset_classes = [
-            SupervisedDataset, UnsupervisedDataset, SamplePointDataset
-        ]
-        if datasets is None:
-            self.datasets = None
-        else:
-            self.datasets = datasets
-
-        self.split_length = []
-        self.split_names = []
-        self.loader_functions = {}
+        self.default_batching = automatic_batching
         self.batch_size = batch_size
-        self.condition_names = problem.collector.conditions_name
+        self.shuffle = shuffle
+        self.repeat = repeat
+        # Begin data splitting
+        splits_dict = {}
         if train_size > 0:
-            self.split_names.append('train')
-            self.split_length.append(train_size)
-            self.loader_functions['train_dataloader'] = lambda: PinaDataLoader(
-                self.splits['train'], self.batch_size, self.condition_names)
+            splits_dict['train'] = train_size
+            self.train_dataset = None
+        else:
+            self.train_dataloader = super().train_dataloader
         if test_size > 0:
-            self.split_length.append(test_size)
-            self.split_names.append('test')
-            self.loader_functions['test_dataloader'] = lambda: PinaDataLoader(
-                self.splits['test'], self.batch_size, self.condition_names)
+            splits_dict['test'] = test_size
+            self.test_dataset = None
+        else:
+            self.test_dataloader = super().test_dataloader
         if val_size > 0:
-            self.split_length.append(val_size)
-            self.split_names.append('val')
-            self.loader_functions['val_dataloader'] = lambda: PinaDataLoader(
-                self.splits['val'], self.batch_size, self.condition_names)
+            splits_dict['val'] = val_size
+            self.val_dataset = None
+        else:
+            self.val_dataloader = super().val_dataloader
         if predict_size > 0:
-            self.split_length.append(predict_size)
-            self.split_names.append('predict')
-            self.loader_functions['predict_dataloader'] = lambda: PinaDataLoader(
-                self.splits['predict'], self.batch_size, self.condition_names)
-        self.splits = {k: {} for k in self.split_names}
-        self.shuffle = shuffle
-
-        for k, v in self.loader_functions.items():
-            setattr(self, k, v)
-
-    def prepare_data(self):
-        if self.datasets is None:
-            self._create_datasets()
+            splits_dict['predict'] = predict_size
+            self.predict_dataset = None
+        else:
+            self.predict_dataloader = super().predict_dataloader
+        self.collector_splits = self._create_splits(collector, splits_dict)
 
     def setup(self, stage=None):
         """
         Perform the splitting of the dataset
         """
         logging.debug('Start setup of Pina DataModule obj')
-        if self.datasets is None:
-            self._create_datasets()
         if stage == 'fit' or stage is None:
-            for dataset in self.datasets:
-                if len(dataset) > 0:
-                    splits = self.dataset_split(dataset,
-                                                self.split_length,
-                                                shuffle=self.shuffle)
-                    for i in range(len(self.split_length)):
-                        self.splits[self.split_names[i]][
-                            dataset.data_type] = splits[i]
+            self.train_dataset = PinaDatasetFactory(
+                self.collector_splits['train'],
+                max_conditions_lengths=self.find_max_conditions_lengths(
+                    'train'))
+            if 'val' in self.collector_splits.keys():
+                self.val_dataset = PinaDatasetFactory(
+                    self.collector_splits['val'],
+                    max_conditions_lengths=self.find_max_conditions_lengths(
+                        'val')
+                )
         elif stage == 'test':
-            raise NotImplementedError("Testing pipeline not implemented yet")
+            self.test_dataset = PinaDatasetFactory(
+                self.collector_splits['test'],
+                max_conditions_lengths=self.find_max_conditions_lengths(
+                    'test')
+            )
+        elif stage == 'predict':
+            self.predict_dataset = PinaDatasetFactory(
+                self.collector_splits['predict'],
+                max_conditions_lengths=self.find_max_conditions_lengths(
+                    'predict')
+            )
         else:
-            raise ValueError("stage must be either 'fit' or 'test'")
+            raise ValueError(
+                "stage must be either 'fit', 'test' or 'predict'."
+            )
 
     @staticmethod
-    def dataset_split(dataset, lengths, seed=None, shuffle=True):
-        """
-        Perform the splitting of the dataset
-        :param dataset: dataset object we wanted to split
-        :param lengths: lengths of elements in dataset
-        :param seed: random seed
-        :param shuffle: shuffle dataset
-        :return: split dataset
-        :rtype: PinaSubset
-        """
-        if sum(lengths) - 1 < 1e-3:
-            len_dataset = len(dataset)
-            lengths = [
-                int(math.floor(len_dataset * length)) for length in lengths
-            ]
-            remainder = len(dataset) - sum(lengths)
-            for i in range(remainder):
-                lengths[i % len(lengths)] += 1
-        elif sum(lengths) - 1 >= 1e-3:
-            raise ValueError(f"Sum of lengths is {sum(lengths)} less than 1")
-
-        if shuffle:
-            if seed is not None:
-                generator = torch.Generator()
-                generator.manual_seed(seed)
-                indices = torch.randperm(sum(lengths), generator=generator)
-            else:
-                indices = torch.randperm(sum(lengths))
-            dataset.apply_shuffle(indices)
+    def _split_condition(condition_dict, splits_dict):
+        len_condition = len(condition_dict['input_points'])
 
-        indices = torch.arange(0, sum(lengths), 1, dtype=torch.uint8).tolist()
-        offsets = [
-            sum(lengths[:i]) if i > 0 else 0 for i in range(len(lengths))
-        ]
-        return [
-            PinaSubset(dataset, indices[offset:offset + length])
-            for offset, length in zip(offsets, lengths)
+        lengths = [
+            int(math.floor(len_condition * length)) for length in
+            splits_dict.values()
         ]
 
-    def _create_datasets(self):
+        remainder = len_condition - sum(lengths)
+        for i in range(remainder):
+            lengths[i % len(lengths)] += 1
+        splits_dict = {k: v for k, v in zip(splits_dict.keys(), lengths)}
+        to_return_dict = {}
+        offset = 0
+        for stage, stage_len in splits_dict.items():
+            to_return_dict[stage] = {k: v[offset:offset + stage_len]
+                                     for k, v in condition_dict.items() if
+                                     k != 'equation'
+                                     # Equations are NEVER dataloaded
+                                     }
+            offset += stage_len
+        return to_return_dict
+
+    def _create_splits(self, collector, splits_dict):
         """
-        Create the dataset objects putting data
+        Split the collector data into train/val/test/predict dictionaries
         """
+
+        # ----------- Auxiliary function ------------
+        def _apply_shuffle(condition_dict, len_data):
+            idx = torch.randperm(len_data)
+            for k, v in condition_dict.items():
+                if k == 'equation':
+                    continue
+                if isinstance(v, list):
+                    condition_dict[k] = [v[i] for i in idx]
+                elif isinstance(v, LabelTensor):
+                    condition_dict[k] = LabelTensor(v.tensor[idx],
+                                                    v.labels)
+                elif isinstance(v, torch.Tensor):
+                    condition_dict[k] = v[idx]
+                else:
+                    raise ValueError(f"Data type {type(v)} not supported")
+        # ----------- End auxiliary function ------------
+
         logging.debug('Dataset creation in PinaDataModule obj')
-        collector = self.problem.collector
-        batching_dim = self.problem.batching_dimension
-        datasets_slots = [i.__slots__ for i in self.dataset_classes]
-        self.datasets = [
-            dataset(device=self.device) for dataset in self.dataset_classes
+        split_names = list(splits_dict.keys())
+        dataset_dict = {name: {} for name in split_names}
+        for condition_name, condition_dict in collector.data_collections.items():
+            len_data = len(condition_dict['input_points'])
+            if self.shuffle:
+                _apply_shuffle(condition_dict, len_data)
+            for key, data in self._split_condition(condition_dict,
+                                                   splits_dict).items():
+                dataset_dict[key].update({condition_name: data})
+        return dataset_dict
+
+    def find_max_conditions_lengths(self, split):
+        max_conditions_lengths = {}
+        for k, v in self.collector_splits[split].items():
+            if self.batch_size is None:
+                max_conditions_lengths[k] = len(v['input_points'])
+            elif self.repeat:
+                max_conditions_lengths[k] = self.batch_size
+            else:
+                max_conditions_lengths[k] = min(len(v['input_points']),
+                                                self.batch_size)
+        return max_conditions_lengths
+
+    def val_dataloader(self):
+        """
+        Create the validation dataloader
+        """
+        batch_size = self.batch_size if self.batch_size is not None else len(
+            self.val_dataset)
+
+        # Use default batching in torch DataLoader (good if the batch size
+        # is small)
+        if self.default_batching:
+            collate = Collator(self.find_max_conditions_lengths('val'))
+            return DataLoader(self.val_dataset, self.batch_size,
+                              collate_fn=collate)
+        collate = Collator(None)
+        # Use custom batching (good if the batch size is large)
+        sampler = PinaBatchSampler(self.val_dataset, batch_size, shuffle=False)
+        return DataLoader(self.val_dataset, sampler=sampler,
+                          collate_fn=collate)
+
+    def train_dataloader(self):
+        """
+        Create the training dataloader
+        """
+        # Use default batching in torch DataLoader (good if the batch size
+        # is small)
+        if self.default_batching:
+            collate = Collator(self.find_max_conditions_lengths('train'))
+            return DataLoader(self.train_dataset, self.batch_size,
+                              collate_fn=collate)
+        collate = Collator(None)
+        # Use custom batching (good if the batch size is large)
+        batch_size = self.batch_size if self.batch_size is not None else len(
+            self.train_dataset)
+        sampler = PinaBatchSampler(self.train_dataset, batch_size,
+                                   shuffle=False)
+        return DataLoader(self.train_dataset, sampler=sampler,
+                          collate_fn=collate)
+
+    def test_dataloader(self):
+        """
+        Create the testing dataloader
+        """
+        raise NotImplementedError("Test dataloader not implemented")
+
+    def predict_dataloader(self):
+        """
+        Create the prediction dataloader
+        """
+        raise NotImplementedError("Predict dataloader not implemented")
+
+    def transfer_batch_to_device(self, batch, device, dataloader_idx):
+        """
+        Transfer the batch to the device. Lightning calls this hook before
+        each step to move the batch to the target device.
+ """ + batch = [ + (k, super(LightningDataModule, self).transfer_batch_to_device(v, + device, + dataloader_idx)) + for k, v in batch.items() ] - logging.debug('Filling datasets in PinaDataModule obj') - for name, data in collector.data_collections.items(): - keys = list(data.keys()) - idx = [ - key for key, val in collector.conditions_name.items() - if val == name - ] - for i, slot in enumerate(datasets_slots): - if slot == keys: - self.datasets[i].add_points(data, idx[0], batching_dim) - continue - datasets = [] - for dataset in self.datasets: - if not dataset.empty: - dataset.initialize() - datasets.append(dataset) - self.datasets = datasets + return batch diff --git a/pina/data/dataset.py b/pina/data/dataset.py new file mode 100644 index 00000000..0bc92371 --- /dev/null +++ b/pina/data/dataset.py @@ -0,0 +1,102 @@ +""" +This module provide basic data management functionalities +""" +import torch +from torch.utils.data import Dataset +from abc import abstractmethod +from torch_geometric.data import Batch + +class PinaDatasetFactory: + """ + Factory class for the PINA dataset. Depending on the type inside the + conditions it creates a different dataset object: + - PinaTensorDataset for torch.Tensor + - PinaGraphDataset for list of torch_geometric.data.Data objects + """ + def __new__(cls, conditions_dict, **kwargs): + if len(conditions_dict) == 0: + raise ValueError('No conditions provided') + if all([isinstance(v['input_points'], torch.Tensor) for v + in conditions_dict.values()]): + return PinaTensorDataset(conditions_dict, **kwargs) + elif all([isinstance(v['input_points'], list) for v + in conditions_dict.values()]): + return PinaGraphDataset(conditions_dict, **kwargs) + raise ValueError('Conditions must be either torch.Tensor or list of Data ' + 'objects.') + +class PinaDataset(Dataset): + """ + Abstract class for the PINA dataset + """ + def __init__(self, conditions_dict, max_conditions_lengths): + self.conditions_dict = conditions_dict + self.max_conditions_lengths = max_conditions_lengths + self.conditions_length = {k: len(v['input_points']) for k, v in + self.conditions_dict.items()} + self.length = max(self.conditions_length.values()) + + def _get_max_len(self): + max_len = 0 + for condition in self.conditions_dict.values(): + max_len = max(max_len, len(condition['input_points'])) + return max_len + + def __len__(self): + return self.length + + @abstractmethod + def __getitem__(self, item): + pass + +class PinaTensorDataset(PinaDataset): + def __init__(self, conditions_dict, max_conditions_lengths, + ): + super().__init__(conditions_dict, max_conditions_lengths) + + def _getitem_int(self, idx): + return { + k: {k_data: v[k_data][idx % len(v['input_points'])] for k_data + in v.keys()} for k, v in self.conditions_dict.items() + } + + def _getitem_list(self, idx): + to_return_dict = {} + for condition, data in self.conditions_dict.items(): + cond_idx = idx[:self.max_conditions_lengths[condition]] + condition_len = self.conditions_length[condition] + if self.length > condition_len: + cond_idx = [idx%condition_len for idx in cond_idx] + to_return_dict[condition] = {k: v[cond_idx] + for k, v in data.items()} + return to_return_dict + + def __getitem__(self, idx): + if isinstance(idx, int): + return self._getitem_int(idx) + return self._getitem_list(idx) + +class PinaGraphDataset(PinaDataset): + pass + """ + def __init__(self, conditions_dict, max_conditions_lengths): + super().__init__(conditions_dict, max_conditions_lengths) + + def __getitem__(self, idx): + + Getitem method for 
large batch size + + to_return_dict = {} + for condition, data in self.conditions_dict.items(): + cond_idx = idx[:self.max_conditions_lengths[condition]] + condition_len = self.conditions_length[condition] + if self.length > condition_len: + cond_idx = [idx%condition_len for idx in cond_idx] + to_return_dict[condition] = {k: Batch.from_data_list([v[i] + for i in cond_idx]) + if isinstance(v, list) + else v[cond_idx].tensor.reshape(-1, v.size(-1)) + for k, v in data.items() + } + return to_return_dict + """ diff --git a/pina/data/pina_batch.py b/pina/data/pina_batch.py deleted file mode 100644 index c5d1b61d..00000000 --- a/pina/data/pina_batch.py +++ /dev/null @@ -1,47 +0,0 @@ -""" -Batch management module -""" -from .pina_subset import PinaSubset - - -class Batch: - """ - Implementation of the Batch class used during training to perform SGD - optimization. - """ - - def __init__(self, dataset_dict, idx_dict, require_grad=True): - self.attributes = [] - for k, v in dataset_dict.items(): - setattr(self, k, v) - self.attributes.append(k) - - for k, v in idx_dict.items(): - setattr(self, k + '_idx', v) - self.require_grad = require_grad - - def __len__(self): - """ - Returns the number of elements in the batch - :return: number of elements in the batch - :rtype: int - """ - length = 0 - for dataset in dir(self): - attribute = getattr(self, dataset) - if isinstance(attribute, list): - length += len(getattr(self, dataset)) - return length - - def __getattribute__(self, item): - if item in super().__getattribute__('attributes'): - dataset = super().__getattribute__(item) - index = super().__getattribute__(item + '_idx') - return PinaSubset(dataset.dataset, dataset.indices[index]) - return super().__getattribute__(item) - - def __getattr__(self, item): - if item == 'data' and len(self.attributes) == 1: - item = self.attributes[0] - return super().__getattribute__(item) - raise AttributeError(f"'Batch' object has no attribute '{item}'") diff --git a/pina/data/pina_dataloader.py b/pina/data/pina_dataloader.py deleted file mode 100644 index e2d3fb76..00000000 --- a/pina/data/pina_dataloader.py +++ /dev/null @@ -1,68 +0,0 @@ -""" -This module is used to create an iterable object used during training -""" -import math -from .pina_batch import Batch - - -class PinaDataLoader: - """ - This class is used to create a dataloader to use during the training. - - :var condition_names: The names of the conditions. The order is consistent - with the condition indeces in the batches. - :vartype condition_names: list[str] - """ - - def __init__(self, dataset_dict, batch_size, condition_names) -> None: - """ - Initialize local variables - :param dataset_dict: Dictionary of datasets - :type dataset_dict: dict - :param batch_size: Size of the batch - :type batch_size: int - :param condition_names: Names of the conditions - :type condition_names: list[str] - """ - self.condition_names = condition_names - self.dataset_dict = dataset_dict - self._init_batches(batch_size) - - def _init_batches(self, batch_size=None): - """ - Create batches according to the batch_size provided in input. 
- """ - self.batches = [] - n_elements = sum(len(v) for v in self.dataset_dict.values()) - if batch_size is None: - batch_size = n_elements - indexes_dict = {} - n_batches = int(math.ceil(n_elements / batch_size)) - for k, v in self.dataset_dict.items(): - if n_batches != 1: - indexes_dict[k] = math.floor(len(v) / (n_batches - 1)) - else: - indexes_dict[k] = len(v) - for i in range(n_batches): - temp_dict = {} - for k, v in indexes_dict.items(): - if i != n_batches - 1: - temp_dict[k] = slice(i * v, (i + 1) * v) - else: - temp_dict[k] = slice(i * v, len(self.dataset_dict[k])) - self.batches.append( - Batch(idx_dict=temp_dict, dataset_dict=self.dataset_dict)) - - def __iter__(self): - """ - Makes dataloader object iterable - """ - yield from self.batches - - def __len__(self): - """ - Return the number of batches. - :return: The number of batches. - :rtype: int - """ - return len(self.batches) diff --git a/pina/data/pina_subset.py b/pina/data/pina_subset.py deleted file mode 100644 index 275541e9..00000000 --- a/pina/data/pina_subset.py +++ /dev/null @@ -1,36 +0,0 @@ -""" -Module for PinaSubset class -""" -from pina import LabelTensor -from torch import Tensor, float32 - - -class PinaSubset: - """ - TODO - """ - __slots__ = ['dataset', 'indices', 'require_grad'] - - def __init__(self, dataset, indices, require_grad=True): - """ - TODO - """ - self.dataset = dataset - self.indices = indices - self.require_grad = require_grad - - def __len__(self): - """ - TODO - """ - return len(self.indices) - - def __getattr__(self, name): - tensor = self.dataset.__getattribute__(name) - if isinstance(tensor, (LabelTensor, Tensor)): - tensor = tensor[[self.indices]].to(self.dataset.device) - return tensor.requires_grad_( - self.require_grad) if tensor.dtype == float32 else tensor - if isinstance(tensor, list): - return [tensor[i] for i in self.indices] - raise AttributeError(f"No attribute named {name}") diff --git a/pina/data/sample_dataset.py b/pina/data/sample_dataset.py deleted file mode 100644 index bc3bca33..00000000 --- a/pina/data/sample_dataset.py +++ /dev/null @@ -1,35 +0,0 @@ -""" -Sample dataset module -""" -from copy import deepcopy -from .base_dataset import BaseDataset -from ..condition import InputPointsEquationCondition - - -class SamplePointDataset(BaseDataset): - """ - This class extends the BaseDataset to handle physical datasets - composed of only input points. - """ - data_type = 'physics' - __slots__ = InputPointsEquationCondition.__slots__ - - def add_points(self, data_dict, condition_idx, batching_dim=0): - data_dict = deepcopy(data_dict) - data_dict.pop('equation') - super().add_points(data_dict, condition_idx) - - def _init_from_problem(self, collector_dict): - for name, data in collector_dict.items(): - keys = list(data.keys()) - if set(self.__slots__) == set(keys): - data = deepcopy(data) - data.pop('equation') - self._populate_init_list(data) - idx = [ - key for key, val in - self.problem.collector.conditions_name.items() - if val == name - ] - self.conditions_idx.append(idx) - self.initialize() diff --git a/pina/data/supervised_dataset.py b/pina/data/supervised_dataset.py deleted file mode 100644 index be601050..00000000 --- a/pina/data/supervised_dataset.py +++ /dev/null @@ -1,13 +0,0 @@ -""" -Supervised dataset module -""" -from .base_dataset import BaseDataset - - -class SupervisedDataset(BaseDataset): - """ - This class extends the BaseDataset to handle datasets that consist of - input-output pairs. 
- """ - data_type = 'supervised' - __slots__ = ['input_points', 'output_points'] diff --git a/pina/data/unsupervised_dataset.py b/pina/data/unsupervised_dataset.py deleted file mode 100644 index 18cf296f..00000000 --- a/pina/data/unsupervised_dataset.py +++ /dev/null @@ -1,14 +0,0 @@ -""" -Unsupervised dataset module -""" -from .base_dataset import BaseDataset - - -class UnsupervisedDataset(BaseDataset): - """ - This class extend BaseDataset class to handle - unsupervised dataset,composed of input points - and, optionally, conditional variables - """ - data_type = 'unsupervised' - __slots__ = ['input_points', 'conditional_variables'] diff --git a/pina/graph.py b/pina/graph.py index 97b2770e..bde5bbf5 100644 --- a/pina/graph.py +++ b/pina/graph.py @@ -93,8 +93,8 @@ def _build_radius(**kwargs): logging.debug(f"edge_index computed") return Data( - x=nodes_data, - pos=nodes_coordinates, + x=nodes_data.tensor, + pos=nodes_coordinates.tensor, edge_index=edge_index, edge_attr=edges_data, ) diff --git a/pina/label_tensor.py b/pina/label_tensor.py index 719975c5..a3cf5d23 100644 --- a/pina/label_tensor.py +++ b/pina/label_tensor.py @@ -4,26 +4,20 @@ from torch import Tensor -def issubset(a, b): - """ - Check if a is a subset of b. - """ - if isinstance(a, list) and isinstance(b, list): - return set(a).issubset(set(b)) - if isinstance(a, range) and isinstance(b, range): - return a.start <= b.start and a.stop >= b.stop - return False - +full_labels = True +MATH_FUNCTIONS = {torch.sin, torch.cos} class LabelTensor(torch.Tensor): """Torch tensor with a label for any column.""" @staticmethod def __new__(cls, x, labels, *args, **kwargs): + full = kwargs.pop("full", full_labels) + if isinstance(x, LabelTensor): + x.full = full return x - else: - return super().__new__(cls, x, *args, **kwargs) + return super().__new__(cls, x, *args, **kwargs) @property def tensor(self): @@ -40,22 +34,11 @@ def __init__(self, x, labels, **kwargs): {1: {"name": "space"['a', 'b', 'c']) """ - self.dim_names = None - self.full = kwargs.get('full', True) - self.labels = labels - - @classmethod - def __internal_init__(cls, - x, - labels, - dim_names, - *args, - **kwargs): - lt = cls.__new__(cls, x, labels, *args, **kwargs) - lt._labels = labels - lt.full = kwargs.get('full', True) - lt.dim_names = dim_names - return lt + self.full = kwargs.get('full', full_labels) + if labels is not None: + self.labels = labels + else: + self._labels = {} @property def labels(self): @@ -104,14 +87,13 @@ def labels(self, labels): self._labels = {} if isinstance(labels, dict): self._init_labels_from_dict(labels) - elif isinstance(labels, list): + elif isinstance(labels, (list, range)): self._init_labels_from_list(labels) elif isinstance(labels, str): labels = [labels] self._init_labels_from_list(labels) else: raise ValueError("labels must be list, dict or string.") - self.set_names() def _init_labels_from_dict(self, labels): """ @@ -125,34 +107,38 @@ def _init_labels_from_dict(self, labels): """ tensor_shape = self.shape + # Set all labels if full_labels is True if hasattr(self, 'full') and self.full: labels = { i: labels[i] if i in labels else { - 'name': i + 'name': i, 'dof': range(tensor_shape[i]) } - for i in labels.keys() + for i in range(len(tensor_shape)) } + for k, v in labels.items(): + # Init labels from str if isinstance(v, str): v = {'name': v, 'dof': range(tensor_shape[k])} + # Init labels from dict - elif isinstance(v, dict) and list(v.keys()) == ['name']: - # Init from dict with only name key - v['dof'] = range(tensor_shape[k]) - # 
Init from dict with both name and dof keys - elif isinstance(v, dict) and sorted(list( - v.keys())) == ['dof', 'name']: - dof_list = v['dof'] - dof_len = len(dof_list) - if dof_len != len(set(dof_list)): - raise ValueError("dof must be unique") - if dof_len != tensor_shape[k]: - raise ValueError( - 'Number of dof does not match tensor shape') + elif isinstance(v, dict): + # Only name of the dimension if provided + if list(v.keys()) == ['name']: + v['dof'] = range(tensor_shape[k]) + # Both name and dof are provided + elif sorted(list(v.keys())) == ['dof', 'name']: + dof_list = v['dof'] + dof_len = len(dof_list) + if dof_len != len(set(dof_list)): + raise ValueError("dof must be unique") + if dof_len != tensor_shape[k]: + raise ValueError( + 'Number of dof does not match tensor shape') else: raise ValueError('Illegal labels initialization') - # Perform update + # Assign labels values self._labels[k] = v def _init_labels_from_list(self, labels): @@ -172,75 +158,71 @@ def _init_labels_from_list(self, labels): } self._init_labels_from_dict(last_dim_labels) - def set_names(self): - labels = self.stored_labels - self.dim_names = {} - for dim in labels.keys(): - self.dim_names[labels[dim]['name']] = dim - def extract(self, labels_to_extract): """ Extract the subset of the original tensor by returning all the columns corresponding to the passed ``label_to_extract``. - :param label_to_extract: The label(s) to extract. - :type label_to_extract: str | list(str) | tuple(str) + :param labels_to_extract: The label(s) to extract. + :type labels_to_extract: str | list(str) | tuple(str) :raises TypeError: Labels are not ``str``. :raises ValueError: Label to extract is not in the labels ``list``. """ # Convert str/int to string + def find_names(labels): + dim_names = {} + for dim in labels.keys(): + dim_names[labels[dim]['name']] = dim + return dim_names + if isinstance(labels_to_extract, (str, int)): labels_to_extract = [labels_to_extract] # Store useful variables - labels = self.stored_labels + labels = copy(self._labels) stored_keys = labels.keys() - dim_names = self.dim_names + dim_names = find_names(labels) ndim = len(super().shape) - # Convert tuple/list to dict + # Convert tuple/list to dict (having a list as input + # means that we want to extract a values from the last dimension) if isinstance(labels_to_extract, (tuple, list)): if not ndim - 1 in stored_keys: raise ValueError( "LabelTensor does not have labels in last dimension") - name = labels[max(stored_keys)]['name'] + name = labels[ndim-1]['name'] labels_to_extract = {name: list(labels_to_extract)} # If labels_to_extract is not dict then rise error if not isinstance(labels_to_extract, dict): raise ValueError('labels_to_extract must be str or list or dict') - # Make copy of labels (avoid issue in consistency) - updated_labels = {k: copy(v) for k, v in labels.items()} - # Initialize list used to perform extraction - extractor = [slice(None) for _ in range(ndim)] + extractor = [slice(None)]*ndim # Loop over labels_to_extract dict - for k, v in labels_to_extract.items(): + for dim_name, labels_te in labels_to_extract.items(): # If label is not find raise value error - idx_dim = dim_names.get(k) + idx_dim = dim_names.get(dim_name, None) if idx_dim is None: raise ValueError( 'Cannot extract label with is not in original labels') dim_labels = labels[idx_dim]['dof'] - v = [v] if isinstance(v, (int, str)) else v - - if not isinstance(v, range): - extractor[idx_dim] = [dim_labels.index(i) - for i in v] if len(v) > 1 else slice( - dim_labels.index(v[0]), 
- dim_labels.index(v[0]) + 1) + labels_te = [labels_te] if isinstance(labels_te, (int, str)) else labels_te + if not isinstance(labels_te, range): + #If is done to keep the dimension if there is only one extracted label + extractor[idx_dim] = [dim_labels.index(i) for i in labels_te] \ + if len(labels_te)>1 else slice(dim_labels.index(labels_te[0]), dim_labels.index(labels_te[0])+1) else: - extractor[idx_dim] = slice(v.start, v.stop) + extractor[idx_dim] = slice(labels_te.start, labels_te.stop) - updated_labels.update({idx_dim: {'dof': v, 'name': k}}) + labels.update({idx_dim: {'dof': labels_te, 'name': dim_name}}) - tensor = self.tensor - tensor = tensor[extractor] - return LabelTensor.__internal_init__(tensor, updated_labels, dim_names) + tensor = super().__getitem__(extractor).as_subclass(LabelTensor) + tensor._labels = labels + return tensor def __str__(self): """ @@ -272,39 +254,53 @@ def cat(tensors, dim=0): return [] if len(tensors) == 1 or isinstance(tensors, LabelTensor): return tensors[0] + # Perform cat on tensors new_tensor = torch.cat(tensors, dim=dim) - # Update labels - labels = LabelTensor.__create_labels_cat(tensors, dim) + # --------- Start definition auxiliary function ------ + # Compute and update labels + def create_labels_cat(tensors, dim, tensor_shape): + stored_labels = [tensor.stored_labels for tensor in tensors] + keys = stored_labels[0].keys() + + if any(not all(stored_labels[i][k] == stored_labels[0][k] for i in + range(len(stored_labels))) for k in keys if k != dim): + raise RuntimeError('tensors must have the same shape and dof') + + # Copy labels from the first tensor and update the 'dof' for dimension `dim` + labels = copy(stored_labels[0]) + if dim in labels: + labels_list = [tensor[dim]['dof'] for tensor in stored_labels] + last_dim_dof = range(tensor_shape[dim]) if all(isinstance(label, range) + for label in labels_list) else sum(labels_list, []) + labels[dim]['dof'] = last_dim_dof + return labels + # --------- End definition auxiliary function ------ - return LabelTensor.__internal_init__(new_tensor, labels, - tensors[0].dim_names) + # Update labels + if dim in tensors[0].stored_labels.keys(): + new_tensor_shape = new_tensor.shape + labels = create_labels_cat(tensors, dim, new_tensor_shape) + else: + labels = tensors[0].stored_labels + new_tensor._labels = labels + return new_tensor @staticmethod - def __create_labels_cat(tensors, dim): - # Check if names and dof of the labels are the same in all dimensions - # except in dim - stored_labels = [tensor.stored_labels for tensor in tensors] - - # check if: - # - labels dict have same keys - # - all labels are the same expect for dimension dim - if not all( - all(stored_labels[i][k] == stored_labels[0][k] - for i in range(len(stored_labels))) - for k in stored_labels[0].keys() if k != dim): - raise RuntimeError('tensors must have the same shape and dof') - - labels = {k: copy(v) for k, v in tensors[0].stored_labels.items()} - if dim in labels.keys(): - last_dim_dof = [i for j in stored_labels for i in j[dim]['dof']] - labels[dim]['dof'] = last_dim_dof - return labels + def stack(tensors): + new_tensor = torch.stack(tensors) + labels = tensors[0]._labels + labels = {key + 1: value for key, value in labels.items()} + if full_labels: + new_tensor.labels = labels + else: + new_tensor._labels = labels + return new_tensor def requires_grad_(self, mode=True): lt = super().requires_grad_(mode) - lt.labels = self._labels + lt._labels = self._labels return lt @property @@ -316,10 +312,9 @@ def to(self, *args, **kwargs): 
Performs Tensor dtype and/or device conversion. For more details, see :meth:`torch.Tensor.to`. """ - tmp = super().to(*args, **kwargs) - new = self.__class__.clone(self) - new.data = tmp.data - return new + lt = super().to(*args, **kwargs) + lt._labels = self._labels + return lt def clone(self, *args, **kwargs): """ @@ -329,8 +324,7 @@ def clone(self, *args, **kwargs): :return: A copy of the tensor. :rtype: LabelTensor """ - labels = {k: copy(v) for k, v in self._labels.items()} - out = LabelTensor(super().clone(*args, **kwargs), labels) + out = LabelTensor(super().clone(*args, **kwargs), deepcopy(self._labels)) return out @staticmethod @@ -348,7 +342,7 @@ def summation(tensors): raise RuntimeError('Tensors must have the same shape and labels') last_dim_labels = [] - data = torch.zeros(tensors[0].tensor.shape) + data = torch.zeros(tensors[0].tensor.shape).to(tensors[0].device) for tensor in tensors: data += tensor.tensor last_dim_labels.append(tensor.labels) @@ -396,82 +390,114 @@ def vstack(label_tensors): """ return LabelTensor.cat(label_tensors, dim=0) + # ---------------------- Start auxiliary function definition ----- + # This method is used to update labels + def _update_single_label(self, old_labels, to_update_labels, index, dim, + to_update_dim): + """ + TODO + :param old_labels: labels from which retrieve data + :param to_update_labels: labels to update + :param index: index of dof to retain + :param dim: label index + :return: + """ + old_dof = old_labels[to_update_dim]['dof'] + if isinstance(index, slice): + to_update_labels.update({ + dim: { + 'dof': old_dof[index], + 'name': old_labels[dim]['name'] + } + }) + return + if isinstance(index, int): + index = [index] + if isinstance(index, (list, torch.Tensor)): + to_update_labels.update({ + dim: { + 'dof': [old_dof[i] for i in index] if isinstance(old_dof, list) else index, + 'name': old_labels[dim]['name'] + } + }) + return + raise NotImplementedError(f'Getitem not implemented for ' + f'{type(index)} values') + # ---------------------- End auxiliary function definition ----- + + def __getitem__(self, index): """ TODO: Complete docstring :param index: :return: """ - if isinstance(index, - str) or (isinstance(index, (tuple, list)) - and all(isinstance(a, str) for a in index)): + # Index are str --> call extract + if isinstance(index, str) or (isinstance(index, (tuple, list)) + and all( + isinstance(a, str) for a in index)): return self.extract(index) + # Store important variables selected_lt = super().__getitem__(index) + stored_labels = self._labels + labels = copy(stored_labels) - if isinstance(index, (int, slice)): + # Put here because it is the most common case (int as index). 
+ # Used by DataLoader -> put here for efficiency purpose + if isinstance(index, list): + if 0 in labels.keys(): + self._update_single_label(stored_labels, labels, index, + 0, 0) + selected_lt._labels = labels + return selected_lt + + if isinstance(index, int): + labels.pop(0, None) + labels = {key - 1 if key > 0 else key: value for key, value in + labels.items()} + selected_lt._labels = labels + return selected_lt + + if not isinstance(index, (tuple, torch.Tensor)): index = [index] + # Ellipsis are used to perform operation on the last dimension if index[0] == Ellipsis: - index = [slice(None)] * (self.ndim - 1) + [index[1]] - - if hasattr(self, "labels"): - labels = {k: copy(v) for k, v in self.stored_labels.items()} - for j, idx in enumerate(index): - if isinstance(idx, int): + if len(self.shape) in labels: + self._update_single_label(stored_labels, labels, index, 0, 0) + selected_lt._labels = labels + return selected_lt + + i = 0 + for j, idx in enumerate(index): + if j in self.stored_labels.keys(): + if isinstance(idx, int) or ( + isinstance(idx, torch.Tensor) and idx.ndim == 0): selected_lt = selected_lt.unsqueeze(j) - if j in labels.keys() and idx != slice(None): - self._update_single_label(labels, labels, idx, j) - selected_lt = LabelTensor.__internal_init__(selected_lt, labels, - self.dim_names) + if idx != slice(None): + self._update_single_label(stored_labels, labels, idx, j, i) + else: + if isinstance(idx, int): + labels = {key - 1 if key > j else key: + value for key, value in labels.items()} + continue + i += 1 + selected_lt._labels = labels return selected_lt - @staticmethod - def _update_single_label(old_labels, to_update_labels, index, dim): - """ - TODO - :param old_labels: labels from which retrieve data - :param to_update_labels: labels to update - :param index: index of dof to retain - :param dim: label index - :return: - """ - old_dof = old_labels[dim]['dof'] - if not isinstance( - index, - (int, slice)) and len(index) == len(old_dof) and isinstance( - old_dof, range): - return - if isinstance(index, torch.Tensor): - index = index.nonzero( - as_tuple=True - )[0] if index.dtype == torch.bool else index.tolist() - if isinstance(index, list): - to_update_labels.update({ - dim: { - 'dof': [old_dof[i] for i in index], - 'name': old_labels[dim]['name'] - } - }) - else: - to_update_labels.update( - {dim: { - 'dof': old_dof[index], - 'name': old_labels[dim]['name'] - }}) - def sort_labels(self, dim=None): - def arg_sort(lst): return sorted(range(len(lst)), key=lambda x: lst[x]) - if dim is None: dim = self.ndim - 1 + if self.shape[dim] == 1: + return self labels = self.stored_labels[dim]['dof'] sorted_index = arg_sort(labels) indexer = [slice(None)] * self.ndim indexer[dim] = sorted_index - return self.__getitem__(indexer) + return self.__getitem__(tuple(indexer)) def __deepcopy__(self, memo): cls = self.__class__ @@ -480,10 +506,16 @@ def __deepcopy__(self, memo): def permute(self, *dims): tensor = super().permute(*dims) - stored_labels = self.stored_labels + labels = self._labels keys_list = list(*dims) labels = { - keys_list.index(k): copy(stored_labels[k]) - for k in stored_labels.keys() + keys_list.index(k): labels[k] + for k in labels.keys() } - return LabelTensor.__internal_init__(tensor, labels, self.dim_names) + tensor._labels = labels + return tensor + + def detach(self): + lt = super().detach() + lt._labels = self.stored_labels + return lt \ No newline at end of file diff --git a/pina/model/layers/lowrank_layer.py b/pina/model/layers/lowrank_layer.py index 
80fb43e4..c36d2795 100644 --- a/pina/model/layers/lowrank_layer.py +++ b/pina/model/layers/lowrank_layer.py @@ -119,6 +119,7 @@ def forward(self, x, coords): :rtype: torch.Tensor """ # extract basis + coords = coords.as_subclass(torch.Tensor) basis = self._basis(coords) # reshape [B, N, D, 2*rank] shape = list(basis.shape[:-1]) + [-1, 2 * self.rank] diff --git a/pina/model/network.py b/pina/model/network.py index 6fde8039..aed3dff3 100644 --- a/pina/model/network.py +++ b/pina/model/network.py @@ -29,7 +29,8 @@ class is used internally in PINA to convert # check model consistency check_consistency(model, nn.Module) check_consistency(input_variables, str) - check_consistency(output_variables, str) + if output_variables is not None: + check_consistency(output_variables, str) self._model = model self._input_variables = input_variables @@ -67,16 +68,15 @@ def forward(self, x): # in case `input_variables = []` all points are used if self._input_variables: x = x.extract(self._input_variables) - # extract features and append for feature in self._extra_features: x = x.append(feature(x)) # perform forward pass + converting to LabelTensor - output = self._model(x).as_subclass(LabelTensor) - - # set the labels for LabelTensor - output.labels = self._output_variables + x = x.as_subclass(torch.Tensor) + output = self._model(x) + if self._output_variables is not None: + output = LabelTensor(output, self._output_variables) return output @@ -97,15 +97,9 @@ def forward_map(self, x): This function does not extract the input variables, all the variables are used for both tensors. Output variables are correctly applied. """ - # convert LabelTensor s to torch.Tensor s - x = list(map(lambda x: x.as_subclass(torch.Tensor), x)) # perform forward pass (using torch.Tensor) + converting to LabelTensor - output = self._model(x).as_subclass(LabelTensor) - - # set the labels for LabelTensor - output.labels = self._output_variables - + output = LabelTensor(self._model(x.tensor), self._output_variables) return output @property diff --git a/pina/operators.py b/pina/operators.py index 0b306dfb..ef389a64 100644 --- a/pina/operators.py +++ b/pina/operators.py @@ -63,11 +63,9 @@ def grad_scalar_output(output_, input_, d): retain_graph=True, allow_unused=True, )[0] - - gradients.labels = input_.labels - gradients = gradients.extract(d) + gradients.labels = input_.stored_labels + gradients = gradients[..., [input_.labels.index(i) for i in d]] gradients.labels = [f"d{output_fieldname}d{i}" for i in d] - return gradients if not isinstance(input_, LabelTensor): @@ -190,7 +188,9 @@ def laplacian(output_, input_, components=None, d=None, method="std"): to_append_tensors = [] for i, label in enumerate(grad_output.labels): gg = grad(grad_output, input_, d=d, components=[label]) - to_append_tensors.append(gg.extract([gg.labels[i]])) + gg = gg.extract([gg.labels[i]]) + + to_append_tensors.append(gg) labels = [f"dd{components[0]}"] result = LabelTensor.summation(tensors=to_append_tensors) result.labels = labels diff --git a/pina/problem/abstract_problem.py b/pina/problem/abstract_problem.py index 6897fbb7..5f424cf5 100644 --- a/pina/problem/abstract_problem.py +++ b/pina/problem/abstract_problem.py @@ -90,10 +90,9 @@ def input_variables(self): variables += self.spatial_variables if hasattr(self, "temporal_variable"): variables += self.temporal_variable - if hasattr(self, "unknown_parameters"): + if hasattr(self, "parameters"): variables += self.parameters - if hasattr(self, "custom_variables"): - variables += self.custom_variables + 
return variables @@ -170,7 +169,6 @@ def discretise_domain(self, f"Wrong variables for sampling. Variables ", f"should be in {self.input_variables}.", ) - # check correct location if locations == "all": locations = [ diff --git a/pina/problem/inverse_problem.py b/pina/problem/inverse_problem.py index 51cbd3ca..e54495a6 100644 --- a/pina/problem/inverse_problem.py +++ b/pina/problem/inverse_problem.py @@ -1,7 +1,6 @@ """Module for the ParametricProblem class""" - +import torch from abc import abstractmethod - from .abstract_problem import AbstractProblem diff --git a/pina/solvers/graph.py b/pina/solvers/graph.py deleted file mode 100644 index 9af04e76..00000000 --- a/pina/solvers/graph.py +++ /dev/null @@ -1,34 +0,0 @@ -from .supervised import SupervisedSolver -from ..graph import Graph - - -class GraphSupervisedSolver(SupervisedSolver): - - def __init__( - self, - problem, - model, - nodes_coordinates, - nodes_data, - loss=None, - optimizer=None, - scheduler=None): - super().__init__(problem, model, loss, optimizer, scheduler) - if isinstance(nodes_coordinates, str): - self._nodes_coordinates = [nodes_coordinates] - else: - self._nodes_coordinates = nodes_coordinates - if isinstance(nodes_data, str): - self._nodes_data = nodes_data - else: - self._nodes_data = nodes_data - - def forward(self, input): - input_coords = input.extract(self._nodes_coordinates) - input_data = input.extract(self._nodes_data) - - if not isinstance(input, Graph): - input = Graph.build('radius', nodes_coordinates=input_coords, nodes_data=input_data, radius=0.2) - g = self.model(input.data, edge_index=input.data.edge_index) - g.labels = {1: {'name': 'output', 'dof': ['u']}} - return g diff --git a/pina/solvers/pinns/basepinn.py b/pina/solvers/pinns/basepinn.py index 543f823f..588d7314 100644 --- a/pina/solvers/pinns/basepinn.py +++ b/pina/solvers/pinns/basepinn.py @@ -1,14 +1,15 @@ """ Module for PINN """ -import sys from abc import ABCMeta, abstractmethod import torch - -from ...solvers.solver import SolverInterface -from pina.utils import check_consistency -from pina.loss.loss_interface import LossInterface -from pina.problem import InverseProblem from torch.nn.modules.loss import _Loss +from ...condition import InputOutputPointsCondition +from ...solvers.solver import SolverInterface +from ...utils import check_consistency +from ...loss.loss_interface import LossInterface +from ...problem import InverseProblem +from ...condition import DomainEquationCondition +from ...optim import TorchOptimizer, TorchScheduler torch.pi = torch.acos(torch.zeros(1)).item() * 2 # which is 3.1415927410125732 @@ -25,13 +26,14 @@ class PINNInterface(SolverInterface, metaclass=ABCMeta): to the user to choose which problem the implemented solver inheriting from this class is suitable for. """ - + accepted_condition_types = [DomainEquationCondition.condition_type[0], + InputOutputPointsCondition.condition_type[0]] def __init__( self, models, problem, optimizers, - optimizers_kwargs, + schedulers, extra_features, loss, ): @@ -53,11 +55,20 @@ def __init__( :param torch.nn.Module loss: The loss function used as minimizer, default :class:`torch.nn.MSELoss`. 
""" + if optimizers is None: + optimizers = TorchOptimizer(torch.optim.Adam, lr=0.001) + + if schedulers is None: + schedulers = TorchScheduler(torch.optim.lr_scheduler.ConstantLR) + + if loss is None: + loss = torch.nn.MSELoss() + super().__init__( models=models, problem=problem, optimizers=optimizers, - optimizers_kwargs=optimizers_kwargs, + schedulers=schedulers, extra_features=extra_features, ) @@ -85,7 +96,12 @@ def __init__( # variable will be stored with name = self.__logged_metric self.__logged_metric = None - def training_step(self, batch, _): + self._model = self._pina_models[0] + self._optimizer = self._pina_optimizers[0] + self._scheduler = self._pina_schedulers[0] + + + def training_step(self, batch): """ The Physics Informed Solver Training Step. This function takes care of the physics informed training step, and it must not be override @@ -99,52 +115,68 @@ def training_step(self, batch, _): :rtype: LabelTensor """ - condition_losses = [] - condition_idx = batch["condition"] - - for condition_id in range(condition_idx.min(), condition_idx.max() + 1): + condition_loss = [] + for condition_name, points in batch: + if 'output_points' in points: + input_pts, output_pts = points['input_points'], points['output_points'] - condition_name = self._dataloader.condition_names[condition_id] - condition = self.problem.conditions[condition_name] - pts = batch["pts"] - # condition name is logged (if logs enabled) - self.__logged_metric = condition_name - - if len(batch) == 2: - samples = pts[condition_idx == condition_id] - loss = self.loss_phys(samples, condition.equation) - elif len(batch) == 3: - samples = pts[condition_idx == condition_id] - ground_truth = batch["output"][condition_idx == condition_id] - loss = self.loss_data(samples, ground_truth) + loss_ = self.loss_data(input_pts=input_pts, output_pts=output_pts) + condition_loss.append(loss_.as_subclass(torch.Tensor)) else: - raise ValueError("Batch size not supported") + input_pts = points['input_points'] - # add condition losses for each epoch - condition_losses.append(loss * condition.data_weight) + condition = self.problem.conditions[condition_name] + loss_ = self.loss_phys(input_pts.requires_grad_(), condition.equation) + condition_loss.append(loss_.as_subclass(torch.Tensor)) + condition_loss.append(loss_.as_subclass(torch.Tensor)) # clamp unknown parameters in InverseProblem (if needed) self._clamp_params() + loss = sum(condition_loss) + self.log('train_loss', loss, prog_bar=True, on_epoch=True, + logger=True, batch_size=self.get_batch_size(batch), + sync_dist=True) - # total loss (must be a torch.Tensor) - total_loss = sum(condition_losses) - return total_loss.as_subclass(torch.Tensor) + return loss - def loss_data(self, input_tensor, output_tensor): + def validation_step(self, batch): + """ + TODO: add docstring + """ + condition_loss = [] + for condition_name, points in batch: + if 'output_points' in points: + input_pts, output_pts = points['input_points'], points['output_points'] + loss_ = self.loss_data(input_pts=input_pts, output_pts=output_pts) + condition_loss.append(loss_.as_subclass(torch.Tensor)) + else: + input_pts = points['input_points'] + + condition = self.problem.conditions[condition_name] + with torch.set_grad_enabled(True): + loss_ = self.loss_phys(input_pts.requires_grad_(), condition.equation) + condition_loss.append(loss_.as_subclass(torch.Tensor)) + condition_loss.append(loss_.as_subclass(torch.Tensor)) + # clamp unknown parameters in InverseProblem (if needed) + + loss = sum(condition_loss) + 
self.log('val_loss', loss, on_epoch=True, prog_bar=True, + logger=True, batch_size=self.get_batch_size(batch), + sync_dist=True) + + def loss_data(self, input_pts, output_pts): """ The data loss for the PINN solver. It computes the loss between the network output against the true solution. This function should not be override if not intentionally. - :param LabelTensor input_tensor: The input to the neural networks. - :param LabelTensor output_tensor: The true solution to compare the + :param LabelTensor input_pts: The input to the neural networks. + :param LabelTensor output_pts: The true solution to compare the network solution. :return: The residual loss averaged on the input coordinates :rtype: torch.Tensor """ - loss_value = self.loss(self.forward(input_tensor), output_tensor) - self.store_log(loss_value=float(loss_value)) - return self.loss(self.forward(input_tensor), output_tensor) + return self._loss(self.forward(input_pts), output_pts) @abstractmethod def loss_phys(self, samples, equation): @@ -195,13 +227,17 @@ def store_log(self, loss_value): :param str name: The name of the loss. :param torch.Tensor loss_value: The value of the loss. """ + batch_size = self.trainer.data_module.batch_size \ + if self.trainer.data_module.batch_size is not None else 999 + self.log( self.__logged_metric + "_loss", loss_value, prog_bar=True, logger=True, on_epoch=True, - on_step=False, + on_step=True, + batch_size=batch_size, ) self.__logged_res_losses.append(loss_value) diff --git a/pina/solvers/pinns/pinn.py b/pina/solvers/pinns/pinn.py index 15f90818..08882020 100644 --- a/pina/solvers/pinns/pinn.py +++ b/pina/solvers/pinns/pinn.py @@ -9,10 +9,8 @@ _LRScheduler as LRScheduler, ) # torch < 2.0 -from torch.optim.lr_scheduler import ConstantLR from .basepinn import PINNInterface -from pina.utils import check_consistency from pina.problem import InverseProblem @@ -56,16 +54,16 @@ class PINN(PINNInterface): DOI: `10.1038 `_. """ + __name__ = 'PINN' + def __init__( self, problem, model, extra_features=None, - loss=torch.nn.MSELoss(), - optimizer=torch.optim.Adam, - optimizer_kwargs={"lr": 0.001}, - scheduler=ConstantLR, - scheduler_kwargs={"factor": 1, "total_iters": 0}, + loss=None, + optimizer=None, + scheduler=None, ): """ :param AbstractProblem problem: The formulation of the problem. @@ -82,20 +80,15 @@ def __init__( :param dict scheduler_kwargs: LR scheduler constructor keyword args. 
""" super().__init__( - models=[model], + models=model, problem=problem, - optimizers=[optimizer], - optimizers_kwargs=[optimizer_kwargs], + optimizers=optimizer, + schedulers=scheduler, extra_features=extra_features, loss=loss, ) - # check consistency - check_consistency(scheduler, LRScheduler, subclass=True) - check_consistency(scheduler_kwargs, dict) - # assign variables - self._scheduler = scheduler(self.optimizers[0], **scheduler_kwargs) self._neural_net = self.models[0] def forward(self, x): @@ -126,9 +119,8 @@ def loss_phys(self, samples, equation): """ residual = self.compute_residual(samples=samples, equation=equation) loss_value = self.loss( - torch.zeros_like(residual, requires_grad=True), residual + torch.zeros_like(residual), residual ) - self.store_log(loss_value=float(loss_value)) return loss_value def configure_optimizers(self): @@ -141,16 +133,21 @@ def configure_optimizers(self): """ # if the problem is an InverseProblem, add the unknown parameters # to the parameters that the optimizer needs to optimize + + + self._optimizer.hook(self._model.parameters()) if isinstance(self.problem, InverseProblem): - self.optimizers[0].add_param_group( - { - "params": [ - self._params[var] - for var in self.problem.unknown_variables - ] - } - ) - return self.optimizers, [self.scheduler] + self._optimizer.optimizer_instance.add_param_group( + { + "params": [ + self._params[var] + for var in self.problem.unknown_variables + ] + } + ) + self._scheduler.hook(self._optimizer) + return ([self._optimizer.optimizer_instance], + [self._scheduler.scheduler_instance]) @property def scheduler(self): diff --git a/pina/solvers/solver.py b/pina/solvers/solver.py index e00bc8d5..3a8f400c 100644 --- a/pina/solvers/solver.py +++ b/pina/solvers/solver.py @@ -2,7 +2,7 @@ from abc import ABCMeta, abstractmethod from ..model.network import Network -import pytorch_lightning +import lightning from ..utils import check_consistency from ..problem import AbstractProblem from ..optim import Optimizer, Scheduler @@ -10,7 +10,8 @@ import sys -class SolverInterface(pytorch_lightning.LightningModule, metaclass=ABCMeta): + +class SolverInterface(lightning.pytorch.LightningModule, metaclass=ABCMeta): """ Solver base class. 
This class inherits is a wrapper of LightningModule class, inheriting all the @@ -83,7 +84,6 @@ def __init__(self, " optimizers.") # extra features handling - self._pina_models = models self._pina_optimizers = optimizers self._pina_schedulers = schedulers @@ -94,7 +94,7 @@ def forward(self, *args, **kwargs): pass @abstractmethod - def training_step(self, batch, batch_idx): + def training_step(self, batch): pass @abstractmethod @@ -138,8 +138,16 @@ def _check_solver_consistency(self, problem): TODO """ for _, condition in problem.conditions.items(): - if not set(self.accepted_condition_types).issubset( - condition.condition_type): + if not set(condition.condition_type).issubset( + set(self.accepted_condition_types)): raise ValueError( - f'{self.__name__} support only dose not support condition ' + f'{self.__name__} does not support condition ' f'{condition.condition_type}') + + @staticmethod + def get_batch_size(batch): + # batch is a sequence of (condition_name, points) pairs + batch_size = 0 + for data in batch: + batch_size += len(data[1]['input_points']) + return batch_size \ No newline at end of file diff --git a/pina/solvers/supervised.py b/pina/solvers/supervised.py index 62fc9914..d978fc37 100644 --- a/pina/solvers/supervised.py +++ b/pina/solvers/supervised.py @@ -1,12 +1,14 @@ """ Module for SupervisedSolver """ - import torch +from pytorch_lightning.utilities.types import STEP_OUTPUT from torch.nn.modules.loss import _Loss from ..optim import TorchOptimizer, TorchScheduler from .solver import SolverInterface from ..label_tensor import LabelTensor from ..utils import check_consistency from ..loss.loss_interface import LossInterface +from ..condition import InputOutputPointsCondition class SupervisedSolver(SolverInterface): @@ -37,7 +39,7 @@ class SupervisedSolver(SolverInterface): we are seeking to approximate multiple (discretised) functions given multiple (discretised) input functions. """ - accepted_condition_types = ['supervised'] + accepted_condition_types = [InputOutputPointsCondition.condition_type[0]] __name__ = 'SupervisedSolver' def __init__(self, @@ -46,7 +48,8 @@ loss=None, optimizer=None, scheduler=None, - extra_features=None): + extra_features=None, + use_lt=True): """ :param AbstractProblem problem: The formualation of the problem. :param torch.nn.Module model: The neural network model to use. @@ -72,14 +75,19 @@ problem=problem, optimizers=optimizer, schedulers=scheduler, - extra_features=extra_features) + extra_features=extra_features, + use_lt=use_lt) # check consistency - check_consistency(loss, (LossInterface, _Loss), subclass=False) + check_consistency(loss, (LossInterface, _Loss, torch.nn.Module), + subclass=False) self._loss = loss self._model = self._pina_models[0] self._optimizer = self._pina_optimizers[0] self._scheduler = self._pina_schedulers[0] + self.validation_condition_losses = { + k: {'loss': [], + 'count': []} for k in self.problem.conditions.keys()} def forward(self, x): """Forward pass implementation for the solver. @@ -105,7 +113,7 @@ def configure_optimizers(self): return ([self._optimizer.optimizer_instance], [self._scheduler.scheduler_instance]) - def training_step(self, batch, batch_idx): + def training_step(self, batch): """Solver training step. :param batch: The batch element in the dataloader. @@ -115,32 +123,36 @@ :return: The sum of the loss functions. 
:rtype: LabelTensor """ - condition_idx = batch.supervised.condition_indices - - for condition_id in range(condition_idx.min(), condition_idx.max() + 1): - - condition_name = self._dataloader.condition_names[condition_id] - condition = self.problem.conditions[condition_name] - pts = batch.supervised.input_points - out = batch.supervised.output_points - if condition_name not in self.problem.conditions: - raise RuntimeError("Something wrong happened.") + condition_loss = [] + for condition_name, points in batch: + input_pts, output_pts = points['input_points'], points['output_points'] + loss_ = self.loss_data(input_pts=input_pts, output_pts=output_pts) + condition_loss.append(loss_.as_subclass(torch.Tensor)) + loss = sum(condition_loss) + self.log('train_loss', loss, on_step=True, on_epoch=True, prog_bar=True, logger=True, + batch_size=self.get_batch_size(batch), sync_dist=True) + return loss - # for data driven mode - if not hasattr(condition, "output_points"): - raise NotImplementedError( - f"{type(self).__name__} works only in data-driven mode.") - output_pts = out[condition_idx == condition_id] - input_pts = pts[condition_idx == condition_id] + def validation_step(self, batch): + """ + Solver validation step. + """ + condition_loss = [] + for condition_name, points in batch: + input_pts, output_pts = points['input_points'], points['output_points'] + loss_ = self.loss_data(input_pts=input_pts, output_pts=output_pts) + condition_loss.append(loss_.as_subclass(torch.Tensor)) + loss = sum(condition_loss) + self.log('val_loss', loss, prog_bar=True, logger=True, + batch_size=self.get_batch_size(batch), sync_dist=True) - input_pts.labels = pts.labels - output_pts.labels = out.labels - loss = self.loss_data(input_pts=input_pts, output_pts=output_pts) - loss = loss.as_subclass(torch.Tensor) + def test_step(self, batch, batch_idx) -> STEP_OUTPUT: + """ + Solver test step. + """ - self.log("mean_loss", float(loss), prog_bar=True, logger=True) - return loss + raise NotImplementedError("Test step not implemented yet.") def loss_data(self, input_pts, output_pts): """ diff --git a/pina/trainer.py b/pina/trainer.py index 58c66f67..a7c5c351 100644 --- a/pina/trainer.py +++ b/pina/trainer.py @@ -1,20 +1,21 @@ """ Trainer module. """ - +import warnings import torch -import pytorch_lightning +import lightning from .utils import check_consistency from .data import PinaDataModule from .solvers.solver import SolverInterface -class Trainer(pytorch_lightning.Trainer): +class Trainer(lightning.pytorch.Trainer): def __init__(self, solver, batch_size=None, train_size=.7, test_size=.2, - eval_size=.1, + val_size=.1, + predict_size=.0, **kwargs): """ PINA Trainer class for costumizing every aspect of training via flags. 
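The constructor above now carves the data into four splits rather than three. A minimal usage sketch of the renamed arguments, assuming `pinn` is any solver whose problem has already been sampled; the fractions and trainer flags are illustrative, not defaults taken from the code:

from pina import Trainer

trainer = Trainer(solver=pinn,          # assumed: a solver with a sampled problem
                  batch_size=10,
                  train_size=0.7,
                  test_size=0.2,
                  val_size=0.1,         # replaces the old eval_size
                  predict_size=0.0,     # new in this signature
                  accelerator='cpu',
                  max_epochs=5)
trainer.train()  # delegates to lightning's fit() via the PinaDataModule
trainer.test()   # new wrapper around lightning's test(), same datamodule

Extra keyword arguments still flow through **kwargs to the underlying lightning Trainer, which is how accelerator and max_epochs reach it here.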
@@ -39,11 +40,13 @@ def __init__(self, check_consistency(batch_size, int) self.train_size = train_size self.test_size = test_size - self.eval_size = eval_size + self.val_size = val_size + self.predict_size = predict_size self.solver = solver self.batch_size = batch_size - self._create_loader() self._move_to_device() + self.data_module = None + self._create_loader() def _move_to_device(self): device = self._accelerator_connector._parallel_devices[0] @@ -64,34 +67,34 @@ def _create_loader(self): if not self.solver.problem.collector.full: error_message = '\n'.join([ f"""{" " * 13} ---> Condition {key} {"sampled" if value else - "not sampled"}""" for key, value in + "not sampled"}""" for key, value in self._solver.problem.collector._is_conditions_ready.items() ]) raise RuntimeError('Cannot create Trainer if not all conditions ' 'are sampled. The Trainer got the following:\n' f'{error_message}') - devices = self._accelerator_connector._parallel_devices - - if len(devices) > 1: - raise RuntimeError("Parallel training is not supported yet.") - - device = devices[0] - - data_module = PinaDataModule(problem=self.solver.problem, - device=device, - train_size=self.train_size, - test_size=self.test_size, - val_size=self.eval_size) - data_module.setup() - self._loader = data_module.train_dataloader() + self.data_module = PinaDataModule(collector=self.solver.problem.collector, + train_size=self.train_size, + test_size=self.test_size, + val_size=self.val_size, + predict_size=self.predict_size, + batch_size=self.batch_size,) def train(self, **kwargs): """ Train the solver method. """ return super().fit(self.solver, - train_dataloaders=self._loader, - **kwargs) + datamodule=self.data_module, + **kwargs) + + def test(self, **kwargs): + """ + Test the solver method. + """ + return super().test(self.solver, + datamodule=self.data_module, + **kwargs) @property def solver(self): diff --git a/setup.py b/setup.py index c44cacf7..8a6c2324 100644 --- a/setup.py +++ b/setup.py @@ -15,7 +15,8 @@ KEYWORDS = 'physics-informed neural-network' REQUIRED = [ - 'numpy', 'matplotlib', 'torch', 'lightning', 'pytorch_lightning', 'torch_geometric', 'torch-cluster' + 'numpy', 'matplotlib', 'torch', 'lightning', 'torch_geometric', + 'torch-cluster', 'pytorch_lightning', ] EXTRAS = { diff --git a/tests/test_dataset.py b/tests/test_dataset.py deleted file mode 100644 index 87fd9a15..00000000 --- a/tests/test_dataset.py +++ /dev/null @@ -1,227 +0,0 @@ -import math -import torch -from pina.data import SamplePointDataset, SupervisedDataset, PinaDataModule, \ - UnsupervisedDataset -from pina.data import PinaDataLoader -from pina import LabelTensor, Condition -from pina.equation import Equation -from pina.domain import CartesianDomain -from pina.problem import SpatialProblem, AbstractProblem -from pina.operators import laplacian -from pina.equation.equation_factory import FixedValue -from pina.graph import Graph - - -def laplace_equation(input_, output_): - force_term = (torch.sin(input_.extract(['x']) * torch.pi) * - torch.sin(input_.extract(['y']) * torch.pi)) - delta_u = laplacian(output_.extract(['u']), input_) - return delta_u - force_term - - -my_laplace = Equation(laplace_equation) -in_ = LabelTensor(torch.tensor([[0., 1.]]), ['x', 'y']) -out_ = LabelTensor(torch.tensor([[0.]]), ['u']) -in2_ = LabelTensor(torch.rand(60, 2), ['x', 'y']) -out2_ = LabelTensor(torch.rand(60, 1), ['u']) - - -class Poisson(SpatialProblem): - output_variables = ['u'] - spatial_domain = CartesianDomain({'x': [0, 1], 'y': [0, 1]}) - - conditions = { - 
'gamma1': - Condition(domain=CartesianDomain({ - 'x': [0, 1], - 'y': 1 - }), - equation=FixedValue(0.0)), - 'gamma2': - Condition(domain=CartesianDomain({ - 'x': [0, 1], - 'y': 0 - }), - equation=FixedValue(0.0)), - 'gamma3': - Condition(domain=CartesianDomain({ - 'x': 1, - 'y': [0, 1] - }), - equation=FixedValue(0.0)), - 'gamma4': - Condition(domain=CartesianDomain({ - 'x': 0, - 'y': [0, 1] - }), - equation=FixedValue(0.0)), - 'D': - Condition(input_points=LabelTensor(torch.rand(size=(100, 2)), - ['x', 'y']), - equation=my_laplace), - 'data': - Condition(input_points=in_, output_points=out_), - 'data2': - Condition(input_points=in2_, output_points=out2_), - 'unsupervised': - Condition( - input_points=LabelTensor(torch.rand(size=(45, 2)), ['x', 'y']), - conditional_variables=LabelTensor(torch.ones(size=(45, 1)), - ['alpha']), - ), - 'unsupervised2': - Condition( - input_points=LabelTensor(torch.rand(size=(90, 2)), ['x', 'y']), - conditional_variables=LabelTensor(torch.ones(size=(90, 1)), - ['alpha']), - ) - } - - -boundaries = ['gamma1', 'gamma2', 'gamma3', 'gamma4'] -poisson = Poisson() -poisson.discretise_domain(10, 'grid', locations=boundaries) - - -def test_sample(): - sample_dataset = SamplePointDataset(poisson, device='cpu') - assert len(sample_dataset) == 140 - assert sample_dataset.input_points.shape == (140, 2) - assert sample_dataset.input_points.labels == ['x', 'y'] - assert sample_dataset.condition_indices.dtype == torch.uint8 - assert sample_dataset.condition_indices.max() == torch.tensor(4) - assert sample_dataset.condition_indices.min() == torch.tensor(0) - - -def test_data(): - dataset = SupervisedDataset(poisson, device='cpu') - assert len(dataset) == 61 - assert dataset['input_points'].shape == (61, 2) - assert dataset.input_points.shape == (61, 2) - assert dataset['input_points'].labels == ['x', 'y'] - assert dataset.input_points.labels == ['x', 'y'] - assert dataset.input_points[3:].shape == (58, 2) - assert dataset.output_points[:3].labels == ['u'] - assert dataset.output_points.shape == (61, 1) - assert dataset.output_points.labels == ['u'] - assert dataset.condition_indices.dtype == torch.uint8 - assert dataset.condition_indices.max() == torch.tensor(1) - assert dataset.condition_indices.min() == torch.tensor(0) - - -def test_unsupervised(): - dataset = UnsupervisedDataset(poisson, device='cpu') - assert len(dataset) == 135 - assert dataset.input_points.shape == (135, 2) - assert dataset.input_points.labels == ['x', 'y'] - assert dataset.input_points[3:].shape == (132, 2) - - assert dataset.conditional_variables.shape == (135, 1) - assert dataset.conditional_variables.labels == ['alpha'] - assert dataset.condition_indices.dtype == torch.uint8 - assert dataset.condition_indices.max() == torch.tensor(1) - assert dataset.condition_indices.min() == torch.tensor(0) - - -def test_data_module(): - data_module = PinaDataModule(poisson, device='cpu') - data_module.setup() - loader = data_module.train_dataloader() - assert isinstance(loader, PinaDataLoader) - assert isinstance(loader, PinaDataLoader) - - data_module = PinaDataModule(poisson, - device='cpu', - batch_size=10, - shuffle=False) - data_module.setup() - loader = data_module.train_dataloader() - assert len(loader) == 24 - for i in loader: - assert len(i) <= 10 - len_ref = sum( - [math.ceil(len(dataset) * 0.7) for dataset in data_module.datasets]) - len_real = sum( - [len(dataset) for dataset in data_module.splits['train'].values()]) - assert len_ref == len_real - - supervised_dataset = SupervisedDataset(poisson, 
device='cpu') - data_module = PinaDataModule(poisson, - device='cpu', - batch_size=10, - shuffle=False, - datasets=[supervised_dataset]) - data_module.setup() - loader = data_module.train_dataloader() - for batch in loader: - assert len(batch) <= 10 - - physics_dataset = SamplePointDataset(poisson, device='cpu') - data_module = PinaDataModule(poisson, - device='cpu', - batch_size=10, - shuffle=False, - datasets=[physics_dataset]) - data_module.setup() - loader = data_module.train_dataloader() - for batch in loader: - assert len(batch) <= 10 - - unsupervised_dataset = UnsupervisedDataset(poisson, device='cpu') - data_module = PinaDataModule(poisson, - device='cpu', - batch_size=10, - shuffle=False, - datasets=[unsupervised_dataset]) - data_module.setup() - loader = data_module.train_dataloader() - for batch in loader: - assert len(batch) <= 10 - - -def test_loader(): - data_module = PinaDataModule(poisson, device='cpu', batch_size=10) - data_module.setup() - loader = data_module.train_dataloader() - assert isinstance(loader, PinaDataLoader) - assert len(loader) == 24 - for i in loader: - assert len(i) <= 10 - assert i.supervised.input_points.labels == ['x', 'y'] - assert i.physics.input_points.labels == ['x', 'y'] - assert i.unsupervised.input_points.labels == ['x', 'y'] - assert i.supervised.input_points.requires_grad == True - assert i.physics.input_points.requires_grad == True - assert i.unsupervised.input_points.requires_grad == True - - -coordinates = LabelTensor(torch.rand((100, 100, 2)), labels=['x', 'y']) -data = LabelTensor(torch.rand((100, 100, 3)), labels=['ux', 'uy', 'p']) - - -class GraphProblem(AbstractProblem): - output = LabelTensor(torch.rand((100, 3)), labels=['ux', 'uy', 'p']) - input = [ - Graph.build('radius', - nodes_coordinates=coordinates[i, :, :], - nodes_data=data[i, :, :], - radius=0.2) for i in range(100) - ] - output_variables = ['u'] - - conditions = { - 'graph_data': Condition(input_points=input, output_points=output) - } - - -graph_problem = GraphProblem() - - -def test_loader_graph(): - data_module = PinaDataModule(graph_problem, device='cpu', batch_size=10) - data_module.setup() - loader = data_module.train_dataloader() - for i in loader: - assert len(i) <= 10 - assert isinstance(i.supervised.input_points, list) - assert all(isinstance(x, Graph) for x in i.supervised.input_points) diff --git a/tests/test_label_tensor/test_label_tensor_01.py b/tests/test_label_tensor/test_label_tensor_01.py index 57aafb8c..ea43307c 100644 --- a/tests/test_label_tensor/test_label_tensor_01.py +++ b/tests/test_label_tensor/test_label_tensor_01.py @@ -114,5 +114,5 @@ def test_slice(): assert torch.allclose(tensor_view2, data[3]) tensor_view3 = tensor[:, 2] - assert tensor_view3.labels == labels[2] + assert tensor_view3.labels == [labels[2]] assert torch.allclose(tensor_view3, data[:, 2].reshape(-1, 1)) diff --git a/tests/test_solvers/test_pinn.py b/tests/test_solvers/test_pinn.py index 8ee9d612..72887a4f 100644 --- a/tests/test_solvers/test_pinn.py +++ b/tests/test_solvers/test_pinn.py @@ -1,5 +1,4 @@ import torch - from pina.problem import SpatialProblem, InverseProblem from pina.operators import laplacian from pina.domain import CartesianDomain @@ -9,7 +8,7 @@ from pina.model import FeedForward from pina.equation.equation import Equation from pina.equation.equation_factory import FixedValue -from pina.loss.loss_interface import LpLoss +from pina.loss import LpLoss def laplace_equation(input_, output_): @@ -54,22 +53,22 @@ def laplace_equation(input_, output_, params_): # 
define the conditions for the loss (boundary conditions, equation, data) conditions = { - 'gamma1': Condition(location=CartesianDomain({'x': [x_min, x_max], + 'gamma1': Condition(domain=CartesianDomain({'x': [x_min, x_max], 'y': y_max}), equation=FixedValue(0.0, components=['u'])), - 'gamma2': Condition(location=CartesianDomain( + 'gamma2': Condition(domain=CartesianDomain( {'x': [x_min, x_max], 'y': y_min }), equation=FixedValue(0.0, components=['u'])), - 'gamma3': Condition(location=CartesianDomain( + 'gamma3': Condition(domain=CartesianDomain( {'x': x_max, 'y': [y_min, y_max] }), equation=FixedValue(0.0, components=['u'])), - 'gamma4': Condition(location=CartesianDomain( + 'gamma4': Condition(domain=CartesianDomain( {'x': x_min, 'y': [y_min, y_max] }), equation=FixedValue(0.0, components=['u'])), - 'D': Condition(location=CartesianDomain( + 'D': Condition(domain=CartesianDomain( {'x': [x_min, x_max], 'y': [y_min, y_max] }), equation=Equation(laplace_equation)), @@ -84,16 +83,16 @@ class Poisson(SpatialProblem): conditions = { 'gamma1': Condition( - location=CartesianDomain({'x': [0, 1], 'y': 1}), + domain=CartesianDomain({'x': [0, 1], 'y': 1}), equation=FixedValue(0.0)), 'gamma2': Condition( - location=CartesianDomain({'x': [0, 1], 'y': 0}), + domain=CartesianDomain({'x': [0, 1], 'y': 0}), equation=FixedValue(0.0)), 'gamma3': Condition( - location=CartesianDomain({'x': 1, 'y': [0, 1]}), + domain=CartesianDomain({'x': 1, 'y': [0, 1]}), equation=FixedValue(0.0)), 'gamma4': Condition( - location=CartesianDomain({'x': 0, 'y': [0, 1]}), + domain=CartesianDomain({'x': 0, 'y': [0, 1]}), equation=FixedValue(0.0)), 'D': Condition( input_points=LabelTensor(torch.rand(size=(100, 2)), ['x', 'y']), @@ -112,7 +111,6 @@ def poisson_sol(self, pts): truth_solution = poisson_sol - class myFeature(torch.nn.Module): """ Feature: sin(x) @@ -158,12 +156,10 @@ def test_train_cpu(): pinn = PINN(problem = poisson_problem, model=model, extra_features=None, loss=LpLoss()) trainer = Trainer(solver=pinn, max_epochs=1, - accelerator='cpu', batch_size=20) - trainer.train() - + accelerator='cpu', batch_size=20, val_size=0., train_size=1., test_size=0.) 
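In the call above the loss is passed explicitly while optimizer and scheduler fall back to the new `None` defaults, which `PINNInterface.__init__` (earlier in this diff) resolves to Adam with lr=0.001, a ConstantLR scheduler, and MSELoss. A hedged sketch of spelling all three out, reusing `poisson_problem` and `model` from this test module; the `pina.solvers` import path is an assumption:

import torch
from pina.optim import TorchOptimizer, TorchScheduler
from pina.solvers import PINN  # assumed import path

# equivalent to PINN(problem=poisson_problem, model=model) under the new defaults
pinn = PINN(problem=poisson_problem,
            model=model,
            optimizer=TorchOptimizer(torch.optim.Adam, lr=0.001),
            scheduler=TorchScheduler(torch.optim.lr_scheduler.ConstantLR),
            loss=torch.nn.MSELoss())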
-def test_train_restore(): - tmpdir = "tests/tmp_restore" +def test_train_load(): + tmpdir = "tests/tmp_load" poisson_problem = Poisson() boundaries = ['gamma1', 'gamma2', 'gamma3', 'gamma4'] n = 10 @@ -173,20 +169,25 @@ def test_train_restore(): extra_features=None, loss=LpLoss()) trainer = Trainer(solver=pinn, - max_epochs=5, + max_epochs=15, accelerator='cpu', default_root_dir=tmpdir) trainer.train() - ntrainer = Trainer(solver=pinn, max_epochs=15, accelerator='cpu') - t = ntrainer.train( - ckpt_path=f'{tmpdir}/lightning_logs/version_0/' - 'checkpoints/epoch=4-step=10.ckpt') + new_pinn = PINN.load_from_checkpoint( + f'{tmpdir}/lightning_logs/version_0/checkpoints/epoch=14-step=15.ckpt', + problem = poisson_problem, model=model) + test_pts = CartesianDomain({'x': [0, 1], 'y': [0, 1]}).sample(10) + assert new_pinn.forward(test_pts).extract(['u']).shape == (10, 1) + assert new_pinn.forward(test_pts).extract( + ['u']).shape == pinn.forward(test_pts).extract(['u']).shape + torch.testing.assert_close( + new_pinn.forward(test_pts).extract(['u']), + pinn.forward(test_pts).extract(['u'])) import shutil shutil.rmtree(tmpdir) - -def test_train_load(): - tmpdir = "tests/tmp_load" +def test_train_restore(): + tmpdir = "tests/tmp_restore" poisson_problem = Poisson() boundaries = ['gamma1', 'gamma2', 'gamma3', 'gamma4'] n = 10 @@ -196,20 +197,14 @@ def test_train_load(): extra_features=None, loss=LpLoss()) trainer = Trainer(solver=pinn, - max_epochs=15, + max_epochs=5, accelerator='cpu', default_root_dir=tmpdir) trainer.train() - new_pinn = PINN.load_from_checkpoint( - f'{tmpdir}/lightning_logs/version_0/checkpoints/epoch=14-step=30.ckpt', - problem = poisson_problem, model=model) - test_pts = CartesianDomain({'x': [0, 1], 'y': [0, 1]}).sample(10) - assert new_pinn.forward(test_pts).extract(['u']).shape == (10, 1) - assert new_pinn.forward(test_pts).extract( - ['u']).shape == pinn.forward(test_pts).extract(['u']).shape - torch.testing.assert_close( - new_pinn.forward(test_pts).extract(['u']), - pinn.forward(test_pts).extract(['u'])) + ntrainer = Trainer(solver=pinn, max_epochs=15, accelerator='cpu') + t = ntrainer.train( + ckpt_path=f'{tmpdir}/lightning_logs/version_0/' + 'checkpoints/epoch=4-step=5.ckpt') import shutil shutil.rmtree(tmpdir) @@ -217,36 +212,24 @@ def test_train_inverse_problem_cpu(): poisson_problem = InversePoisson() boundaries = ['gamma1', 'gamma2', 'gamma3', 'gamma4', 'D'] n = 100 - poisson_problem.discretise_domain(n, 'random', locations=boundaries) + poisson_problem.discretise_domain(n, 'random', locations=boundaries, + variables=['x', 'y']) pinn = PINN(problem = poisson_problem, model=model, extra_features=None, loss=LpLoss()) trainer = Trainer(solver=pinn, max_epochs=1, accelerator='cpu', batch_size=20) trainer.train() - -# # TODO does not currently work -# def test_train_inverse_problem_restore(): -# tmpdir = "tests/tmp_restore_inv" -# poisson_problem = InversePoisson() -# boundaries = ['gamma1', 'gamma2', 'gamma3', 'gamma4', 'D'] -# n = 100 -# poisson_problem.discretise_domain(n, 'random', locations=boundaries) -# pinn = PINN(problem=poisson_problem, -# model=model, -# extra_features=None, -# loss=LpLoss()) -# trainer = Trainer(solver=pinn, -# max_epochs=5, -# accelerator='cpu', -# default_root_dir=tmpdir) -# trainer.train() -# ntrainer = Trainer(solver=pinn, max_epochs=5, accelerator='cpu') -# t = ntrainer.train( -# ckpt_path=f'{tmpdir}/lightning_logs/version_0/checkpoints/epoch=4-step=10.ckpt') -# import shutil -# shutil.rmtree(tmpdir) - +def test_train_extra_feats_cpu(): + 
poisson_problem = Poisson() + boundaries = ['gamma1', 'gamma2', 'gamma3', 'gamma4'] + n = 10 + poisson_problem.discretise_domain(n, 'grid', locations=boundaries) + pinn = PINN(problem=poisson_problem, + model=model_extra_feats, + extra_features=extra_feats) + trainer = Trainer(solver=pinn, max_epochs=5, accelerator='cpu') + trainer.train() def test_train_inverse_problem_load(): tmpdir = "tests/tmp_load_inv" @@ -264,7 +247,7 @@ def test_train_inverse_problem_load(): default_root_dir=tmpdir) trainer.train() new_pinn = PINN.load_from_checkpoint( - f'{tmpdir}/lightning_logs/version_0/checkpoints/epoch=14-step=30.ckpt', + f'{tmpdir}/lightning_logs/version_0/checkpoints/epoch=14-step=15.ckpt', problem = poisson_problem, model=model) test_pts = CartesianDomain({'x': [0, 1], 'y': [0, 1]}).sample(10) assert new_pinn.forward(test_pts).extract(['u']).shape == (10, 1) @@ -274,160 +257,4 @@ def test_train_inverse_problem_load(): new_pinn.forward(test_pts).extract(['u']), pinn.forward(test_pts).extract(['u'])) import shutil - shutil.rmtree(tmpdir) - -# # TODO fix asap. Basically sampling few variables -# # works only if both variables are in a range. -# # if one is fixed and the other not, this will -# # not work. This test also needs to be fixed and -# # insert in test problem not in test pinn. -# def test_train_cpu_sampling_few_vars(): -# poisson_problem = Poisson() -# boundaries = ['gamma1', 'gamma2', 'gamma3'] -# n = 10 -# poisson_problem.discretise_domain(n, 'grid', locations=boundaries) -# poisson_problem.discretise_domain(n, 'random', locations=['gamma4'], variables=['x']) -# poisson_problem.discretise_domain(n, 'random', locations=['gamma4'], variables=['y']) -# pinn = PINN(problem = poisson_problem, model=model, extra_features=None, loss=LpLoss()) -# trainer = Trainer(solver=pinn, kwargs={'max_epochs' : 5, 'accelerator':'cpu'}) -# trainer.train() - - -def test_train_extra_feats_cpu(): - poisson_problem = Poisson() - boundaries = ['gamma1', 'gamma2', 'gamma3', 'gamma4'] - n = 10 - poisson_problem.discretise_domain(n, 'grid', locations=boundaries) - pinn = PINN(problem=poisson_problem, - model=model_extra_feats, - extra_features=extra_feats) - trainer = Trainer(solver=pinn, max_epochs=5, accelerator='cpu') - trainer.train() - - -# TODO, fix GitHub actions to run also on GPU -# def test_train_gpu(): -# poisson_problem = Poisson() -# boundaries = ['gamma1', 'gamma2', 'gamma3', 'gamma4'] -# n = 10 -# poisson_problem.discretise_domain(n, 'grid', locations=boundaries) -# pinn = PINN(problem = poisson_problem, model=model, extra_features=None, loss=LpLoss()) -# trainer = Trainer(solver=pinn, kwargs={'max_epochs' : 5, 'accelerator':'gpu'}) -# trainer.train() - -# def test_train_gpu(): #TODO fix ASAP -# poisson_problem = Poisson() -# boundaries = ['gamma1', 'gamma2', 'gamma3', 'gamma4'] -# n = 10 -# poisson_problem.discretise_domain(n, 'grid', locations=boundaries) -# poisson_problem.conditions.pop('data') # The input/output pts are allocated on cpu -# pinn = PINN(problem = poisson_problem, model=model, extra_features=None, loss=LpLoss()) -# trainer = Trainer(solver=pinn, kwargs={'max_epochs' : 5, 'accelerator':'gpu'}) -# trainer.train() - -# def test_train_2(): -# boundaries = ['gamma1', 'gamma2', 'gamma3', 'gamma4'] -# n = 10 -# expected_keys = [[], list(range(0, 50, 3))] -# param = [0, 3] -# for i, truth_key in zip(param, expected_keys): -# pinn = PINN(problem, model) -# pinn.discretise_domain(n, 'grid', locations=boundaries) -# pinn.discretise_domain(n, 'grid', locations=['D']) -# pinn.train(50, 
save_loss=i) -# assert list(pinn.history_loss.keys()) == truth_key - - -# def test_train_extra_feats(): -# pinn = PINN(problem, model_extra_feat, [myFeature()]) -# boundaries = ['gamma1', 'gamma2', 'gamma3', 'gamma4'] -# n = 10 -# pinn.discretise_domain(n, 'grid', locations=boundaries) -# pinn.discretise_domain(n, 'grid', locations=['D']) -# pinn.train(5) - - -# def test_train_2_extra_feats(): -# boundaries = ['gamma1', 'gamma2', 'gamma3', 'gamma4'] -# n = 10 -# expected_keys = [[], list(range(0, 50, 3))] -# param = [0, 3] -# for i, truth_key in zip(param, expected_keys): -# pinn = PINN(problem, model_extra_feat, [myFeature()]) -# pinn.discretise_domain(n, 'grid', locations=boundaries) -# pinn.discretise_domain(n, 'grid', locations=['D']) -# pinn.train(50, save_loss=i) -# assert list(pinn.history_loss.keys()) == truth_key - - -# def test_train_with_optimizer_kwargs(): -# boundaries = ['gamma1', 'gamma2', 'gamma3', 'gamma4'] -# n = 10 -# expected_keys = [[], list(range(0, 50, 3))] -# param = [0, 3] -# for i, truth_key in zip(param, expected_keys): -# pinn = PINN(problem, model, optimizer_kwargs={'lr' : 0.3}) -# pinn.discretise_domain(n, 'grid', locations=boundaries) -# pinn.discretise_domain(n, 'grid', locations=['D']) -# pinn.train(50, save_loss=i) -# assert list(pinn.history_loss.keys()) == truth_key - - -# def test_train_with_lr_scheduler(): -# boundaries = ['gamma1', 'gamma2', 'gamma3', 'gamma4'] -# n = 10 -# expected_keys = [[], list(range(0, 50, 3))] -# param = [0, 3] -# for i, truth_key in zip(param, expected_keys): -# pinn = PINN( -# problem, -# model, -# lr_scheduler_type=torch.optim.lr_scheduler.CyclicLR, -# lr_scheduler_kwargs={'base_lr' : 0.1, 'max_lr' : 0.3, 'cycle_momentum': False} -# ) -# pinn.discretise_domain(n, 'grid', locations=boundaries) -# pinn.discretise_domain(n, 'grid', locations=['D']) -# pinn.train(50, save_loss=i) -# assert list(pinn.history_loss.keys()) == truth_key - - -# # def test_train_batch(): -# # pinn = PINN(problem, model, batch_size=6) -# # boundaries = ['gamma1', 'gamma2', 'gamma3', 'gamma4'] -# # n = 10 -# # pinn.discretise_domain(n, 'grid', locations=boundaries) -# # pinn.discretise_domain(n, 'grid', locations=['D']) -# # pinn.train(5) - - -# # def test_train_batch_2(): -# # boundaries = ['gamma1', 'gamma2', 'gamma3', 'gamma4'] -# # n = 10 -# # expected_keys = [[], list(range(0, 50, 3))] -# # param = [0, 3] -# # for i, truth_key in zip(param, expected_keys): -# # pinn = PINN(problem, model, batch_size=6) -# # pinn.discretise_domain(n, 'grid', locations=boundaries) -# # pinn.discretise_domain(n, 'grid', locations=['D']) -# # pinn.train(50, save_loss=i) -# # assert list(pinn.history_loss.keys()) == truth_key - - -# if torch.cuda.is_available(): - -# # def test_gpu_train(): -# # pinn = PINN(problem, model, batch_size=20, device='cuda') -# # boundaries = ['gamma1', 'gamma2', 'gamma3', 'gamma4'] -# # n = 100 -# # pinn.discretise_domain(n, 'grid', locations=boundaries) -# # pinn.discretise_domain(n, 'grid', locations=['D']) -# # pinn.train(5) - -# def test_gpu_train_nobatch(): -# pinn = PINN(problem, model, batch_size=None, device='cuda') -# boundaries = ['gamma1', 'gamma2', 'gamma3', 'gamma4'] -# n = 100 -# pinn.discretise_domain(n, 'grid', locations=boundaries) -# pinn.discretise_domain(n, 'grid', locations=['D']) -# pinn.train(5) - + shutil.rmtree(tmpdir) \ No newline at end of file diff --git a/tests/test_solvers/test_supervised_solver.py b/tests/test_solvers/test_supervised_solver.py index 8ceadcd9..ebe8179e 100644 --- 
a/tests/test_solvers/test_supervised_solver.py +++ b/tests/test_solvers/test_supervised_solver.py @@ -121,7 +121,7 @@ def test_train_cpu(): batch_size=5, train_size=1, test_size=0., - eval_size=0.) + val_size=0.) trainer.train() test_train_cpu() diff --git a/tutorials/tutorial5/tutorial.ipynb b/tutorials/tutorial5/tutorial.ipynb deleted file mode 100644 index 64032d31..00000000 --- a/tutorials/tutorial5/tutorial.ipynb +++ /dev/null @@ -1,469 +0,0 @@ -{ - "cells": [ - { - "cell_type": "markdown", - "id": "e80567a6", - "metadata": {}, - "source": [ - "# Tutorial: Two dimensional Darcy flow using the Fourier Neural Operator" - ] - }, - { - "cell_type": "markdown", - "id": "8762bbe5", - "metadata": {}, - "source": [ - "In this tutorial we are going to solve the Darcy flow problem in two dimensions, presented in [*Fourier Neural Operator for\n", - "Parametric Partial Differential Equation*](https://openreview.net/pdf?id=c8P9NQVtmnO). First of all we import the modules needed for the tutorial. Importing `scipy` is needed for input-output operations." - ] - }, - { - "cell_type": "code", - "id": "5f2744dc", - "metadata": { - "ExecuteTime": { - "end_time": "2024-09-19T13:35:28.837348Z", - "start_time": "2024-09-19T13:35:27.611334Z" - } - }, - "source": [ - "# !pip install scipy # install scipy\n", - "from scipy import io\n", - "import torch\n", - "from pina.model import FNO, FeedForward # let's import some models\n", - "from pina import Condition, LabelTensor\n", - "from pina.solvers import SupervisedSolver\n", - "from pina.trainer import Trainer\n", - "from pina.problem import AbstractProblem\n", - "import matplotlib.pyplot as plt" - ], - "outputs": [], - "execution_count": 1 - }, - { - "cell_type": "markdown", - "id": "4cf5b181", - "metadata": {}, - "source": [ - "## Data Generation\n", - "\n", - "We will focus on solving a specific PDE, the **Darcy Flow** equation. The Darcy PDE is a second-order elliptic PDE with the following form:\n", - "\n", - "$$\n", - "-\\nabla\\cdot(k(x, y)\\nabla u(x, y)) = f(x) \\quad (x, y) \\in D.\n", - "$$\n", - "\n", - "Specifically, $u$ is the flow pressure, $k$ is the permeability field and $f$ is the forcing function. The Darcy flow can parameterize a variety of systems including flow through porous media, elastic materials and heat conduction. Here you will define the domain as a 2D unit square Dirichlet boundary conditions. 
The dataset is taken from the authors original reference.\n" - ] - }, - { - "cell_type": "code", - "id": "2ffb8a4c", - "metadata": { - "ExecuteTime": { - "end_time": "2024-09-19T13:35:28.989631Z", - "start_time": "2024-09-19T13:35:28.952744Z" - } - }, - "source": [ - "# download the dataset\n", - "data = io.loadmat(\"Data_Darcy.mat\")\n", - "\n", - "# extract data (we use only 100 data for train)\n", - "k_train = LabelTensor(torch.tensor(data['k_train'], dtype=torch.float).unsqueeze(-1), \n", - " labels={3:{'dof': ['u0'], 'name': 'k_train'}})\n", - "u_train = LabelTensor(torch.tensor(data['u_train'], dtype=torch.float).unsqueeze(-1),\n", - " labels={3:{'dof': ['u'], 'name': 'u_train'}})\n", - "k_test = LabelTensor(torch.tensor(data['k_test'], dtype=torch.float).unsqueeze(-1),\n", - " labels={3:{'dof': ['u0'], 'name': 'k_test'}})\n", - "u_test= LabelTensor(torch.tensor(data['u_test'], dtype=torch.float).unsqueeze(-1),\n", - " labels={3:{'dof': ['u'], 'name': 'u_test'}})\n", - "x = torch.tensor(data['x'], dtype=torch.float)[0]\n", - "y = torch.tensor(data['y'], dtype=torch.float)[0]" - ], - "outputs": [], - "execution_count": 2 - }, - { - "cell_type": "markdown", - "id": "9a9defd4", - "metadata": {}, - "source": [ - "Let's visualize some data" - ] - }, - { - "cell_type": "code", - "id": "c8501b6f", - "metadata": { - "ExecuteTime": { - "end_time": "2024-09-19T13:35:29.108381Z", - "start_time": "2024-09-19T13:35:29.031076Z" - } - }, - "source": [ - "plt.subplot(1, 2, 1)\n", - "plt.title('permeability')\n", - "plt.imshow(k_train.squeeze(-1)[0])\n", - "plt.subplot(1, 2, 2)\n", - "plt.title('field solution')\n", - "plt.imshow(u_train.squeeze(-1)[0])\n", - "plt.show()" - ], - "outputs": [ - { - "data": { - "text/plain": [ - "
" - ], - "image/png": "iVBORw0KGgoAAAANSUhEUgAAAiwAAAEjCAYAAAARyVqhAAAAOXRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjkuMiwgaHR0cHM6Ly9tYXRwbG90bGliLm9yZy8hTgPZAAAACXBIWXMAAA9hAAAPYQGoP6dpAAA73klEQVR4nO3de3RTZbo/8O9O2qaFtilI6QW5FbkI2KIonSpXQUv1IKDDpaPSMoIzHhid1YWjdQkUbz3iURmBA+oMFAdUdAbhzBEZsQocB1AB63Xk0NrSIm1pC73T5rLf3x/+Gg295H1pQ3bD97NWFmTnyc67k+ynT5K9n1cTQggQERERGZjJ1wMgIiIi8oQFCxERERkeCxYiIiIyPBYsREREZHgsWIiIiMjwWLAQERGR4bFgISIiIsNjwUJERESGx4KFiIiIDI8FC3VbWVlZ0DQNlZWVHmMHDRqE9PR01/V9+/ZB0zTs27fPtSw9PR2DBg3q+oESGcxnn32GG2+8ET179oSmacjLy3PtTxdDdt8pKiqCpmnIycm5qMe5GDk5OdA0DUVFRV263smTJ2Py5Mlduk7qGAsWonY0NjYiKyvLragh6u7sdjvmzJmDs2fP4sUXX8Rf/vIXDBw40NfDMqRvv/0WWVlZXV7s0MUJ8PUAiC6F48ePw2TquD5/9dVXoeu663pjYyNWrVoFAPwkRX6joKAAJ0+exKuvvopFixa5lj/++ON49NFHfTgy4/n222+xatUqTJ48udU3SO+//75vBnUZY8FCHjU2NqJHjx6+HkanWCwWjzGBgYGXYCREvnXmzBkAQEREhNvygIAABATwT4KsoKAgXw/hssOfhLq5lt+dv/vuO8ydOxfh4eG44oor8NBDD6GpqcktduvWrRg7dixCQkLQu3dvzJ8/HyUlJW4xkydPxujRo3H06FFMnDgRPXr0wGOPPeb67fk///M/sX79esTFxaFHjx649dZbUVJSAiEEnnzySVx55ZUICQnBzJkzcfbs2Vbjfe+99zBhwgT07NkTYWFhuP322/HNN9+4xXz55ZdIT09HXFwcgoODER0djV//+teoqqpq8zmorKz0uO0XHsPSlp//Dl9UVITIyEgAwKpVq6BpGjRNQ1ZWFjZv3gxN0/D555+3WsczzzwDs9mMH374ocPHIvKF9PR0TJo0CQAwZ84caJrm+vawvWNYZPJGW6qrq5Geng6r1YqIiAikpaWhurpaapx2ux2rVq3C0KFDERwcjCuuuALjx4/H3r173eI+/PBDVz6JiIjAzJkz8a9//cvj+lv25Qv9PE/k5ORgzpw5AIApU6a4ckDLT8RtHcNy5swZ3HfffYiKikJwcDASEhKwZcsWt5if59JXXnkFQ4YMgcViwQ033IDPPvtM6vm5XLGc9hNz587FoEGDkJ2djcOHD+Oll17CuXPn8NprrwEAnn76aSxfvhxz587FokWLUFFRgbVr12LixIn4/PPP3T5tVVVVISUlBfPnz8c999yDqKgo123btm2DzWbD7373O5w9exarV6/G3LlzcfPNN2Pfvn145JFHkJ+fj7Vr12LZsmXYtGmT675/+ctfkJaWhuTkZDz77LNobGzEhg0bMH78eHz++eeuYmHv3r34/vvvsXDhQkRHR+Obb77BK6+8gm+++QaHDx9ulVQ9bfvFiIyMxIYNG/DAAw9g9uzZuPPOOwEA8fHxGDx4MJYsWYJt27bh2muvdbvftm3bMHnyZPTr1++iH5vIW37zm9+gX79+eOaZZ/Dggw/ihhtucNu/L6SSN35OCIGZM2fi448/xm9/+1tcffXVeOedd5CWliY1zqysLGRnZ2PRokUYN24camtrceTIERw7dgy33HILAOCDDz5ASkoK4uLikJWVhfPnz2Pt2rW46aabcOzYsU4fQD9x4kQ8+OCDeOmll/DYY4/h6quvBgDXvxc6f/48Jk+ejPz8fCxduhSDBw/G22+/jfT0dFRXV+Ohhx5yi3/99ddRV1eH3/zmN9A0DatXr8add96J77//nt/2tkdQt7Zy5UoBQNxxxx1uy//93/9dABBffPGFKCoqEmazWTz99NNuMV999ZUICAhwWz5p0iQBQGzcuNEttrCwUAAQkZGRorq62rU8MzNTABAJCQnCbre7lqempoqgoCDR1NQkhBCirq5OREREiMWLF7utt6ysTFitVrfljY2NrbbzjTfeEADEgQMHlLa9xcCBA0VaWprr+kcffSQAiI8++si1LC0tTQwcONB1vaKiQgAQK1eubDWe1NRUERsbK5xOp2vZsWPHBACxefPmVvFERtHy3n/77bfdlrfsTy1U8saF+87OnTsFALF69WrXMofDISZMmCC1jyQkJIjbb7+9w5gxY8aIvn37iqqqKteyL774QphMJrFgwQLXss2bNwsAorCw0LWsvf36wjzx9ttvt8oTLSZNmiQmTZrkur5mzRoBQGzdutW1zGaziaSkJBEaGipqa2uFED/l0iuuuEKcPXvWFbtr1y4BQPz973/vcLsvZ/xJyE8sWbLE7frvfvc7AMDu3buxY8cO6LqOuXPnorKy0nWJjo7G0KFD8dFHH7nd12KxYOHChW0+zpw5c2C1Wl3XExMTAQD33HOP2+/fiYmJsNlsrp9G9u7di+rqaqSmprqNwWw2IzEx0W0MISEhrv83NTWhsrISv/jFLwAAx44dU9p2b1mwYAFOnz7tNu5t27YhJCQEd911l9cel+hSUc0bP7d7924EBATggQcecC0zm82ufdOTiIgIfPPNNzhx4kSbt5eWliIvLw/p6eno3bu3a3l8fDxuueUWr+777dm9ezeio6ORmprqWhYYGIgHH3wQ9fX12L9/v1v8vHnz0KtXL9f1CRMmAAC+//77SzPgbog/CfmJoUOHul0fMmQITCYTioqKYDKZIIRoFdPiwq8f+/Xr1+4BZQMGDHC73lK89O/fv83l586dAwBX4rn55pvbXG94eLjr/2fPnsWqVavw5ptvug4QbFFTU9Pqvh1tu7fccsstiImJwbZt2zB16lTouo433ngDM2fORFhYmNcel+hSOXHihFLe+LmTJ08iJiYGoaGhbsuHDx8u9dhPPPEEZs6ciWHDhmH06NGYPn067r33XsTHx7vW3976rr76avzjH/9AQ0MDevbsKfV4XeHkyZMYOnRoq7MRW35CahlziwtzaUvx0pIzqTUWLH7q58d56LoOTdPw3nvvwWw2t4q9MKn8/BuOC7V1/46WCyFcYwB+PI4lOjq6VdzPv52ZO3cuDh48iIcffhhjxoxBaGgodF3H9OnT3U47bs/FNr9SYTab8atf/Qqvvvoq/uu//gv//Oc/cfr0adxzzz1ef2yiS0E1b3SliRMnoqCgALt27cL777+PP/3pT3jxxRexceNGt1Oxu5rT6fTaui/kKWdSayxY/MSJEycwePBg1/X8/Hzouo5BgwbBbDZDCIHBgwdj2LBhPhnfkCFDAAB9+/bFtGnT2o07d+4ccnNzsWrVKqxYscK1vL2vhltua2/bO8NT
4bNgwQI8//zz+Pvf/4733nsPkZGRSE5O7tRjEhnFkCFDLjpvDBw4ELm5uaivr3crbI4fPy69jt69e2PhwoVYuHAh6uvrMXHiRGRlZWHRokWuRndtre+7775Dnz59Ovx2pVevXq3OWLLZbCgtLXVbpvLhZ+DAgfjyyy+h67rbtyzfffed63bqHB7D4ifWr1/vdn3t2rUAgJSUFNx5550wm81YtWpVq+pdCNHu6cJdKTk5GeHh4XjmmWdgt9tb3V5RUQHgp08dF45zzZo17a67o23vjJbeM+2dihkfH4/4+Hj86U9/wt/+9jfMnz+ffSzIb3Qmb9x2221wOBzYsGGDa5nT6XTtm55cuO7Q0FBcddVVaG5uBgDExMRgzJgx2LJli9v++fXXX+P999/Hbbfd1uH6hwwZggMHDrgte+WVV1p9w9JS9Micjn3bbbehrKwM27dvdy1zOBxYu3YtQkNDXaeT08VjdvUThYWFuOOOOzB9+nQcOnQIW7duxa9+9SskJCQAAJ566ilkZmaiqKgIs2bNQlhYGAoLC/HOO+/g/vvvx7Jly7w6vvDwcGzYsAH33nsvrrvuOsyfPx+RkZEoLi7Gu+++i5tuugnr1q1DeHg4Jk6ciNWrV8Nut6Nfv354//33UVhYeNHbfrFCQkIwcuRIbN++HcOGDUPv3r0xevRojB492hWzYMEC13PHn4PInwwZMuSi88aMGTNw00034dFHH0VRURFGjhyJHTt2tHkMWltGjhyJyZMnY+zYsejduzeOHDmCv/71r1i6dKkr5rnnnkNKSgqSkpJw3333uU5rtlqtbfZY+blFixbht7/9Le666y7ccsst+OKLL/CPf/wDffr0cYsbM2YMzGYznn32WdTU1MBiseDmm29G3759W63z/vvvx8svv4z09HQcPXoUgwYNwl//+lf885//xJo1a3hsW1fw0dlJ1EVaTkX89ttvxS9/+UsRFhYmevXqJZYuXSrOnz/vFvu3v/1NjB8/XvTs2VP07NlTjBgxQixZskQcP37cFTNp0iQxatSoVo/Tcirec88957a8vVMkW04l/Oyzz1rFJycnC6vVKoKDg8WQIUNEenq6OHLkiCvm1KlTYvbs2SIiIkJYrVYxZ84ccfr06VanIqps+8Wc1iyEEAcPHhRjx44VQUFBbZ4KWVpaKsxmsxg2bFir54zIiGRPa24hkzfa2neqqqrEvffeK8LDw4XVahX33nuv+Pzzz6VOa37qqafEuHHjREREhAgJCREjRowQTz/9tLDZbG5xH3zwgbjppptESEiICA8PFzNmzBDffvutW0xbpzU7nU7xyCOPiD59+ogePXqI5ORkkZ+f3ypPCCHEq6++KuLi4oTZbHbLGRee1iyEEOXl5WLhwoWiT58+IigoSFxzzTWttrW9XCpE+6db0480IXiET3eWlZWFVatWoaKiotWnA/K+yspKxMTEYMWKFVi+fLmvh0NE5Ld4DAtRJ+Tk5MDpdOLee+/19VCIiPwaj2Ehuggffvghvv32Wzz99NOYNWtWp89IIiKijrFgIboITzzxBA4ePIibbrpJ+swHIiK6eDyGhYiIiAyPx7AQERGR4bFgISIiIsPzi2NYdF3H6dOnERYWdknmkSGi1oQQqKurQ2xsbKsJ4IyKuYPIt1Tyhl8ULKdPn241WzAR+UZJSQmuvPJKXw9DCnMHkTHI5A2vFSzr16/Hc889h7KyMiQkJGDt2rUYN25cu/Fvv/02li9fjqKiIgwdOhTPPvusx/kgWrS0PL5y5eMwBQd7jNc8T/jrpmexwqdFhQ9ptVe3nlOnPZbS9qdy76weZ+SPuw6sVztGO6RCfhthkn/ydLPap+GGaPm3evA5+TdIwHn5WE3x+Pag6mbpWD2w7Zlf2xKQX+o56GeclZVScQ7Y8TF2d6oF+aXMG8BPuePksUEID/W8n1c4G6TXDQCVTvnccVZvf5b0C51ztj+x34VqFNYLAOcc8rMwV9nlx1FpU5vdubJZPr7yfA/p2LN18rEAYKvx/DelRUC1fJ6xVMnnsOAqxbxbKT/rdEh5o3SsufSs0jgcZeWeYxTyhlcKlu3btyMjIwMbN25EYmIi1qxZg+TkZBw/frzNORgOHjyI1NRUZGdn49/+7d/w+uuvY9asWTh27JjbvC3tafkq1xQc7JWCxWzxTsFiCpH/I2O2eK9gMQfJ7wwqsQAQECC/jUoFS4BawWIOkn+rBwQqFCwOhYJFV3zuFIoyXeF5DjAFKY1D0yTfe6Il/uJ+WrnUeePnYw0PNSE8zPN+3qRQgKjGN+sKsU7519vmVEvz5x0Kf3Tt8nkpyKb2vgsMkI8PMFmkY81O+QIEAEw2+XhTk/xzZ7bI7yfKeTdQvmAJMMvHmhVzB2Ryh0Le8MoPzS+88AIWL16MhQsXYuTIkdi4cSN69OiBTZs2tRn/xz/+EdOnT8fDDz+Mq6++Gk8++SSuu+46rFu3zhvDIyIDYt4goo50ecFis9lw9OhRTJs27acHMZkwbdo0HDp0qM37HDp0yC0eAJKTk9uNb25uRm1trduFiLqvS5E3AOYOou6sywuWyspKOJ1OREVFuS2PiopCWVlZm/cpKytTis/OzobVanVdeNAcUfd2KfIGwNxB1J11j3MPL5CZmYmamhrXpaSkxNdDIqJugLmDqPvq8oNu+/TpA7PZjPJy96ODy8vLER0d3eZ9oqOjleItFgssFvmDrIjI2C5F3gCYO4i6sy7/hiUoKAhjx45Fbm6ua5mu68jNzUVSUlKb90lKSnKLB4C9e/e2G09E/oV5g4g88cppzRkZGUhLS8P111+PcePGYc2aNWhoaMDChQsBAAsWLEC/fv2QnZ0NAHjooYcwadIkPP/887j99tvx5ptv4siRI3jllVe8MTwiMiDmDSLqiFcKlnnz5qGiogIrVqxAWVkZxowZgz179rgOkCsuLnZrwXvjjTfi9ddfx+OPP47HHnsMQ4cOxc6dO6V7KbQIOK/BJDyfyx1Qr9Ynom6IfK+N4P518iuukm9g1BTrkF8vgB7FCn1H5PsGQQ9UfO4GyJ+3L/HS/RSr0N4FUOu9UzNYfuWawkBCT8n3OwCA5gj5plxhBfLvO9EvUmkcmuSZNJowAfK97lrxVd4g6pC3ZmwwykwQKn2TfDzlhiaEYvtNA6qtrYXVakXciqelGsepFixNUd4pWBoVChboamNWKVhCS+TfAqqFglB4fxulYLGFyw9EU9h7VAsWlUSiUrBoTrVdXvyrQCrOIez4qPkt1NTUIDw8XOkxfKUld5z7vzipxnFnFDvdVig0jqtS6Eh71infBbbaqdbZ9axCp9tKu3xshU2tA3JFk8K6z8sX91W18rEA0KzS6fasQkdtlU63FWr7bI8KhU63ZV7sdPvDac8xwo592CWVN7rlWUJERER0eWHBQkRERIbHgoWIiIgMjwULERERGR4LFiIiIjI8FixERERkeCxYiIiIyPBYsBAREZHhsWAhIiIiw2PBQkRERIbnlbmEfMUW6YApxPOcO7YotTbHVw/x3F64xakaq3RseN966dj
mLyOkYwG1lvj1/eVbRJvUpjRSoiu029flpygCADiDFaYfUOi3rzJFgB6gNp9Aj3L5cTRFybdf71FwTmkcWoTce9qk24AzSqumS8CpkgwAOBUmuXEqfObVVebe8CKVqXN+vIPC3wuV6UVUpi1R/WpBZRuVnxDf4TcsREREZHgsWIiIiMjwWLAQERGR4bFgISIiIsNjwUJERESGx4KFiIiIDI8FCxERERlelxcs2dnZuOGGGxAWFoa+ffti1qxZOH78eIf3ycnJgaZpbpfg4OCuHhoRGRTzBhF50uUFy/79+7FkyRIcPnwYe/fuhd1ux6233oqGhoYO7xceHo7S0lLX5eTJk109NCIyKOYNIvKkyzvd7tmzx+16Tk4O+vbti6NHj2LixInt3k/TNERHR3f1cIioG2DeICJPvN6av6amBgDQu3fvDuPq6+sxcOBA6LqO6667Ds888wxGjRrVZmxzczOam5td12trawEAWpATWpDT45ii+tbIDv/Hx3PKP02Desm3Pf86b5B0rBaqNp0AIB9vssm3ZlZt4qzUbj9El44VIZ5f558zS0zZ0CIgQH7dtkb5OQIagtV2N5NN/skLPqfQTt0aojQOc5Xke1pXe0064o28AbSfO/yZSvt8ANAV+sB7s92+rpxt5GgqrfYBxXb7CtN6mBRWrPhbiMqUIUovoY/b+Hv1oFtd1/H73/8eN910E0aPHt1u3PDhw7Fp0ybs2rULW7duha7ruPHGG3Hq1Kk247Ozs2G1Wl2X/v37e2sTiOgS81beAJg7iLozrxYsS5Yswddff40333yzw7ikpCQsWLAAY8aMwaRJk7Bjxw5ERkbi5ZdfbjM+MzMTNTU1rktJSYk3hk9EPuCtvAEwdxB1Z177SWjp0qX4n//5Hxw4cABXXnml0n0DAwNx7bXXIj8/v83bLRYLLBZLVwyTiAzEm3kDYO4g6s66/BsWIQSWLl2Kd955Bx9++CEGDx6svA6n04mvvvoKMTExXT08IjIg5g0i8qTLv2FZsmQJXn/9dezatQthYWEoKysDAFitVoSE/Hiw34IFC9CvXz9kZ2cDAJ544gn84he/wFVXXYXq6mo899xzOHnyJBYtWtTVwyMiA2LeICJPurxg2bBhAwBg8uTJbss3b96M9PR0AEBxcTFMpp++3Dl37hwWL16MsrIy9OrVC2PHjsXBgwcxcuTIrh4eERkQ8wYRedLlBYsQnk/r2rdvn9v1F198ES+++GJXD4WIugnmDSLyhHMJERERkeGxYCEiIiLDY8FCREREhuf11vyXUlCIHeYennsS9w+rVlqvSaGV8ydfXSW/YotCK3qzYjtph3wLZV2lU3uA2jhMCi3xQ0ObpGMjQzueFK9VfEi9UryssoZw6diTJX2U1m2LkO+vrQcqTK9gl3/f0aWj0hLfptB7XbV9vtNLrfm91WofUJsyRLU1v6aQe5Va4qtMW6L4l1o3yz8jwqzwvYU/t+YnIiIi6gosWIiIiMjwWLAQERGR4bFgISIiIsNjwUJERESGx4KFiIiIDI8FCxERERkeCxYiIiIyPBYsREREZHgsWIiIiMjw/Ko1v9A16Lrn1sHHK/sqrbfulHz7daXW9YEKLdIltstNkPy6TUFO6djgHjalYUSGybfQHxJeKR07IrRUaRyxgdXSsQ26RTr2E3OcdGxZWJh0LABoepB0rFOhNb+pQX4KBADQAuTShKaz5f+FnApN41ViVdr424Vamrcr9Ix3KnzmdSpOEeAtyt3lTQqt+RXyv0q7fT1AbdBCqTW/wrpNbM1PRERE1CEWLERERGR4XV6wZGVlQdM0t8uIESM6vM/bb7+NESNGIDg4GNdccw12797d1cMiIgNj3iAiT7zyDcuoUaNQWlrqunz88cftxh48eBCpqam477778Pnnn2PWrFmYNWsWvv76a28MjYgMinmDiDrilYIlICAA0dHRrkufPn3ajf3jH/+I6dOn4+GHH8bVV1+NJ598Etdddx3WrVvnjaERkUExbxBRR7xSsJw4cQKxsbGIi4vD3XffjeLi4nZjDx06hGnTprktS05OxqFDh9q9T3NzM2pra90uRNS9eTtvAMwdRN1ZlxcsiYmJyMnJwZ49e7BhwwYUFhZiwoQJqKurazO+rKwMUVFRbsuioqJQVlbW7mNkZ2fDarW6Lv379+/SbSCiS+tS5A2AuYOoO+vygiUlJQVz5sxBfHw8kpOTsXv3blRXV+Ott97qssfIzMxETU2N61JSUtJl6yaiS+9S5A2AuYOoO/N647iIiAgMGzYM+fn5bd4eHR2N8vJyt2Xl5eWIjo5ud50WiwUWi3xzLyLqXryRNwDmDqLuzOt9WOrr61FQUICYmJg2b09KSkJubq7bsr179yIpKcnbQyMig2LeIKILdXnBsmzZMuzfvx9FRUU4ePAgZs+eDbPZjNTUVADAggULkJmZ6Yp/6KGHsGfPHjz//PP47rvvkJWVhSNHjmDp0qVdPTQiMijmDSLypMt/Ejp16hRSU1NRVVWFyMhIjB8/HocPH0ZkZCQAoLi4GCbTT3XSjTfeiNdffx2PP/44HnvsMQwdOhQ7d+7E6NGjlR9biB8vntSdtCqt12SXj9WDFVbskJ+zQ2WOCgCAWT4+QGEuoV49zysNY3hEueeg/y8pvEA69rrg9s8gaUukySEdW+KU/8mguPkK6ViTwpwkAKAy9YrZpvj+UKBX18jFCbV5pn7Ol3nDm3SFF1Flzh+b0nw/avO/6ArxDl1+HCrzH/0Y7515a0wmtTmvNIX9VlfIuyo5XSjOJaQyT5EIUHhdTL5tjt/lBcubb77Z4e379u1rtWzOnDmYM2dOVw+FiLoJ5g0i8oRzCREREZHhsWAhIiIiw2PBQkRERIbHgoWIiIgMjwULERERGR4LFiIiIjI8FixERERkeCxYiIiIyPBYsBAREZHheX225ksp6PNQmC2ee+MHBqqtV1eY3FU0KrRxVhiHHqTWet2peadVe89Atfbr0ZZa6di4oDPysYrv3FBTqHRso6iXjjVr8m2+nU61zwcBClNCaLpCm+9gtR1At8kNRBfy0x90V06ZuT9+xqbwmVCl3b5KG3+7wnoBwK7Sbl+hjb9qq31vteZXpdKaX+UrAIWnWanV/o/x8s+dMCs8z5pvXxN+w0JERESGx4KFiIiIDI8FCxERERkeCxYiIiIyPBYsREREZHgsWIiIiMjwWLAQERGR4XV5wTJo0CBomtbqsmTJkjbjc3JyWsUGB3vupUJE/oW5g4g60uWN4z777DM4nU7X9a+//hq33HIL5syZ0+59wsPDcfz4cdd1zcfNaYjo0mPuIKKOdHnBEhkZ6Xb9P/7jPzBkyBBMmjSp3ftomobo6OiuHgoRdSPMHUTUEa+25rfZbNi6dSsyMjI6/ORTX1+PgQMHQtd1XHfddXjmmWcwatSoduObm5vR3Nzsul5b+2P7d2H+8eKJpVqtvbYwKbSfVmiJrNLy3xGi9slRpau1PVj+bVBvD1IaR6NTPr5R4QlpFA1K44DeJB1a4QyRjq2yybf8d9jUWqQHKX
S6N9nl39PaebXpFUxBcq38TUIA8k9zhy517pDl9BziRhfyv7rrCr/QOxVi7Yp93VVa+TsVEo1KG39vUh2FUrxCG3+VGRPUW/OrxKq08fftYa9effSdO3eiuroa6enp7cYMHz4cmzZtwq5du7B161bouo4bb7wRp06davc+2dnZsFqtrkv//v29MHoi8hXmDiK6kFcLlj//+c9ISUlBbGxsuzFJSUlYsGABxowZg0mTJmHHjh2IjIzEyy+/3O59MjMzUVNT47qUlJR4Y/hE5CPMHUR0Ia/9JHTy5El88MEH2LFjh9L9AgMDce211yI/P7/dGIvFAotF4fcUIuo2mDuIqC1e+4Zl8+bN6Nu3L26//Xal+zmdTnz11VeIiYnx0siIyMiYO4ioLV4pWHRdx+bNm5GWloaAAPcvcRYsWIDMzEzX9SeeeALvv/8+vv/+exw7dgz33HMPTp48iUWLFnljaERkYMwdRNQer/wk9MEHH6C4uBi//vWvW91WXFwMk+mnOuncuXNYvHgxysrK0KtXL4wdOxYHDx7EyJEjvTE0IjIw5g4iao9XCpZbb70VQrR9ete+ffvcrr/44ot48cUXvTEMIupmmDuIqD2cS4iIiIgMjwULERERGR4LFiIiIjI8FixERERkeF6dS+hSCy9yIiBQdbYPz5TmEpKbdgUA4LDIr9cUqjYDhqbL16JNZvn5fios8nPnAMC/guUnprMGnJeOtalMxAEgWLNLx37b3E86Nr+uj3Ss3qDw5gBgUpnyR2V6LJPa5xQtWK7Rmia0LptLyKgUpmwCADgVZqJReU+rzPejOoePrjA/kENXGIfKBGeK8YovixJNU1i7yiYqrVdxLjmFXVzl75vqOLoav2EhIiIiw2PBQkRERIbHgoWIiIgMjwULERERGR4LFiIiIjI8FixERERkeCxYiIiIyPBYsBAREZHhsWAhIiIiw2PBQkRERIbnV635g885EBDg8BinByi2uVfooKyybmewfL0Y0Kw2ZpNdoRZVaOPfLHoojeM7PUo6trY5WDr229AYpXGEmOVb85c3hUnHFlX2lo4NqFWbTsBsk3/jqbxHodKKGwACJadu0NVWezlwKnwm1BX6qavEOlX6tENxzAq96FVb8wvFeOn1qsZ7aRwqbfxVh6D0kitNJ8DW/EREREQdUi5YDhw4gBkzZiA2NhaapmHnzp1utwshsGLFCsTExCAkJATTpk3DiRMnPK53/fr1GDRoEIKDg5GYmIhPP/1UdWhEZFDMG0TUWcoFS0NDAxISErB+/fo2b1+9ejVeeuklbNy4EZ988gl69uyJ5ORkNDW1P43r9u3bkZGRgZUrV+LYsWNISEhAcnIyzpw5ozo8IjIg5g0i6izlgiUlJQVPPfUUZs+e3eo2IQTWrFmDxx9/HDNnzkR8fDxee+01nD59utUnqp974YUXsHjxYixcuBAjR47Exo0b0aNHD2zatEl1eERkQMwbRNRZXXoMS2FhIcrKyjBt2jTXMqvVisTERBw6dKjN+9hsNhw9etTtPiaTCdOmTWv3Ps3NzaitrXW7EFH3dKnyBsDcQdSddWnBUlZWBgCIinI/MyQqKsp124UqKyvhdDqV7pOdnQ2r1eq69O/fvwtGT0S+cKnyBsDcQdSddcuzhDIzM1FTU+O6lJSU+HpIRNQNMHcQdV9dWrBER0cDAMrLy92Wl5eXu267UJ8+fWA2m5XuY7FYEB4e7nYhou7pUuUNgLmDqDvr0oJl8ODBiI6ORm5urmtZbW0tPvnkEyQlJbV5n6CgIIwdO9btPrquIzc3t937EJH/YN4gIhnKnW7r6+uRn5/vul5YWIi8vDz07t0bAwYMwO9//3s89dRTGDp0KAYPHozly5cjNjYWs2bNct1n6tSpmD17NpYuXQoAyMjIQFpaGq6//nqMGzcOa9asQUNDAxYuXNj5LSQin2PeIKLOUi5Yjhw5gilTpriuZ2RkAADS0tKQk5ODP/zhD2hoaMD999+P6upqjB8/Hnv27EFw8E9t1wsKClBZWem6Pm/ePFRUVGDFihUoKyvDmDFjsGfPnlYH1HkSWNOMAInu53pwoNJ6Nad8z3FhVmhrfV4+1tyk2tZdJVa+3bLZpvalXHNjiHRsSbVkC3gAp3r2UhqHKVD+NdRtCs91rfwuFFyrOL2CQmt+ldbdIlDtvWQKlZuOQdPNQGXbtxk5b6hQnX1AqYW+whfeTpWW+Eq919Vb6Htrvd5q+6/aal+plb9KsOocAQqUpuroRjQhRLfftNraWlitVky59lEEmC0e4w1TsATJxzpC1P7I2EPl45vD5Xfg5l5qO3tzL/m3lz3CKR2r9fQ8Z9TPGaJgOaP2GvYolX/uQk/LPx8hP9QpjcNUd14qzqE344Oidaipqek2x4a05I5z/xeH8DDP+2OBvV5p/SUO+efhB4d8EV7hkJ/v6pyjp3QsAJyzy88XVueQn/+rxiYfCwB1dvn4epv8h536Js9/I37u/Hn5dTvr5f+2mBXmFgs6p/ZBsccZhdzxg3zu6FFwTmkczuP5HmMcwo592CWVN7rlWUJERER0eWHBQkRERIbHgoWIiIgMjwULERERGR4LFiIiIjI8FixERERkeCxYiIiIyPBYsBAREZHhsWAhIiIiw2PBQkRERIanPJeQPzDXNCnewTtza5gU5nQxNSvOJdQs/9IGnJdfd2CDWo1rqZZ/7uyh8mN2hKi9dXX57tpKc3yYFOZsClTr6q40H5TK3CHOULXW5DDJvea687JMJ4bnVJjPCFCc/0hhXh6V9f4Y7535gVTnElKaqEuXj9WUYuWHAMAwcxp1NX7DQkRERIbHgoWIiIgMjwULERERGR4LFiIiIjI8FixERERkeCxYiIiIyPCUC5YDBw5gxowZiI2NhaZp2Llzp+s2u92ORx55BNdccw169uyJ2NhYLFiwAKdPn+5wnVlZWdA0ze0yYsQI5Y0hImNi3iCizlIuWBoaGpCQkID169e3uq2xsRHHjh3D8uXLcezYMezYsQPHjx/HHXfc4XG9o0aNQmlpqevy8ccfqw6NiAyKeYOIOku501NKSgpSUlLavM1qtWLv3r1uy9atW4dx48ahuLgYAwYMaH8gAQGIjo5WHQ4RdQPMG0TUWV4/hqWmpgaapiEiIqLDuBMnTiA2NhZxcXG4++67UVxc3G5sc3Mzamtr3S5E5D+8kTcA5g6i7syrvbSbmprwyCOPIDU1FeHh4e3GJSYmIicnB8OHD0dpaSlWrVqFCRMm4Ouvv0ZYWFir+OzsbKxatarVclOjDSaZNvqaYmtm1bbIkjSbQz62Sa01v+m8/LoDGhRa89epvWWcwfLrdoQotAS3qL2GziCFFuJqT7U0k/xLAgAIbJR/49l7yj93eoBaa/4gk9xz53B0zY7irbwBtJ876OKotNvXobbPqrTmd+oK41BoiQ8AulOlNb98qEq7fZNTPhYANIV4lWk9IJkLvMVr37DY7XbMnTsXQghs2LChw9iUlBTMmTMH8fHxSE5Oxu7du1FdXY233nqrzfjMzEzU1NS4LiUlJd7YB
- [... base64-encoded PNG data of the plot output omitted ...]"
- },
- "metadata": {},
- "output_type": "display_data"
- }
- ],
- "execution_count": 3
- },
- {
- "cell_type": "code",
- "id": "082ab7a8-22e0-498b-b138-158dc9f2658f",
- "metadata": {
- "ExecuteTime": {
- "end_time": "2024-09-19T13:35:29.122858Z",
- "start_time": "2024-09-19T13:35:29.119985Z"
- }
- },
- "source": [
- "u_train.labels[3]['dof']"
- ],
- "outputs": [
- {
- "data": {
- "text/plain": [
- "['u']"
- ]
- },
- "execution_count": 4,
- "metadata": {},
- "output_type": "execute_result"
- }
- ],
- "execution_count": 4
- },
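- {
- "cell_type": "markdown",
- "id": "added-labeltensor-note",
- "metadata": {},
- "source": [
- "The cell above works because the data are stored as `LabelTensor`s: string labels are attached to tensor dimensions, so components can be addressed by name. A minimal sketch of the idea (illustrative only, not executed in this tutorial; it assumes the basic list-label `LabelTensor` API with `extract`):"
- ]
- },
- {
- "cell_type": "code",
- "id": "added-labeltensor-example",
- "metadata": {},
- "source": [
- "import torch\n",
- "from pina import LabelTensor\n",
- "\n",
- "# attach string labels to the columns of a plain tensor\n",
- "t = LabelTensor(torch.rand(10, 2), labels=['x', 'y'])\n",
- "print(t.labels)                # the attached labels\n",
- "print(t.extract(['x']).shape)  # select the 'x' column by name"
- ],
- "outputs": [],
- "execution_count": null
- },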
- {
- "cell_type": "markdown",
- "id": "89a77ff1",
- "metadata": {},
- "source": [
- "We now define the problem class. It is a very simple class, inheriting from `AbstractProblem`."
- ]
- },
- {
- "cell_type": "code",
- "id": "8b27d283",
- "metadata": {
- "ExecuteTime": {
- "end_time": "2024-09-19T13:35:29.136572Z",
- "start_time": "2024-09-19T13:35:29.134124Z"
- }
- },
- "source": [
- "class NeuralOperatorSolver(AbstractProblem):\n",
- "    input_variables = k_train.labels[3]['dof']\n",
- "    output_variables = u_train.labels[3]['dof']\n",
- "    domains = {\n",
- "        'pts': k_train\n",
- "    }\n",
- "    conditions = {'data': Condition(domain='pts',\n",
- "                                    output_points=u_train)}\n",
- "\n",
- "# make problem\n",
- "problem = NeuralOperatorSolver()"
- ],
- "outputs": [
- {
- "name": "stdout",
- "output_type": "stream",
- "text": [
- "\n"
- ]
- }
- ],
- "execution_count": 5
- },
- {
- "cell_type": "markdown",
- "id": "1096cc20",
- "metadata": {},
- "source": [
- "## Solving the problem with a FeedForward Neural Network\n",
- "\n",
- "We first solve the problem with a feedforward neural network, using the `SupervisedSolver` since we are training in a supervised setting."
- ]
- },
- {
- "cell_type": "code",
- "id": "e34f18b0",
- "metadata": {
- "ExecuteTime": {
- "end_time": "2024-09-19T13:35:31.245429Z",
- "start_time": "2024-09-19T13:35:29.154937Z"
- }
- },
- "source": [
- "# make model\n",
- "model = FeedForward(input_dimensions=1, output_dimensions=1)\n",
- "\n",
- "# make solver\n",
- "solver = SupervisedSolver(problem=problem, model=model)\n",
- "\n",
- "# make the trainer and train; we train on CPU and skip the model summary (optional)\n",
- "trainer = Trainer(solver=solver, max_epochs=10, accelerator='cpu', enable_model_summary=False, batch_size=10)\n",
- "trainer.train()"
- ],
- "outputs": [
- {
- "name": "stderr",
- "output_type": "stream",
- "text": [
- "GPU available: True (mps), used: False\n",
- "TPU available: False, using: 0 TPU cores\n",
- "HPU available: False, using: 0 HPUs\n",
- "/Users/filippoolivo/miniconda3/envs/PINAv0.2/lib/python3.11/site-packages/pytorch_lightning/trainer/setup.py:177: GPU available but not used. You can set it by doing `Trainer(accelerator='gpu')`.\n"
- ]
- },
- {
- "name": "stdout",
- "output_type": "stream",
- "text": [
- "Epoch 9: 100%|██████████| 100/100 [00:00<00:00, 552.80it/s, v_num=18, mean_loss=0.113]"
- ]
- },
- {
- "name": "stderr",
- "output_type": "stream",
- "text": [
- "`Trainer.fit` stopped: `max_epochs=10` reached.\n"
- ]
- },
- {
- "name": "stdout",
- "output_type": "stream",
- "text": [
- "Epoch 9: 100%|██████████| 100/100 [00:00<00:00, 547.37it/s, v_num=18, mean_loss=0.113]\n"
- ]
- }
- ],
- "execution_count": 6
- },
- {
- "cell_type": "markdown",
- "id": "7b2c35be",
- "metadata": {},
- "source": [
- "The final loss is still quite high. We can quantify the error by importing `LpLoss`."
- ]
- },
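- {
- "cell_type": "markdown",
- "id": "added-relative-error-note",
- "metadata": {},
- "source": [
- "For reference, the metric used below is a relative error; a sketch, assuming `LpLoss(relative=True)` follows the standard definition of the relative $L^p$ error between the true field $u$ and the prediction $\\hat{u}$:\n",
- "\n",
- "$$\\varepsilon_{\\mathrm{rel}} = \\frac{\\lVert u - \\hat{u} \\rVert_p}{\\lVert u \\rVert_p}$$"
- ]
- },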
- {
- "cell_type": "code",
- "id": "0e2a6aa4",
- "metadata": {
- "ExecuteTime": {
- "end_time": "2024-09-19T13:35:31.295336Z",
- "start_time": "2024-09-19T13:35:31.256308Z"
- }
- },
- "source": [
- "from pina.loss import LpLoss\n",
- "\n",
- "# make the metric\n",
- "metric_err = LpLoss(relative=True)\n",
- "\n",
- "model = solver.models[0]\n",
- "err = float(metric_err(u_train.squeeze(-1), model(k_train).squeeze(-1)).mean())*100\n",
- "print(f'Final error training {err:.2f}%')\n",
- "\n",
- "err = float(metric_err(u_test.squeeze(-1), model(k_test).squeeze(-1)).mean())*100\n",
- "print(f'Final error testing {err:.2f}%')"
- ],
- "outputs": [
- {
- "name": "stdout",
- "output_type": "stream",
- "text": [
- "Final error training 56.05%\n",
- "Final error testing 55.95%\n"
- ]
- }
- ],
- "execution_count": 7
- },
- {
- "cell_type": "markdown",
- "id": "6b5e5aa6",
- "metadata": {},
- "source": [
- "## Solving the problem with a Fourier Neural Operator (FNO)\n",
- "\n",
- "We now solve the same problem with an FNO. Since we are learning an operator, this architecture is better suited, as we shall see."
- ]
- },
- {
- "cell_type": "code",
- "id": "9af523a5",
- "metadata": {
- "ExecuteTime": {
- "end_time": "2024-09-19T13:35:44.717807Z",
- "start_time": "2024-09-19T13:35:31.306689Z"
- }
- },
- "source": [
- "# make model\n",
- "lifting_net = torch.nn.Linear(1, 24)\n",
- "projecting_net = torch.nn.Linear(24, 1)\n",
- "model = FNO(lifting_net=lifting_net,\n",
- "            projecting_net=projecting_net,\n",
- "            n_modes=8,\n",
- "            dimensions=2,\n",
- "            inner_size=24,\n",
- "            padding=8)\n",
- "\n",
- "# make solver\n",
- "solver = SupervisedSolver(problem=problem, model=model)\n",
- "\n",
- "# make the trainer and train; we train on CPU and skip the model summary (optional)\n",
- "trainer = Trainer(solver=solver, max_epochs=10, accelerator='cpu', enable_model_summary=False, batch_size=10)\n",
- "trainer.train()"
- ],
- "outputs": [
- {
- "name": "stderr",
- "output_type": "stream",
- "text": [
- "GPU available: True (mps), used: False\n",
- "TPU available: False, using: 0 TPU cores\n",
- "HPU available: False, using: 0 HPUs\n"
- ]
- },
- {
- "name": "stdout",
- "output_type": "stream",
- "text": [
- "Epoch 9: 100%|██████████| 100/100 [00:01<00:00, 73.04it/s, v_num=19, mean_loss=0.00215]"
- ]
- },
- {
- "name": "stderr",
- "output_type": "stream",
- "text": [
- "`Trainer.fit` stopped: `max_epochs=10` reached.\n"
- ]
- },
- {
- "name": "stdout",
- "output_type": "stream",
- "text": [
- "Epoch 9: 100%|██████████| 100/100 [00:01<00:00, 72.84it/s, v_num=19, mean_loss=0.00215]\n"
- ]
- }
- ],
- "execution_count": 8
- },
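- {
- "cell_type": "markdown",
- "id": "added-param-count-note",
- "metadata": {},
- "source": [
- "The comment below mentions the parameter count of the two models; a quick sketch to verify it (illustrative only, not executed here; `model` is the FNO defined above, and the `FeedForward` baseline is rebuilt just for the comparison):"
- ]
- },
- {
- "cell_type": "code",
- "id": "added-param-count-example",
- "metadata": {},
- "source": [
- "# count trainable parameters of the FNO and of the FeedForward baseline\n",
- "fno_params = sum(p.numel() for p in model.parameters() if p.requires_grad)\n",
- "ffn_params = sum(p.numel() for p in FeedForward(input_dimensions=1, output_dimensions=1).parameters())\n",
- "print(f'FNO parameters: {fno_params}, FeedForward parameters: {ffn_params}')"
- ],
- "outputs": [],
- "execution_count": null
- },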
- {
- "cell_type": "markdown",
- "id": "84964cb9",
- "metadata": {},
- "source": [
- "The final loss is clearly lower. Let's check the testing error. Note that the number of parameters is much higher than in a `FeedForward` network, so we suggest using a GPU or TPU for a speed-up in training when many data samples are used."
- ]
- },
- {
- "cell_type": "code",
- "id": "58e2db89",
- "metadata": {
- "ExecuteTime": {
- "end_time": "2024-09-19T13:35:45.259819Z",
- "start_time": "2024-09-19T13:35:44.729042Z"
- }
- },
- "source": [
- "model = solver.models[0]\n",
- "\n",
- "err = float(metric_err(u_train.squeeze(-1), model(k_train).squeeze(-1)).mean())*100\n",
- "print(f'Final error training {err:.2f}%')\n",
- "\n",
- "err = float(metric_err(u_test.squeeze(-1), model(k_test).squeeze(-1)).mean())*100\n",
- "print(f'Final error testing {err:.2f}%')"
- ],
- "outputs": [
- {
- "name": "stdout",
- "output_type": "stream",
- "text": [
- "Final error training 7.48%\n",
- "Final error testing 7.73%\n"
- ]
- }
- ],
- "execution_count": 9
- },
- {
- "cell_type": "markdown",
- "id": "26e3a6e4",
- "metadata": {},
- "source": [
- "As we can see, the error is much lower!"
- ]
- },
- {
- "cell_type": "markdown",
- "id": "ba1dfa4b",
- "metadata": {},
- "source": [
- "## What's next?\n",
- "\n",
- "We have shown a very simple example of using the `FNO` to learn a neural operator. **PINA** currently implements the 1D/2D/3D cases. We suggest extending the tutorial with more complex problems and longer training, to see the full potential of neural operators."
- ]
- }
- ],
- "metadata": {
- "interpreter": {
- "hash": "aee8b7b246df8f9039afb4144a1f6fd8d2ca17a180786b69acc140d282b71a49"
- },
- "kernelspec": {
- "display_name": "Python 3 (ipykernel)",
- "language": "python",
- "name": "python3"
- },
- "language_info": {
- "codemirror_mode": {
- "name": "ipython",
- "version": 3
- },
- "file_extension": ".py",
- "mimetype": "text/x-python",
- "name": "python",
- "nbconvert_exporter": "python",
- "pygments_lexer": "ipython3",
- "version": "3.11.9"
- }
- },
- "nbformat": 4,
- "nbformat_minor": 5
-}