Implementation of DataLoader and DataModule (#383)
Refactoring for 0.2:
* Data module, data loader and dataset
* Refactor LabelTensor
* Refactor solvers

Co-authored-by: dario-coscia <dariocos99@gmail.com>
committed by Nicola Demo
parent dd43c8304c
commit a27bd35443
pina/data/__init__.py
@@ -2,14 +2,11 @@
 """
 Import data classes
 """
 __all__ = [
-    'PinaDataLoader', 'SupervisedDataset', 'SamplePointDataset',
-    'UnsupervisedDataset', 'Batch', 'PinaDataModule', 'BaseDataset'
+    'PinaDataModule',
+    'PinaDataset'
 ]
 
-from .pina_dataloader import PinaDataLoader
-from .supervised_dataset import SupervisedDataset
-from .sample_dataset import SamplePointDataset
-from .unsupervised_dataset import UnsupervisedDataset
-from .pina_batch import Batch
 from .data_module import PinaDataModule
-from .base_dataset import BaseDataset
+from .dataset import PinaDataset
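After this change the package exports only the two new entry points. A downstream import (hypothetical usage, not part of the diff itself) becomes:

    from pina.data import PinaDataModule, PinaDataset

The removed names (PinaDataLoader, Batch, BaseDataset and the per-type datasets) are no longer public.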
pina/data/base_dataset.py (deleted)
@@ -1,157 +0,0 @@
"""
Basic data module implementation
"""
import torch
import logging

from torch.utils.data import Dataset

from ..label_tensor import LabelTensor


class BaseDataset(Dataset):
    """
    BaseDataset class, which handles initialization and data retrieval
    :var condition_indices: List of indices
    :var device: torch.device
    """

    def __new__(cls, problem=None, device=torch.device('cpu')):
        """
        Ensure correct definition of __slots__ before initialization
        :param AbstractProblem problem: The formulation of the problem.
        :param torch.device device: The device on which the
            dataset will be loaded.
        """
        if cls is BaseDataset:
            raise TypeError(
                'BaseDataset cannot be instantiated directly. Use a subclass.')
        if not hasattr(cls, '__slots__'):
            raise TypeError(
                'Something is wrong, __slots__ must be defined in subclasses.')
        return object.__new__(cls)

    def __init__(self, problem=None, device=torch.device('cpu')):
        """
        Initialize the object based on __slots__
        :param AbstractProblem problem: The formulation of the problem.
        :param torch.device device: The device on which the
            dataset will be loaded.
        """
        super().__init__()
        self.empty = True
        self.problem = problem
        self.device = device
        self.condition_indices = None
        for slot in self.__slots__:
            setattr(self, slot, [])
        self.num_el_per_condition = []
        self.conditions_idx = []
        if self.problem is not None:
            self._init_from_problem(self.problem.collector.data_collections)
        self.initialized = False

    def _init_from_problem(self, collector_dict):
        """
        TODO
        """
        for name, data in collector_dict.items():
            keys = list(data.keys())
            if set(self.__slots__) == set(keys):
                self._populate_init_list(data)
                idx = [
                    key for key, val in
                    self.problem.collector.conditions_name.items()
                    if val == name
                ]
                self.conditions_idx.append(idx)
        self.initialize()

    def add_points(self, data_dict, condition_idx, batching_dim=0):
        """
        This method fills the internal lists of data points
        :param data_dict: dictionary containing data points
        :param condition_idx: index of the condition to which the data points
            belong
        :param batching_dim: dimension of the batching
        :raises: ValueError if the dataset has already been initialized
        """
        if not self.initialized:
            self._populate_init_list(data_dict, batching_dim)
            self.conditions_idx.append(condition_idx)
            self.empty = False
        else:
            raise ValueError('Dataset already initialized')

    def _populate_init_list(self, data_dict, batching_dim=0):
        current_cond_num_el = None
        for slot in data_dict.keys():
            slot_data = data_dict[slot]
            if batching_dim != 0:
                if isinstance(slot_data, (LabelTensor, torch.Tensor)):
                    dims = len(slot_data.size())
                    slot_data = slot_data.permute(
                        [batching_dim] +
                        [dim for dim in range(dims) if dim != batching_dim])
            if current_cond_num_el is None:
                current_cond_num_el = len(slot_data)
            elif current_cond_num_el != len(slot_data):
                raise ValueError('Different dimension in same condition')
            current_list = getattr(self, slot)
            current_list += [
                slot_data
            ] if not (isinstance(slot_data, list)) else slot_data
        self.num_el_per_condition.append(current_cond_num_el)

    def initialize(self):
        """
        Initialize the datasets tensors/LabelTensors/lists given the lists
        already filled
        """
        logging.debug(f'Initialize dataset {self.__class__.__name__}')

        if self.num_el_per_condition:
            self.condition_indices = torch.cat([
                torch.tensor([i] * self.num_el_per_condition[i],
                             dtype=torch.uint8)
                for i in range(len(self.num_el_per_condition))
            ], dim=0)
            for slot in self.__slots__:
                current_attribute = getattr(self, slot)
                if all(isinstance(a, LabelTensor) for a in current_attribute):
                    setattr(self, slot, LabelTensor.vstack(current_attribute))
        self.initialized = True

    def __len__(self):
        """
        :return: Number of elements in the dataset
        """
        return len(getattr(self, self.__slots__[0]))

    def __getitem__(self, idx):
        """
        :param idx:
        :return:
        """
        if not isinstance(idx, (tuple, list, slice, int)):
            raise IndexError("Invalid index")
        tensors = []
        for attribute in self.__slots__:
            tensor = getattr(self, attribute)
            if isinstance(attribute, (LabelTensor, torch.Tensor)):
                tensors.append(tensor.__getitem__(idx))
            elif isinstance(attribute, list):
                if isinstance(idx, (list, tuple)):
                    tensor = [tensor[i] for i in idx]
                tensors.append(tensor)
        return tensors

    def apply_shuffle(self, indices):
        for slot in self.__slots__:
            if slot != 'equation':
                attribute = getattr(self, slot)
                if isinstance(attribute, (LabelTensor, torch.Tensor)):
                    setattr(self, 'slot', attribute[[indices]])
                if isinstance(attribute, list):
                    setattr(self, 'slot', [attribute[i] for i in indices])
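For reference, BaseDataset used __new__ to forbid direct instantiation and to require __slots__ on subclasses; a minimal subclass, mirroring the SupervisedDataset removed further down in this commit, would have looked like this (a sketch, assuming the pre-commit import path):

    # Sketch of a BaseDataset subclass; the slots name the per-condition
    # data fields the dataset accumulates.
    from pina.data.base_dataset import BaseDataset

    class MySupervisedDataset(BaseDataset):
        data_type = 'supervised'
        __slots__ = ['input_points', 'output_points']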

pina/data/data_module.py
@@ -1,17 +1,71 @@
 """
 This module provide basic data management functionalities
 """
 
+import logging
+from lightning.pytorch import LightningDataModule
 import math
 import torch
-import logging
-from pytorch_lightning import LightningDataModule
-from .sample_dataset import SamplePointDataset
-from .supervised_dataset import SupervisedDataset
-from .unsupervised_dataset import UnsupervisedDataset
-from .pina_dataloader import PinaDataLoader
-from .pina_subset import PinaSubset
+from ..label_tensor import LabelTensor
+from torch.utils.data import DataLoader, BatchSampler, SequentialSampler, \
+    RandomSampler
+from torch.utils.data.distributed import DistributedSampler
+from .dataset import PinaDatasetFactory
+
+
+class Collator:
+    def __init__(self, max_conditions_lengths):
+        self.max_conditions_lengths = max_conditions_lengths
+        self.callable_function = self._collate_custom_dataloader if \
+            max_conditions_lengths is None else (
+            self._collate_standard_dataloader)
+
+    @staticmethod
+    def _collate_custom_dataloader(batch):
+        return batch[0]
+
+    def _collate_standard_dataloader(self, batch):
+        """
+        Function used to collate the batch
+        """
+        batch_dict = {}
+        if isinstance(batch, dict):
+            return batch
+        conditions_names = batch[0].keys()
+
+        # Condition names
+        for condition_name in conditions_names:
+            single_cond_dict = {}
+            condition_args = batch[0][condition_name].keys()
+            for arg in condition_args:
+                data_list = [batch[idx][condition_name][arg] for idx in range(
+                    min(len(batch),
+                        self.max_conditions_lengths[condition_name]))]
+                if isinstance(data_list[0], LabelTensor):
+                    single_cond_dict[arg] = LabelTensor.stack(data_list)
+                elif isinstance(data_list[0], torch.Tensor):
+                    single_cond_dict[arg] = torch.stack(data_list)
+                else:
+                    raise NotImplementedError(
+                        f"Data type {type(data_list[0])} not supported")
+            batch_dict[condition_name] = single_cond_dict
+        return batch_dict
+
+    def __call__(self, batch):
+        return self.callable_function(batch)
+
+
+class PinaBatchSampler(BatchSampler):
+    def __init__(self, dataset, batch_size, shuffle, sampler=None):
+        if sampler is None:
+            if (torch.distributed.is_available() and
+                    torch.distributed.is_initialized()):
+                rank = torch.distributed.get_rank()
+                world_size = torch.distributed.get_world_size()
+                sampler = DistributedSampler(dataset, shuffle=shuffle,
+                                             rank=rank,
+                                             num_replicas=world_size)
+            else:
+                if shuffle:
+                    sampler = RandomSampler(dataset)
+                else:
+                    sampler = SequentialSampler(dataset)
+        super().__init__(sampler=sampler, batch_size=batch_size,
+                         drop_last=False)
+
+
 class PinaDataModule(LightningDataModule):
     """
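The new Collator works in two modes: given per-condition length limits it stacks items fetched one at a time (standard torch batching), while given None it simply unwraps a pre-assembled batch. The snippet below is a self-contained sketch of the first mode with a single invented condition name; it is not code from this commit:

    import torch
    from torch.utils.data import DataLoader

    def collate(batch):
        # mimics Collator._collate_standard_dataloader for one condition
        stacked = torch.stack([b['gamma']['input_points'] for b in batch])
        return {'gamma': {'input_points': stacked}}

    items = [{'gamma': {'input_points': torch.rand(3)}} for _ in range(8)]
    loader = DataLoader(items, batch_size=4, collate_fn=collate)
    for batch in loader:
        print(batch['gamma']['input_points'].shape)  # torch.Size([4, 3])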
@@ -20,160 +74,218 @@ class PinaDataModule(LightningDataModule):
     """
 
     def __init__(self,
                  problem,
                  device,
+                 collector,
                  train_size=.7,
-                 test_size=.1,
-                 val_size=.2,
+                 test_size=.2,
+                 val_size=.1,
                  predict_size=0.,
                  batch_size=None,
                  shuffle=True,
-                 datasets=None):
+                 repeat=False,
+                 automatic_batching=False
+                 ):
         """
         Initialize the object, creating dataset based on input problem
         :param AbstractProblem problem: PINA problem
         :param device: Device used for training and testing
+        :param Collector collector: PINA collector
         :param train_size: number/percentage of elements in train split
        :param test_size: number/percentage of elements in test split
-        :param eval_size: number/percentage of elements in evaluation split
+        :param val_size: number/percentage of elements in evaluation split
         :param batch_size: batch size used for training
-        :param datasets: list of datasets objects
         """
-        logging.debug('Start initialization of Pina DataModule')
+        logging.info('Start initialization of Pina DataModule')
         super().__init__()
         self.problem = problem
         self.device = device
-        self.dataset_classes = [
-            SupervisedDataset, UnsupervisedDataset, SamplePointDataset
-        ]
-        if datasets is None:
-            self.datasets = None
-        else:
-            self.datasets = datasets
-
-        self.split_length = []
-        self.split_names = []
-        self.loader_functions = {}
+        self.default_batching = automatic_batching
         self.batch_size = batch_size
         self.condition_names = problem.collector.conditions_name
 
-        if train_size > 0:
-            self.split_names.append('train')
-            self.split_length.append(train_size)
-            self.loader_functions['train_dataloader'] = lambda: PinaDataLoader(
-                self.splits['train'], self.batch_size, self.condition_names)
-        if test_size > 0:
-            self.split_length.append(test_size)
-            self.split_names.append('test')
-            self.loader_functions['test_dataloader'] = lambda: PinaDataLoader(
-                self.splits['test'], self.batch_size, self.condition_names)
-        if val_size > 0:
-            self.split_length.append(val_size)
-            self.split_names.append('val')
-            self.loader_functions['val_dataloader'] = lambda: PinaDataLoader(
-                self.splits['val'], self.batch_size, self.condition_names)
-        if predict_size > 0:
-            self.split_length.append(predict_size)
-            self.split_names.append('predict')
-            self.loader_functions['predict_dataloader'] = lambda: PinaDataLoader(
-                self.splits['predict'], self.batch_size, self.condition_names)
-        self.splits = {k: {} for k in self.split_names}
         self.shuffle = shuffle
+        self.repeat = repeat
 
-        for k, v in self.loader_functions.items():
-            setattr(self, k, v)
-
-    def prepare_data(self):
-        if self.datasets is None:
-            self._create_datasets()
+        # Begin Data splitting
+        splits_dict = {}
+        if train_size > 0:
+            splits_dict['train'] = train_size
+            self.train_dataset = None
+        else:
+            self.train_dataloader = super().train_dataloader
+        if test_size > 0:
+            splits_dict['test'] = test_size
+            self.test_dataset = None
+        else:
+            self.test_dataloader = super().test_dataloader
+        if val_size > 0:
+            splits_dict['val'] = val_size
+            self.val_dataset = None
+        else:
+            self.val_dataloader = super().val_dataloader
+        if predict_size > 0:
+            splits_dict['predict'] = predict_size
+            self.predict_dataset = None
+        else:
+            self.predict_dataloader = super().predict_dataloader
+        self.collector_splits = self._create_splits(collector, splits_dict)
 
     def setup(self, stage=None):
         """
         Perform the splitting of the dataset
         """
-        logging.debug('Start setup of Pina DataModule obj')
-        if self.datasets is None:
-            self._create_datasets()
         if stage == 'fit' or stage is None:
-            for dataset in self.datasets:
-                if len(dataset) > 0:
-                    splits = self.dataset_split(dataset,
-                                                self.split_length,
-                                                shuffle=self.shuffle)
-                    for i in range(len(self.split_length)):
-                        self.splits[self.split_names[i]][
-                            dataset.data_type] = splits[i]
+            self.train_dataset = PinaDatasetFactory(
+                self.collector_splits['train'],
+                max_conditions_lengths=self.find_max_conditions_lengths(
+                    'train'))
+            if 'val' in self.collector_splits.keys():
+                self.val_dataset = PinaDatasetFactory(
+                    self.collector_splits['val'],
+                    max_conditions_lengths=self.find_max_conditions_lengths(
+                        'val')
+                )
         elif stage == 'test':
-            raise NotImplementedError("Testing pipeline not implemented yet")
+            self.test_dataset = PinaDatasetFactory(
+                self.collector_splits['test'],
+                max_conditions_lengths=self.find_max_conditions_lengths(
+                    'test')
+            )
+        elif stage == 'predict':
+            self.predict_dataset = PinaDatasetFactory(
+                self.collector_splits['predict'],
+                max_conditions_lengths=self.find_max_conditions_lengths(
+                    'predict')
+            )
         else:
-            raise ValueError("stage must be either 'fit' or 'test'")
+            raise ValueError(
+                "stage must be either 'fit' or 'test' or 'predict'."
+            )
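Under Lightning's lifecycle, setup builds one dataset per stage through PinaDatasetFactory. A hand-driven sketch of the intended flow, assuming `problem`, `device` and `collector` objects already exist (this is illustrative usage, not code from the commit):

    dm = PinaDataModule(problem, device, collector, batch_size=32)
    dm.setup(stage='fit')       # builds train_dataset, and val_dataset if val_size > 0
    loader = dm.train_dataloader()
    batch = next(iter(loader))  # dict: condition name -> {'input_points': ..., ...}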
 
     @staticmethod
-    def dataset_split(dataset, lengths, seed=None, shuffle=True):
-        """
-        Perform the splitting of the dataset
-        :param dataset: dataset object we wanted to split
-        :param lengths: lengths of elements in dataset
-        :param seed: random seed
-        :param shuffle: shuffle dataset
-        :return: split dataset
-        :rtype: PinaSubset
-        """
-        if sum(lengths) - 1 < 1e-3:
-            len_dataset = len(dataset)
-            lengths = [
-                int(math.floor(len_dataset * length)) for length in lengths
-            ]
-            remainder = len(dataset) - sum(lengths)
-            for i in range(remainder):
-                lengths[i % len(lengths)] += 1
-        elif sum(lengths) - 1 >= 1e-3:
-            raise ValueError(f"Sum of lengths is {sum(lengths)} less than 1")
-
-        if shuffle:
-            if seed is not None:
-                generator = torch.Generator()
-                generator.manual_seed(seed)
-                indices = torch.randperm(sum(lengths), generator=generator)
-            else:
-                indices = torch.randperm(sum(lengths))
-            dataset.apply_shuffle(indices)
-
-        indices = torch.arange(0, sum(lengths), 1, dtype=torch.uint8).tolist()
-        offsets = [
-            sum(lengths[:i]) if i > 0 else 0 for i in range(len(lengths))
-        ]
-        return [
-            PinaSubset(dataset, indices[offset:offset + length])
-            for offset, length in zip(offsets, lengths)
-        ]
+    def _split_condition(condition_dict, splits_dict):
+        len_condition = len(condition_dict['input_points'])
+
+        lengths = [
+            int(math.floor(len_condition * length)) for length in
+            splits_dict.values()
+        ]
+        remainder = len_condition - sum(lengths)
+        for i in range(remainder):
+            lengths[i % len(lengths)] += 1
+        splits_dict = {k: v for k, v in zip(splits_dict.keys(), lengths)}
+        to_return_dict = {}
+        offset = 0
+        for stage, stage_len in splits_dict.items():
+            to_return_dict[stage] = {k: v[offset:offset + stage_len]
+                                     for k, v in condition_dict.items()
+                                     if k != 'equation'
+                                     # Equations are NEVER dataloaded
+                                     }
+            offset += stage_len
+        return to_return_dict
 
-    def _create_datasets(self):
+    def _create_splits(self, collector, splits_dict):
         """
         Create the dataset objects putting data
         """
-        logging.debug('Dataset creation in PinaDataModule obj')
-        collector = self.problem.collector
-        batching_dim = self.problem.batching_dimension
-        datasets_slots = [i.__slots__ for i in self.dataset_classes]
-        self.datasets = [
-            dataset(device=self.device) for dataset in self.dataset_classes
-        ]
-        logging.debug('Filling datasets in PinaDataModule obj')
-        for name, data in collector.data_collections.items():
-            keys = list(data.keys())
-            idx = [
-                key for key, val in collector.conditions_name.items()
-                if val == name
-            ]
-            for i, slot in enumerate(datasets_slots):
-                if slot == keys:
-                    self.datasets[i].add_points(data, idx[0], batching_dim)
-
-        datasets = []
-        for dataset in self.datasets:
-            if not dataset.empty:
-                dataset.initialize()
-                datasets.append(dataset)
-        self.datasets = datasets
+        # ----------- Auxiliary function ------------
+        def _apply_shuffle(condition_dict, len_data):
+            idx = torch.randperm(len_data)
+            for k, v in condition_dict.items():
+                if k == 'equation':
+                    continue
+                if isinstance(v, list):
+                    condition_dict[k] = [v[i] for i in idx]
+                elif isinstance(v, LabelTensor):
+                    condition_dict[k] = LabelTensor(v.tensor[idx],
+                                                    v.labels)
+                elif isinstance(v, torch.Tensor):
+                    condition_dict[k] = v[idx]
+                else:
+                    raise ValueError(f"Data type {type(v)} not supported")
+        # ----------- End auxiliary function ------------
+
+        logging.debug('Dataset creation in PinaDataModule obj')
+        split_names = list(splits_dict.keys())
+        dataset_dict = {name: {} for name in split_names}
+        for condition_name, condition_dict in collector.data_collections.items():
+            len_data = len(condition_dict['input_points'])
+            if self.shuffle:
+                _apply_shuffle(condition_dict, len_data)
+            for key, data in self._split_condition(condition_dict,
+                                                   splits_dict).items():
+                dataset_dict[key].update({condition_name: data})
+        return dataset_dict
+
+    def find_max_conditions_lengths(self, split):
+        max_conditions_lengths = {}
+        for k, v in self.collector_splits[split].items():
+            if self.batch_size is None:
+                max_conditions_lengths[k] = len(v['input_points'])
+            elif self.repeat:
+                max_conditions_lengths[k] = self.batch_size
+            else:
+                max_conditions_lengths[k] = min(len(v['input_points']),
+                                                self.batch_size)
+        return max_conditions_lengths
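_split_condition floors each fraction of the condition length and then hands out the remainder one element at a time, so the split sizes always sum to the condition length. The arithmetic in isolation:

    import math

    def split_lengths(n, fractions):
        # floor each share, then distribute the remainder round-robin
        lengths = [int(math.floor(n * f)) for f in fractions]
        for i in range(n - sum(lengths)):
            lengths[i % len(lengths)] += 1
        return lengths

    print(split_lengths(13, [0.7, 0.2, 0.1]))  # [10, 2, 1]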
 
+    def val_dataloader(self):
+        """
+        Create the validation dataloader
+        """
+        batch_size = self.batch_size if self.batch_size is not None else len(
+            self.val_dataset)
+        # Use default batching in torch DataLoader (good if batch size is small)
+        if self.default_batching:
+            collate = Collator(self.find_max_conditions_lengths('val'))
+            return DataLoader(self.val_dataset, self.batch_size,
+                              collate_fn=collate)
+        collate = Collator(None)
+        # Use custom batching (good if batch size is large)
+        sampler = PinaBatchSampler(self.val_dataset, batch_size, shuffle=False)
+        return DataLoader(self.val_dataset, sampler=sampler,
+                          collate_fn=collate)
+
+    def train_dataloader(self):
+        """
+        Create the training dataloader
+        """
+        # Use default batching in torch DataLoader (good if batch size is small)
+        if self.default_batching:
+            collate = Collator(self.find_max_conditions_lengths('train'))
+            return DataLoader(self.train_dataset, self.batch_size,
+                              collate_fn=collate)
+        collate = Collator(None)
+        # Use custom batching (good if batch size is large)
+        batch_size = self.batch_size if self.batch_size is not None else len(
+            self.train_dataset)
+        sampler = PinaBatchSampler(self.train_dataset, batch_size,
+                                   shuffle=False)
+        return DataLoader(self.train_dataset, sampler=sampler,
+                          collate_fn=collate)
+
+    def test_dataloader(self):
+        """
+        Create the testing dataloader
+        """
+        raise NotImplementedError("Test dataloader not implemented")
+
+    def predict_dataloader(self):
+        """
+        Create the prediction dataloader
+        """
+        raise NotImplementedError("Predict dataloader not implemented")
+
+    def transfer_batch_to_device(self, batch, device, dataloader_idx):
+        """
+        Transfer the batch to the device. This method is called in the
+        training loop and is used to transfer the batch to the device.
+        """
+        batch = [
+            (k, super(LightningDataModule, self).transfer_batch_to_device(
+                v, device, dataloader_idx))
+            for k, v in batch.items()
+        ]
+        return batch
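When automatic_batching is off, PinaBatchSampler is passed as the sampler, so each "index" the dataset receives is a whole list of indices and Collator(None) only unwraps the single pre-built element. A minimal, self-contained sketch of that trick (not code from this commit):

    import torch
    from torch.utils.data import (BatchSampler, DataLoader, Dataset,
                                  SequentialSampler)

    class ListIndexDataset(Dataset):
        def __init__(self, n):
            self.data = torch.arange(n)
        def __len__(self):
            return len(self.data)
        def __getitem__(self, idx):
            # idx is a list of indices when a BatchSampler is used as sampler
            return self.data[idx]

    ds = ListIndexDataset(10)
    sampler = BatchSampler(SequentialSampler(ds), batch_size=4, drop_last=False)
    loader = DataLoader(ds, sampler=sampler, collate_fn=lambda b: b[0])
    for batch in loader:
        print(batch)  # tensor([0, 1, 2, 3]), tensor([4, 5, 6, 7]), tensor([8, 9])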

pina/data/dataset.py (new file, 102 lines)
@@ -0,0 +1,102 @@
"""
This module provide basic data management functionalities
"""
import torch
from torch.utils.data import Dataset
from abc import abstractmethod
from torch_geometric.data import Batch


class PinaDatasetFactory:
    """
    Factory class for the PINA dataset. Depending on the type inside the
    conditions it creates a different dataset object:
    - PinaTensorDataset for torch.Tensor
    - PinaGraphDataset for list of torch_geometric.data.Data objects
    """

    def __new__(cls, conditions_dict, **kwargs):
        if len(conditions_dict) == 0:
            raise ValueError('No conditions provided')
        if all([isinstance(v['input_points'], torch.Tensor) for v
                in conditions_dict.values()]):
            return PinaTensorDataset(conditions_dict, **kwargs)
        elif all([isinstance(v['input_points'], list) for v
                  in conditions_dict.values()]):
            return PinaGraphDataset(conditions_dict, **kwargs)
        raise ValueError('Conditions must be either torch.Tensor or list of '
                         'Data objects.')
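The factory dispatches on the type of 'input_points' in every condition. A hypothetical call with tensor data (the condition name and shapes are invented for illustration):

    import torch

    conditions = {
        'gamma': {'input_points': torch.rand(10, 2),
                  'output_points': torch.rand(10, 1)},
    }
    dataset = PinaDatasetFactory(conditions, max_conditions_lengths={'gamma': 10})
    print(type(dataset).__name__)  # PinaTensorDataset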
class PinaDataset(Dataset):
    """
    Abstract class for the PINA dataset
    """

    def __init__(self, conditions_dict, max_conditions_lengths):
        self.conditions_dict = conditions_dict
        self.max_conditions_lengths = max_conditions_lengths
        self.conditions_length = {k: len(v['input_points']) for k, v in
                                  self.conditions_dict.items()}
        self.length = max(self.conditions_length.values())

    def _get_max_len(self):
        max_len = 0
        for condition in self.conditions_dict.values():
            max_len = max(max_len, len(condition['input_points']))
        return max_len

    def __len__(self):
        return self.length

    @abstractmethod
    def __getitem__(self, item):
        pass


class PinaTensorDataset(PinaDataset):

    def __init__(self, conditions_dict, max_conditions_lengths):
        super().__init__(conditions_dict, max_conditions_lengths)

    def _getitem_int(self, idx):
        return {
            k: {k_data: v[k_data][idx % len(v['input_points'])] for k_data
                in v.keys()} for k, v in self.conditions_dict.items()
        }

    def _getitem_list(self, idx):
        to_return_dict = {}
        for condition, data in self.conditions_dict.items():
            cond_idx = idx[:self.max_conditions_lengths[condition]]
            condition_len = self.conditions_length[condition]
            if self.length > condition_len:
                cond_idx = [idx % condition_len for idx in cond_idx]
            to_return_dict[condition] = {k: v[cond_idx]
                                         for k, v in data.items()}
        return to_return_dict

    def __getitem__(self, idx):
        if isinstance(idx, int):
            return self._getitem_int(idx)
        return self._getitem_list(idx)


class PinaGraphDataset(PinaDataset):
    pass
    """
    def __init__(self, conditions_dict, max_conditions_lengths):
        super().__init__(conditions_dict, max_conditions_lengths)

    def __getitem__(self, idx):

        Getitem method for large batch size

        to_return_dict = {}
        for condition, data in self.conditions_dict.items():
            cond_idx = idx[:self.max_conditions_lengths[condition]]
            condition_len = self.conditions_length[condition]
            if self.length > condition_len:
                cond_idx = [idx % condition_len for idx in cond_idx]
            to_return_dict[condition] = {k: Batch.from_data_list([v[i]
                                             for i in cond_idx])
                                         if isinstance(v, list)
                                         else v[cond_idx].tensor.reshape(
                                             -1, v.size(-1))
                                         for k, v in data.items()
                                         }
        return to_return_dict
    """
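PinaTensorDataset cycles shorter conditions modulo their own length, so a batch drawn for the longest condition still contains points from every other condition. The indexing rule in isolation:

    condition_len, dataset_len = 4, 10
    idx = list(range(dataset_len))
    print([i % condition_len for i in idx])  # [0, 1, 2, 3, 0, 1, 2, 3, 0, 1]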

pina/data/pina_batch.py (deleted)
@@ -1,47 +0,0 @@
"""
Batch management module
"""
from .pina_subset import PinaSubset


class Batch:
    """
    Implementation of the Batch class used during training to perform SGD
    optimization.
    """

    def __init__(self, dataset_dict, idx_dict, require_grad=True):
        self.attributes = []
        for k, v in dataset_dict.items():
            setattr(self, k, v)
            self.attributes.append(k)

        for k, v in idx_dict.items():
            setattr(self, k + '_idx', v)
        self.require_grad = require_grad

    def __len__(self):
        """
        Returns the number of elements in the batch
        :return: number of elements in the batch
        :rtype: int
        """
        length = 0
        for dataset in dir(self):
            attribute = getattr(self, dataset)
            if isinstance(attribute, list):
                length += len(getattr(self, dataset))
        return length

    def __getattribute__(self, item):
        if item in super().__getattribute__('attributes'):
            dataset = super().__getattribute__(item)
            index = super().__getattribute__(item + '_idx')
            return PinaSubset(dataset.dataset, dataset.indices[index])
        return super().__getattribute__(item)

    def __getattr__(self, item):
        if item == 'data' and len(self.attributes) == 1:
            item = self.attributes[0]
            return super().__getattribute__(item)
        raise AttributeError(f"'Batch' object has no attribute '{item}'")
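The removed Batch intercepted attribute access with __getattribute__ so that registered datasets were rewrapped as index-limited views on every read. A stripped-down sketch of that pattern (all names invented):

    class View:
        def __init__(self, data, idx):
            self.data, self.idx = data, idx

    class MiniBatch:
        def __init__(self, datasets, idx):
            self.attributes = list(datasets)
            for k, v in datasets.items():
                setattr(self, k, v)
            self.idx = idx

        def __getattribute__(self, item):
            # registered attributes come back wrapped, everything else raw
            if item in super().__getattribute__('attributes'):
                return View(super().__getattribute__(item),
                            super().__getattribute__('idx'))
            return super().__getattribute__(item)

    b = MiniBatch({'supervised': [1, 2, 3, 4]}, slice(0, 2))
    print(b.supervised.data[b.supervised.idx])  # [1, 2]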

pina/data/pina_dataloader.py (deleted)
@@ -1,68 +0,0 @@
"""
This module is used to create an iterable object used during training
"""
import math

from .pina_batch import Batch


class PinaDataLoader:
    """
    This class is used to create a dataloader to use during the training.

    :var condition_names: The names of the conditions. The order is consistent
        with the condition indices in the batches.
    :vartype condition_names: list[str]
    """

    def __init__(self, dataset_dict, batch_size, condition_names) -> None:
        """
        Initialize local variables
        :param dataset_dict: Dictionary of datasets
        :type dataset_dict: dict
        :param batch_size: Size of the batch
        :type batch_size: int
        :param condition_names: Names of the conditions
        :type condition_names: list[str]
        """
        self.condition_names = condition_names
        self.dataset_dict = dataset_dict
        self._init_batches(batch_size)

    def _init_batches(self, batch_size=None):
        """
        Create batches according to the batch_size provided in input.
        """
        self.batches = []
        n_elements = sum(len(v) for v in self.dataset_dict.values())
        if batch_size is None:
            batch_size = n_elements
        indexes_dict = {}
        n_batches = int(math.ceil(n_elements / batch_size))
        for k, v in self.dataset_dict.items():
            if n_batches != 1:
                indexes_dict[k] = math.floor(len(v) / (n_batches - 1))
            else:
                indexes_dict[k] = len(v)
        for i in range(n_batches):
            temp_dict = {}
            for k, v in indexes_dict.items():
                if i != n_batches - 1:
                    temp_dict[k] = slice(i * v, (i + 1) * v)
                else:
                    temp_dict[k] = slice(i * v, len(self.dataset_dict[k]))
            self.batches.append(
                Batch(idx_dict=temp_dict, dataset_dict=self.dataset_dict))

    def __iter__(self):
        """
        Makes dataloader object iterable
        """
        yield from self.batches

    def __len__(self):
        """
        Return the number of batches.
        :return: The number of batches.
        :rtype: int
        """
        return len(self.batches)
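The removed loader sized per-dataset slices so that every dataset contributed to each of ceil(n_elements / batch_size) batches. The arithmetic in isolation (dataset sizes invented):

    import math

    dataset_sizes = {'supervised': 100, 'physics': 60}
    batch_size = 40
    n_elements = sum(dataset_sizes.values())             # 160
    n_batches = int(math.ceil(n_elements / batch_size))  # 4
    per_batch = {k: math.floor(v / (n_batches - 1)) if n_batches != 1 else v
                 for k, v in dataset_sizes.items()}
    print(n_batches, per_batch)  # 4 {'supervised': 33, 'physics': 20}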

pina/data/pina_subset.py (deleted)
@@ -1,36 +0,0 @@
"""
Module for PinaSubset class
"""
from pina import LabelTensor
from torch import Tensor, float32


class PinaSubset:
    """
    TODO
    """
    __slots__ = ['dataset', 'indices', 'require_grad']

    def __init__(self, dataset, indices, require_grad=True):
        """
        TODO
        """
        self.dataset = dataset
        self.indices = indices
        self.require_grad = require_grad

    def __len__(self):
        """
        TODO
        """
        return len(self.indices)

    def __getattr__(self, name):
        tensor = self.dataset.__getattribute__(name)
        if isinstance(tensor, (LabelTensor, Tensor)):
            tensor = tensor[[self.indices]].to(self.dataset.device)
            return tensor.requires_grad_(
                self.require_grad) if tensor.dtype == float32 else tensor
        if isinstance(tensor, list):
            return [tensor[i] for i in self.indices]
        raise AttributeError(f"No attribute named {name}")
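PinaSubset resolved attributes lazily: each access fetched the full tensor from the parent dataset, sliced it, and moved it to the dataset's device. The core idea reduced to a sketch (device and gradient handling omitted, names invented):

    import torch

    class LazyView:
        __slots__ = ['dataset', 'indices']

        def __init__(self, dataset, indices):
            self.dataset = dataset
            self.indices = indices

        def __getattr__(self, name):
            # only called for names not found on LazyView itself
            return getattr(self.dataset, name)[self.indices]

    class Source:
        input_points = torch.arange(10.)

    view = LazyView(Source(), [1, 3, 5])
    print(view.input_points)  # tensor([1., 3., 5.])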

pina/data/sample_dataset.py (deleted)
@@ -1,35 +0,0 @@
"""
Sample dataset module
"""
from copy import deepcopy

from .base_dataset import BaseDataset
from ..condition import InputPointsEquationCondition


class SamplePointDataset(BaseDataset):
    """
    This class extends the BaseDataset to handle physical datasets
    composed of only input points.
    """
    data_type = 'physics'
    __slots__ = InputPointsEquationCondition.__slots__

    def add_points(self, data_dict, condition_idx, batching_dim=0):
        data_dict = deepcopy(data_dict)
        data_dict.pop('equation')
        super().add_points(data_dict, condition_idx)

    def _init_from_problem(self, collector_dict):
        for name, data in collector_dict.items():
            keys = list(data.keys())
            if set(self.__slots__) == set(keys):
                data = deepcopy(data)
                data.pop('equation')
                self._populate_init_list(data)
                idx = [
                    key for key, val in
                    self.problem.collector.conditions_name.items()
                    if val == name
                ]
                self.conditions_idx.append(idx)
        self.initialize()

pina/data/supervised_dataset.py (deleted)
@@ -1,13 +0,0 @@
"""
Supervised dataset module
"""
from .base_dataset import BaseDataset


class SupervisedDataset(BaseDataset):
    """
    This class extends the BaseDataset to handle datasets that consist of
    input-output pairs.
    """
    data_type = 'supervised'
    __slots__ = ['input_points', 'output_points']

pina/data/unsupervised_dataset.py (deleted)
@@ -1,14 +0,0 @@
"""
Unsupervised dataset module
"""
from .base_dataset import BaseDataset


class UnsupervisedDataset(BaseDataset):
    """
    This class extends the BaseDataset class to handle
    unsupervised datasets, composed of input points
    and, optionally, conditional variables
    """
    data_type = 'unsupervised'
    __slots__ = ['input_points', 'conditional_variables']