Implement Dataset, Dataloader and DataModule class and fix SupervisedSolver

FilippoOlivo authored on 2024-10-16 11:24:37 +02:00, committed by Nicola Demo
parent b9753c34b2
commit c9304fb9bb
30 changed files with 770 additions and 784 deletions

pina/data/__init__.py

@@ -1,7 +1,20 @@
 """
 Import data classes
 """
+__all__ = [
+    'PinaDataLoader',
+    'SupervisedDataset',
+    'SamplePointDataset',
+    'UnsupervisedDataset',
+    'Batch',
+    'PinaDataModule',
+    'BaseDataset'
+]
-from .pina_dataloader import SamplePointLoader
-from .data_dataset import DataPointDataset
+from .pina_dataloader import PinaDataLoader
+from .supervised_dataset import SupervisedDataset
+from .sample_dataset import SamplePointDataset
+from .pina_batch import Batch
+from .unsupervised_dataset import UnsupervisedDataset
+from .data_module import PinaDataModule
+from .base_dataset import BaseDataset
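With this change every public class is re-exported at package level, so downstream code can rely on a single import site. A quick sketch (assuming the package is installed from this branch):

    from pina.data import PinaDataModule, PinaDataLoader, SupervisedDataset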

pina/data/base_dataset.py (new file)

@@ -0,0 +1,107 @@
"""
Basic data module implementation
"""
from torch.utils.data import Dataset
import torch
from ..label_tensor import LabelTensor
class BaseDataset(Dataset):
    """
    BaseDataset class, which handles initialization and data retrieval.
    :var condition_indices: List of condition indices
    :var device: torch.device
    :var condition_names: dict mapping each condition index to its name
    """
def __new__(cls, problem, device):
"""
Ensure correct definition of __slots__ before initialization
:param AbstractProblem problem: The formulation of the problem.
:param torch.device device: The device on which the
dataset will be loaded.
"""
        if cls is BaseDataset:
            raise TypeError(
                'BaseDataset cannot be instantiated directly. '
                'Use a subclass.')
        if not hasattr(cls, '__slots__'):
            raise TypeError(
                '__slots__ must be defined in every subclass of BaseDataset.')
return super().__new__(cls)
    def __init__(self, problem, device):
        """
        Initialize the object based on __slots__
:param AbstractProblem problem: The formulation of the problem.
:param torch.device device: The device on which the
dataset will be loaded.
"""
super().__init__()
self.condition_names = {}
collector = problem.collector
for slot in self.__slots__:
setattr(self, slot, [])
idx = 0
for name, data in collector.data_collections.items():
keys = []
for k, v in data.items():
if isinstance(v, LabelTensor):
keys.append(k)
if sorted(self.__slots__) == sorted(keys):
for slot in self.__slots__:
current_list = getattr(self, slot)
current_list.append(data[slot])
self.condition_names[idx] = name
idx += 1
if len(getattr(self, self.__slots__[0])) > 0:
input_list = getattr(self, self.__slots__[0])
self.condition_indices = torch.cat(
[
torch.tensor([i] * len(input_list[i]), dtype=torch.uint8)
for i in range(len(self.condition_names))
],
dim=0,
)
for slot in self.__slots__:
current_attribute = getattr(self, slot)
setattr(self, slot, LabelTensor.vstack(current_attribute))
else:
self.condition_indices = torch.tensor([], dtype=torch.uint8)
for slot in self.__slots__:
setattr(self, slot, torch.tensor([]))
self.device = device
def __len__(self):
return len(getattr(self, self.__slots__[0]))
def __getattribute__(self, item):
attribute = super().__getattribute__(item)
if isinstance(attribute, LabelTensor) and attribute.dtype == torch.float32:
attribute = attribute.to(device=self.device).requires_grad_()
return attribute
def __getitem__(self, idx):
if isinstance(idx, str):
return getattr(self, idx).to(self.device)
if isinstance(idx, slice):
to_return_list = []
for i in self.__slots__:
to_return_list.append(getattr(self, i)[[idx]].to(self.device))
return to_return_list
if isinstance(idx, (tuple, list)):
if (len(idx) == 2 and isinstance(idx[0], str)
and isinstance(idx[1], (list, slice))):
tensor = getattr(self, idx[0])
return tensor[[idx[1]]].to(self.device)
if all(isinstance(x, int) for x in idx):
to_return_list = []
for i in self.__slots__:
to_return_list.append(getattr(self, i)[[idx]].to(self.device))
return to_return_list
raise ValueError(f'Invalid index {idx}')
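Since BaseDataset does all the work, a concrete subclass only declares its __slots__, and the indexing behaviour above then applies uniformly. A minimal usage sketch, assuming problem is a PINA AbstractProblem exposing a collector as in the constructor above (it is not defined here):

    import torch
    from pina.data import SupervisedDataset

    # `problem` is an assumed AbstractProblem instance, not defined here
    dataset = SupervisedDataset(problem, device=torch.device('cpu'))
    inputs = dataset['input_points']                  # whole slot, on device
    head = dataset[0:10]                              # list, one entry per slot
    pair = dataset[('output_points', slice(0, 10))]   # (slot, indices) access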

pina/data/data_dataset.py (deleted file)

@@ -1,41 +0,0 @@
from torch.utils.data import Dataset
import torch
from ..label_tensor import LabelTensor
class DataPointDataset(Dataset):
def __init__(self, problem, device) -> None:
super().__init__()
input_list = []
output_list = []
self.condition_names = []
for name, condition in problem.conditions.items():
if hasattr(condition, "output_points"):
input_list.append(problem.conditions[name].input_points)
output_list.append(problem.conditions[name].output_points)
self.condition_names.append(name)
self.input_pts = LabelTensor.stack(input_list)
self.output_pts = LabelTensor.stack(output_list)
if self.input_pts != []:
self.condition_indeces = torch.cat(
[
torch.tensor([i] * len(input_list[i]))
for i in range(len(self.condition_names))
],
dim=0,
)
else: # if there are no data points
self.condition_indeces = torch.tensor([])
self.input_pts = torch.tensor([])
self.output_pts = torch.tensor([])
self.input_pts = self.input_pts.to(device)
self.output_pts = self.output_pts.to(device)
self.condition_indeces = self.condition_indeces.to(device)
def __len__(self):
return self.input_pts.shape[0]

pina/data/data_module.py (new file)

@@ -0,0 +1,172 @@
"""
This module provides basic data management functionality
"""
import math
import torch
from lightning import LightningDataModule
from .sample_dataset import SamplePointDataset
from .supervised_dataset import SupervisedDataset
from .unsupervised_dataset import UnsupervisedDataset
from .pina_dataloader import PinaDataLoader
from .pina_subset import PinaSubset
class PinaDataModule(LightningDataModule):
"""
    This class extends LightningDataModule, allowing proper creation and
    management of the different types of datasets defined in PINA
"""
def __init__(self,
problem,
device,
train_size=.7,
test_size=.2,
eval_size=.1,
batch_size=None,
shuffle=True,
                 datasets=None):
"""
Initialize the object, creating dataset based on input problem
:param AbstractProblem problem: PINA problem
:param device: Device used for training and testing
:param train_size: number/percentage of elements in train split
:param test_size: number/percentage of elements in test split
:param eval_size: number/percentage of elements in evaluation split
:param batch_size: batch size used for training
:param datasets: list of datasets objects
"""
super().__init__()
dataset_classes = [SupervisedDataset, UnsupervisedDataset, SamplePointDataset]
if datasets is None:
self.datasets = [DatasetClass(problem, device) for DatasetClass in dataset_classes]
else:
self.datasets = datasets
self.split_length = []
self.split_names = []
if train_size > 0:
self.split_names.append('train')
self.split_length.append(train_size)
if test_size > 0:
self.split_length.append(test_size)
self.split_names.append('test')
if eval_size > 0:
self.split_length.append(eval_size)
self.split_names.append('eval')
self.batch_size = batch_size
self.condition_names = None
self.splits = {k: {} for k in self.split_names}
self.shuffle = shuffle
def setup(self, stage=None):
"""
Perform the splitting of the dataset
"""
self.extract_conditions()
if stage == 'fit' or stage is None:
for dataset in self.datasets:
if len(dataset) > 0:
splits = self.dataset_split(dataset,
self.split_length,
shuffle=self.shuffle)
for i in range(len(self.split_length)):
self.splits[
self.split_names[i]][dataset.data_type] = splits[i]
elif stage == 'test':
raise NotImplementedError("Testing pipeline not implemented yet")
else:
raise ValueError("stage must be either 'fit' or 'test'")
    def extract_conditions(self):
        """
        Extract conditions from the datasets and shift their indices so
        that they are unique across datasets
        """
# Extract number of conditions
n_conditions = 0
for dataset in self.datasets:
if n_conditions != 0:
dataset.condition_names = {
key + n_conditions: value
for key, value in dataset.condition_names.items()
}
n_conditions += len(dataset.condition_names)
self.condition_names = {
key: value
for dataset in self.datasets
for key, value in dataset.condition_names.items()
}
def train_dataloader(self):
"""
Return the training dataloader for the dataset
:return: data loader
:rtype: PinaDataLoader
"""
return PinaDataLoader(self.splits['train'], self.batch_size,
self.condition_names)
def test_dataloader(self):
"""
Return the testing dataloader for the dataset
:return: data loader
:rtype: PinaDataLoader
"""
return PinaDataLoader(self.splits['test'], self.batch_size,
self.condition_names)
def eval_dataloader(self):
"""
Return the evaluation dataloader for the dataset
:return: data loader
:rtype: PinaDataLoader
"""
return PinaDataLoader(self.splits['eval'], self.batch_size,
self.condition_names)
@staticmethod
def dataset_split(dataset, lengths, seed=None, shuffle=True):
"""
Perform the splitting of the dataset
:param dataset: dataset object we wanted to split
:param lengths: lengths of elements in dataset
:param seed: random seed
:param shuffle: shuffle dataset
:return: split dataset
:rtype: PinaSubset
"""
if sum(lengths) - 1 < 1e-3:
lengths = [
int(math.floor(len(dataset) * length)) for length in lengths
]
remainder = len(dataset) - sum(lengths)
for i in range(remainder):
lengths[i % len(lengths)] += 1
        elif sum(lengths) - 1 >= 1e-3:
            raise ValueError(
                f"Sum of lengths is {sum(lengths)}, greater than 1")
if sum(lengths) != len(dataset):
raise ValueError("Sum of lengths is not equal to dataset length")
        if shuffle:
            if seed is not None:
                generator = torch.Generator()
                generator.manual_seed(seed)
                indices = torch.randperm(sum(lengths),
                                         generator=generator).tolist()
            else:
                indices = torch.randperm(sum(lengths)).tolist()
        else:
            # int64 avoids index overflow for datasets longer than 255 elements
            indices = torch.arange(0, sum(lengths), 1,
                                   dtype=torch.int64).tolist()
offsets = [
sum(lengths[:i]) if i > 0 else 0 for i in range(len(lengths))
]
return [
PinaSubset(dataset, indices[offset:offset + length])
for offset, length in zip(offsets, lengths)
]
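A short sketch of how the module is driven, plus the split arithmetic of dataset_split reproduced standalone; problem is assumed to be a PINA AbstractProblem instance and is not defined here:

    import math
    import torch
    from pina.data import PinaDataModule

    # `problem` is an assumed AbstractProblem instance
    dm = PinaDataModule(problem, device=torch.device('cpu'),
                        train_size=.7, test_size=.2, eval_size=.1,
                        batch_size=32)
    dm.setup()                       # stage=None follows the 'fit' path
    train_loader = dm.train_dataloader()

    # dataset_split floors each fraction, then hands the remainder out
    # round-robin so the split sizes always sum to the dataset length.
    def resolve_lengths(n, fractions):
        lengths = [int(math.floor(n * f)) for f in fractions]
        for i in range(n - sum(lengths)):
            lengths[i % len(lengths)] += 1
        return lengths

    assert resolve_lengths(1003, [.7, .2, .1]) == [703, 200, 100]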

pina/data/pina_batch.py

@@ -1,36 +1,33 @@
 """
 Batch management module
 """
+from .pina_subset import PinaSubset


 class Batch:
     """
     This class is used to create a dataset of sample points.
     """

-    def __init__(self, type_, idx, *args, **kwargs) -> None:
-        if type_ == "sample":
-            if len(args) != 2:
-                raise RuntimeError
-            input = args[0]
-            conditions = args[1]
-            self.input = input[idx]
-            self.condition = conditions[idx]
-        elif type_ == "data":
-            if len(args) != 3:
-                raise RuntimeError
-            input = args[0]
-            output = args[1]
-            conditions = args[2]
-            self.input = input[idx]
-            self.output = output[idx]
-            self.condition = conditions[idx]
-        else:
-            raise ValueError("Invalid number of arguments.")
+    def __init__(self, dataset_dict, idx_dict):
+        for k, v in dataset_dict.items():
+            setattr(self, k, v)
+        for k, v in idx_dict.items():
+            setattr(self, k + '_idx', v)

     def __len__(self):
         """
         Returns the number of elements in the batch
         :return: number of elements in the batch
         :rtype: int
         """
+        length = 0
+        for dataset in dir(self):
+            attribute = getattr(self, dataset)
+            if isinstance(attribute, list):
+                length += len(attribute)
+        return length
+
+    def __getattr__(self, item):
+        if item not in dir(self):
+            raise AttributeError(f'Batch instance has no attribute {item}')
+        return PinaSubset(
+            getattr(self, item).dataset,
+            getattr(self, item).indices[getattr(self, item + '_idx')])
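From the caller's side, a Batch pairs each dataset split with the slice of it owned by the batch. A sketch mirroring the call in PinaDataLoader._init_batches (next file); supervised_split is a hypothetical PinaSubset produced by PinaDataModule.setup:

    # `supervised_split` is a hypothetical PinaSubset over a SupervisedDataset
    batch = Batch(dataset_dict={'supervised': supervised_split},
                  idx_dict={'supervised': slice(0, 32)})
    batch.supervised_idx   # slice(0, 32), the slice owned by this batch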

pina/data/pina_dataloader.py

@@ -1,11 +1,11 @@
-import torch
-from .sample_dataset import SamplePointDataset
-from .data_dataset import DataPointDataset
+"""
+This module is used to create an iterable object used during training
+"""
+import math
+from .pina_batch import Batch


-class SamplePointLoader:
+class PinaDataLoader:
     """
     This class is used to create a dataloader to use during the training.
@@ -14,198 +14,54 @@ class SamplePointLoader:
     :vartype condition_names: list[str]
     """

-    def __init__(
-        self, sample_dataset, data_dataset, batch_size=None, shuffle=True
-    ) -> None:
+    def __init__(self, dataset_dict, batch_size, condition_names) -> None:
         """
-        Constructor.
-        :param SamplePointDataset sample_pts: The sample points dataset.
-        :param int batch_size: The batch size. If ``None``, the batch size is
-            set to the number of sample points. Default is ``None``.
-        :param bool shuffle: If ``True``, the sample points are shuffled.
-            Default is ``True``.
+        Initialize local variables
+        :param dataset_dict: Dictionary of datasets
+        :type dataset_dict: dict
+        :param batch_size: Size of the batch
+        :type batch_size: int
+        :param condition_names: Names of the conditions
+        :type condition_names: list[str]
         """
-        if not isinstance(sample_dataset, SamplePointDataset):
-            raise TypeError(
-                f"Expected SamplePointDataset, got {type(sample_dataset)}"
-            )
-        if not isinstance(data_dataset, DataPointDataset):
-            raise TypeError(
-                f"Expected DataPointDataset, got {type(data_dataset)}"
-            )
-        self.n_data_conditions = len(data_dataset.condition_names)
-        self.n_phys_conditions = len(sample_dataset.condition_names)
-        data_dataset.condition_indeces += self.n_phys_conditions
-        self._prepare_sample_dataset(sample_dataset, batch_size, shuffle)
-        self._prepare_data_dataset(data_dataset, batch_size, shuffle)
-        self.condition_names = (
-            sample_dataset.condition_names + data_dataset.condition_names
-        )
-        self.batch_list = []
-        for i in range(len(self.batch_sample_pts)):
-            self.batch_list.append(("sample", i))
-        for i in range(len(self.batch_input_pts)):
-            self.batch_list.append(("data", i))
-        if shuffle:
-            self.random_idx = torch.randperm(len(self.batch_list))
-        else:
-            self.random_idx = torch.arange(len(self.batch_list))
-        self._prepare_batches()
+        self.condition_names = condition_names
+        self.dataset_dict = dataset_dict
+        self._init_batches(batch_size)

-    def _prepare_data_dataset(self, dataset, batch_size, shuffle):
-        """
-        Prepare the dataset for data points.
-        :param SamplePointDataset dataset: The dataset.
-        :param int batch_size: The batch size.
-        :param bool shuffle: If ``True``, the sample points are shuffled.
-        """
-        self.sample_dataset = dataset
-        if len(dataset) == 0:
-            self.batch_data_conditions = []
-            self.batch_input_pts = []
-            self.batch_output_pts = []
-            return
-        if batch_size is None:
-            batch_size = len(dataset)
-        batch_num = len(dataset) // batch_size
-        if len(dataset) % batch_size != 0:
-            batch_num += 1
-        output_labels = dataset.output_pts.labels
-        input_labels = dataset.input_pts.labels
-        self.tensor_conditions = dataset.condition_indeces
-        if shuffle:
-            idx = torch.randperm(dataset.input_pts.shape[0])
-            self.input_pts = dataset.input_pts[idx]
-            self.output_pts = dataset.output_pts[idx]
-            self.tensor_conditions = dataset.condition_indeces[idx]
-        self.batch_input_pts = torch.tensor_split(dataset.input_pts, batch_num)
-        self.batch_output_pts = torch.tensor_split(
-            dataset.output_pts, batch_num
-        )
-        for i in range(len(self.batch_input_pts)):
-            self.batch_input_pts[i].labels = input_labels
-            self.batch_output_pts[i].labels = output_labels
-        self.batch_data_conditions = torch.tensor_split(
-            self.tensor_conditions, batch_num
-        )

-    def _prepare_sample_dataset(self, dataset, batch_size, shuffle):
-        """
-        Prepare the dataset for sample points.
-        :param DataPointDataset dataset: The dataset.
-        :param int batch_size: The batch size.
-        :param bool shuffle: If ``True``, the sample points are shuffled.
-        """
-        self.sample_dataset = dataset
-        if len(dataset) == 0:
-            self.batch_sample_conditions = []
-            self.batch_sample_pts = []
-            return
-        if batch_size is None:
-            batch_size = len(dataset)
-        batch_num = len(dataset) // batch_size
-        if len(dataset) % batch_size != 0:
-            batch_num += 1
-        self.tensor_pts = dataset.pts
-        self.tensor_conditions = dataset.condition_indeces
-        self.batch_sample_pts = torch.tensor_split(self.tensor_pts, batch_num)
-        for i in range(len(self.batch_sample_pts)):
-            self.batch_sample_pts[i].labels = dataset.pts.labels
-        self.batch_sample_conditions = torch.tensor_split(
-            self.tensor_conditions, batch_num
-        )

-    def _prepare_batches(self):
+    def _init_batches(self, batch_size=None):
         """
-        Prepare the batches.
+        Create batches according to the batch_size provided in input.
         """
         self.batches = []
-        for i in range(len(self.batch_list)):
-            type_, idx_ = self.batch_list[i]
-            if type_ == "sample":
-                batch = Batch(
-                    "sample", idx_,
-                    self.batch_sample_pts,
-                    self.batch_sample_conditions)
-            else:
-                batch = Batch(
-                    "data", idx_,
-                    self.batch_input_pts,
-                    self.batch_output_pts,
-                    self.batch_data_conditions)
-            self.batches.append(batch)
+        n_elements = sum([len(v) for v in self.dataset_dict.values()])
+        if batch_size is None:
+            batch_size = n_elements
+        indexes_dict = {}
+        n_batches = int(math.ceil(n_elements / batch_size))
+        for k, v in self.dataset_dict.items():
+            if n_batches != 1:
+                indexes_dict[k] = math.floor(len(v) / (n_batches - 1))
+            else:
+                indexes_dict[k] = len(v)
+        for i in range(n_batches):
+            temp_dict = {}
+            for k, v in indexes_dict.items():
+                if i != n_batches - 1:
+                    temp_dict[k] = slice(i * v, (i + 1) * v)
+                else:
+                    temp_dict[k] = slice(i * v, len(self.dataset_dict[k]))
+            self.batches.append(
+                Batch(idx_dict=temp_dict, dataset_dict=self.dataset_dict))

     def __iter__(self):
         """
-        Return an iterator over the points. Any element of the iterator is a
-        dictionary with the following keys:
-            - ``pts``: The input sample points. It is a LabelTensor with the
-              shape ``(batch_size, input_dimension)``.
-            - ``output``: The output sample points. This key is present only
-              if data conditions are present. It is a LabelTensor with the
-              shape ``(batch_size, output_dimension)``.
-            - ``condition``: The integer condition indeces. It is a tensor
-              with the shape ``(batch_size, )`` of type ``torch.int64`` and
-              indicates for any ``pts`` the corresponding problem condition.
-        :return: An iterator over the points.
-        :rtype: iter
+        Makes the dataloader object iterable
         """
-        for i in self.random_idx:
-            yield self.batches[i]
+        yield from self.batches

     def __len__(self):
         """
         Return the number of batches.
         :return: The number of batches.
         :rtype: int
         """
-        return len(self.batch_list)
+        return len(self.batches)
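The batch arithmetic in _init_batches is worth a worked example (a sketch of the logic above, not library code): the batch count comes from the total element count across datasets, while each dataset contributes a fixed-size slice per batch.

    import math

    n_elements = 90 + 30   # two datasets: 90 physics points, 30 supervised
    batch_size = 40
    n_batches = int(math.ceil(n_elements / batch_size))  # 3
    step_physics = math.floor(90 / (n_batches - 1))      # 45 per batch
    step_supervised = math.floor(30 / (n_batches - 1))   # 15 per batch
    # Batches 0 and 1 take slices of 45 + 15 = 60 points each; the last
    # batch takes whatever remains in each dataset (here, nothing).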

pina/data/pina_subset.py (new file)

@@ -0,0 +1,21 @@
class PinaSubset:
    """
    Lightweight, copy-free view over a dataset restricted to given indices
    """
    __slots__ = ['dataset', 'indices']

    def __init__(self, dataset, indices):
        """
        :param dataset: The dataset from which to take the subset
        :param indices: Indices of the elements belonging to the subset
        """
        self.dataset = dataset
        self.indices = indices

    def __len__(self):
        """
        Return the number of elements in the subset
        """
        return len(self.indices)

    def __getattr__(self, name):
        return self.dataset.__getattribute__(name)
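PinaSubset never copies data: it stores the dataset and the subset indices, and __getattr__ forwards any other lookup to the wrapped dataset. A small sketch with a stand-in dataset object:

    from pina.data.pina_subset import PinaSubset

    class _DummyDataset:
        condition_names = {0: 'data'}

    sub = PinaSubset(_DummyDataset(), indices=[3, 5, 7])
    print(len(sub))             # 3, from the indices
    print(sub.condition_names)  # {0: 'data'}, forwarded to the dataset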

pina/data/sample_dataset.py

@@ -1,43 +1,12 @@
-from torch.utils.data import Dataset
-import torch
-from ..label_tensor import LabelTensor
+"""
+Sample dataset module
+"""
+from .base_dataset import BaseDataset


-class SamplePointDataset(Dataset):
+class SamplePointDataset(BaseDataset):
     """
-    This class is used to create a dataset of sample points.
+    This class extends the BaseDataset to handle physical datasets
+    composed of only input points.
     """
-
-    def __init__(self, problem, device) -> None:
-        """
-        :param dict input_pts: The input points.
-        """
-        super().__init__()
-        pts_list = []
-        self.condition_names = []
-        for name, condition in problem.conditions.items():
-            if not hasattr(condition, "output_points"):
-                pts_list.append(problem.input_pts[name])
-                self.condition_names.append(name)
-        self.pts = LabelTensor.stack(pts_list)
-        if self.pts != []:
-            self.condition_indeces = torch.cat(
-                [
-                    torch.tensor([i] * len(pts_list[i]))
-                    for i in range(len(self.condition_names))
-                ],
-                dim=0,
-            )
-        else:  # if there are no sample points
-            self.condition_indeces = torch.tensor([])
-            self.pts = torch.tensor([])
-        self.pts = self.pts.to(device)
-        self.condition_indeces = self.condition_indeces.to(device)
-
-    def __len__(self):
-        return self.pts.shape[0]
+    data_type = 'physics'
+    __slots__ = ['input_points']

pina/data/supervised_dataset.py (new file)

@@ -0,0 +1,12 @@
"""
Supervised dataset module
"""
from .base_dataset import BaseDataset
class SupervisedDataset(BaseDataset):
"""
This class extends the BaseDataset to handle datasets that consist of input-output pairs.
"""
data_type = 'supervised'
__slots__ = ['input_points', 'output_points']

pina/data/unsupervised_dataset.py (new file)

@@ -0,0 +1,13 @@
"""
Unsupervised dataset module
"""
from .base_dataset import BaseDataset
class UnsupervisedDataset(BaseDataset):
    """
    This class extends BaseDataset to handle unsupervised datasets,
    composed of input points and, optionally, conditional variables
    """
data_type = 'unsupervised'
__slots__ = ['input_points', 'conditional_variables']
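Because each concrete dataset only declares data_type and __slots__, adding a new dataset type reduces to a small subclass. A hypothetical example, not part of this commit:

    from pina.data.base_dataset import BaseDataset

    class ParametricDataset(BaseDataset):
        """
        Hypothetical dataset of input points paired with parameters.
        """
        data_type = 'parametric'
        __slots__ = ['input_points', 'parameters']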