Improve efficiency and refactor LabelTensor, Codacy corrections, and fix bug in PinaBatch

FilippoOlivo
2024-10-23 15:04:28 +02:00
committed by Nicola Demo
parent ccc5f5a322
commit ea3d1924e7
13 changed files with 496 additions and 395 deletions

View File

@@ -1,10 +1,12 @@
"""
Basic data module implementation
"""
from torch.utils.data import Dataset
import torch
import logging
from torch.utils.data import Dataset
from ..label_tensor import LabelTensor
from ..graph import Graph
class BaseDataset(Dataset):
@@ -12,10 +14,9 @@ class BaseDataset(Dataset):
BaseDataset class, which handles initialization and data retrieval
:var condition_indices: tensor of condition indices, one entry per data point
:var device: torch.device
:var condition_names: dict of condition index and corresponding name
"""
def __new__(cls, problem, device):
def __new__(cls, problem=None, device=torch.device('cpu')):
"""
Ensure correct definition of __slots__ before initialization
:param AbstractProblem problem: The formulation of the problem.
@@ -30,7 +31,7 @@ class BaseDataset(Dataset):
'Something is wrong, __slots__ must be defined in subclasses.')
return object.__new__(cls)
def __init__(self, problem, device):
def __init__(self, problem=None, device=torch.device('cpu')):
""""
Initialize the object based on __slots__
:param AbstractProblem problem: The formulation of the problem.
@@ -38,79 +39,118 @@ class BaseDataset(Dataset):
dataset will be loaded.
"""
super().__init__()
self.condition_names = {}
collector = problem.collector
self.empty = True
self.problem = problem
self.device = device
self.condition_indices = None
for slot in self.__slots__:
setattr(self, slot, [])
num_el_per_condition = []
idx = 0
for name, data in collector.data_collections.items():
self.num_el_per_condition = []
self.conditions_idx = []
if self.problem is not None:
self._init_from_problem(self.problem.collector.data_collections)
self.initialized = False
def _init_from_problem(self, collector_dict):
"""
TODO
"""
for name, data in collector_dict.items():
keys = list(data.keys())
current_cond_num_el = None
if sorted(self.__slots__) == sorted(keys):
for slot in self.__slots__:
slot_data = data[slot]
if isinstance(slot_data, (LabelTensor, torch.Tensor,
Graph)):
if current_cond_num_el is None:
current_cond_num_el = len(slot_data)
elif current_cond_num_el != len(slot_data):
raise ValueError('Different number of conditions')
current_list = getattr(self, slot)
current_list += [data[slot]] if not (
isinstance(data[slot], list)) else data[slot]
num_el_per_condition.append(current_cond_num_el)
self.condition_names[idx] = name
idx += 1
if num_el_per_condition:
if set(self.__slots__) == set(keys):
self._populate_init_list(data)
idx = [key for key, val in
self.problem.collector.conditions_name.items() if
val == name]
self.conditions_idx.append(idx)
self.initialize()
def add_points(self, data_dict, condition_idx, batching_dim=0):
"""
This method fills the internal lists of data points
:param data_dict: dictionary containing the data points
:param condition_idx: index of the condition to which the data points
belong
:param batching_dim: dimension along which the data points are batched
:raises: ValueError if the dataset has already been initialized
"""
if not self.initialized:
self._populate_init_list(data_dict, batching_dim)
self.conditions_idx.append(condition_idx)
self.empty = False
else:
raise ValueError('Dataset already initialized')
def _populate_init_list(self, data_dict, batching_dim=0):
current_cond_num_el = None
for slot in data_dict.keys():
slot_data = data_dict[slot]
if batching_dim != 0:
if isinstance(slot_data, (LabelTensor, torch.Tensor)):
dims = len(slot_data.size())
slot_data = slot_data.permute(
[batching_dim] + [dim for dim in range(dims) if
dim != batching_dim])
if current_cond_num_el is None:
current_cond_num_el = len(slot_data)
elif current_cond_num_el != len(slot_data):
raise ValueError('Different number of elements in the same condition')
current_list = getattr(self, slot)
current_list += [slot_data] if not (
isinstance(slot_data, list)) else slot_data
self.num_el_per_condition.append(current_cond_num_el)
def initialize(self):
"""
Initialize the dataset tensors/LabelTensors/lists from the internal
lists filled beforehand
"""
logging.debug(f'Initialize dataset {self.__class__.__name__}')
if self.num_el_per_condition:
self.condition_indices = torch.cat(
[
torch.tensor([i] * num_el_per_condition[i],
torch.tensor([i] * self.num_el_per_condition[i],
dtype=torch.uint8)
for i in range(len(num_el_per_condition))
for i in range(len(self.num_el_per_condition))
],
dim=0,
dim=0
)
for slot in self.__slots__:
current_attribute = getattr(self, slot)
if all(isinstance(a, LabelTensor) for a in current_attribute):
setattr(self, slot, LabelTensor.vstack(current_attribute))
else:
self.condition_indices = torch.tensor([], dtype=torch.uint8)
for slot in self.__slots__:
setattr(self, slot, torch.tensor([]))
self.device = device
self.initialized = True
def __len__(self):
"""
:return: Number of elements in the dataset
"""
return len(getattr(self, self.__slots__[0]))
def __getattribute__(self, item):
attribute = super().__getattribute__(item)
if isinstance(attribute,
LabelTensor) and attribute.dtype == torch.float32:
attribute = attribute.to(device=self.device).requires_grad_()
return attribute
def __getitem__(self, idx):
if isinstance(idx, str):
return getattr(self, idx).to(self.device)
if isinstance(idx, slice):
to_return_list = []
for i in self.__slots__:
to_return_list.append(getattr(self, i)[idx].to(self.device))
return to_return_list
"""
:param idx: index of the items to retrieve: an int, slice, list or tuple
:return: list containing the indexed data, one entry per slot
"""
if not isinstance(idx, (tuple, list, slice, int)):
raise IndexError("Invalid index")
tensors = []
for attribute in self.__slots__:
tensor = getattr(self, attribute)
if isinstance(tensor, (LabelTensor, torch.Tensor)):
tensors.append(tensor.__getitem__(idx))
elif isinstance(tensor, list):
if isinstance(idx, (list, tuple)):
tensor = [tensor[i] for i in idx]
tensors.append(tensor)
return tensors
if isinstance(idx, (tuple, list)):
if (len(idx) == 2 and isinstance(idx[0], str)
and isinstance(idx[1], (list, slice))):
tensor = getattr(self, idx[0])
return tensor[[idx[1]]].to(self.device)
if all(isinstance(x, int) for x in idx):
to_return_list = []
for i in self.__slots__:
to_return_list.append(
getattr(self, i)[[idx]].to(self.device))
return to_return_list
raise ValueError(f'Invalid index {idx}')
def apply_shuffle(self, indices):
for slot in self.__slots__:
if slot != 'equation':
attribute = getattr(self, slot)
if isinstance(attribute, (LabelTensor, torch.Tensor)):
setattr(self, slot, attribute[[indices]])
if isinstance(attribute, list):
setattr(self, slot, [attribute[i] for i in indices])
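
For reference, a minimal sketch of the two-phase lifecycle this refactor introduces: data is first accumulated per condition with add_points, then stacked once with initialize. The MyDataset subclass, slot names, and toy tensors below are illustrative, and the import path of BaseDataset is assumed:

import torch
from pina import LabelTensor
from pina.data.base_dataset import BaseDataset  # assumed module path

class MyDataset(BaseDataset):
    # __new__ requires every subclass to define __slots__
    __slots__ = ['input_points', 'output_points']

dataset = MyDataset(device=torch.device('cpu'))
pts = LabelTensor(torch.rand(10, 2), labels=['x', 'y'])
out = LabelTensor(torch.rand(10, 1), labels=['u'])

# Phase 1: accumulate per-condition data in plain Python lists
dataset.add_points({'input_points': pts, 'output_points': out},
                   condition_idx=0)
# Phase 2: stack the lists into tensors once every condition is added
dataset.initialize()
assert len(dataset) == 10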

View File

@@ -4,7 +4,8 @@ This module provides basic data management functionalities
import math
import torch
from lightning import LightningDataModule
import logging
from pytorch_lightning import LightningDataModule
from .sample_dataset import SamplePointDataset
from .supervised_dataset import SupervisedDataset
from .unsupervised_dataset import UnsupervisedDataset
@@ -22,8 +23,9 @@ class PinaDataModule(LightningDataModule):
problem,
device,
train_size=.7,
test_size=.2,
eval_size=.1,
test_size=.1,
val_size=.2,
predict_size=0.,
batch_size=None,
shuffle=True,
datasets=None):
@@ -37,37 +39,64 @@ class PinaDataModule(LightningDataModule):
:param batch_size: batch size used for training
:param datasets: list of datasets objects
"""
logging.debug('Start initialization of Pina DataModule')
logging.info('Start initialization of Pina DataModule')
super().__init__()
dataset_classes = [SupervisedDataset, UnsupervisedDataset,
SamplePointDataset]
self.problem = problem
self.device = device
self.dataset_classes = [SupervisedDataset, UnsupervisedDataset,
SamplePointDataset]
if datasets is None:
self.datasets = [DatasetClass(problem, device) for DatasetClass in
dataset_classes]
self.datasets = None
else:
self.datasets = datasets
self.split_length = []
self.split_names = []
self.loader_functions = {}
self.batch_size = batch_size
self.condition_names = problem.collector.conditions_name
if train_size > 0:
self.split_names.append('train')
self.split_length.append(train_size)
self.loader_functions['train_dataloader'] = lambda: PinaDataLoader(
self.splits['train'], self.batch_size, self.condition_names)
if test_size > 0:
self.split_length.append(test_size)
self.split_names.append('test')
if eval_size > 0:
self.split_length.append(eval_size)
self.split_names.append('eval')
self.batch_size = batch_size
self.condition_names = None
self.loader_functions['test_dataloader'] = lambda: PinaDataLoader(
self.splits['test'], self.batch_size, self.condition_names)
if val_size > 0:
self.split_length.append(val_size)
self.split_names.append('val')
self.loader_functions['val_dataloader'] = lambda: PinaDataLoader(
self.splits['val'], self.batch_size,
self.condition_names)
if predict_size > 0:
self.split_length.append(predict_size)
self.split_names.append('predict')
self.loader_functions[
'predict_dataloader'] = lambda: PinaDataLoader(
self.splits['predict'], self.batch_size,
self.condition_names)
self.splits = {k: {} for k in self.split_names}
self.shuffle = shuffle
for k, v in self.loader_functions.items():
setattr(self, k, v)
def prepare_data(self):
if self.datasets is None:
self._create_datasets()
def setup(self, stage=None):
"""
Perform the splitting of the dataset
"""
self.extract_conditions()
logging.debug('Start setup of Pina DataModule obj')
if self.datasets is None:
self._create_datasets()
if stage == 'fit' or stage is None:
for dataset in self.datasets:
if len(dataset) > 0:
@@ -82,53 +111,6 @@ class PinaDataModule(LightningDataModule):
else:
raise ValueError("stage must be either 'fit' or 'test'")
def extract_conditions(self):
"""
Extract conditions from dataset and update condition indices
"""
# Extract number of conditions
n_conditions = 0
for dataset in self.datasets:
if n_conditions != 0:
dataset.condition_names = {
key + n_conditions: value
for key, value in dataset.condition_names.items()
}
n_conditions += len(dataset.condition_names)
self.condition_names = {
key: value
for dataset in self.datasets
for key, value in dataset.condition_names.items()
}
def train_dataloader(self):
"""
Return the training dataloader for the dataset
:return: data loader
:rtype: PinaDataLoader
"""
return PinaDataLoader(self.splits['train'], self.batch_size,
self.condition_names)
def test_dataloader(self):
"""
Return the testing dataloader for the dataset
:return: data loader
:rtype: PinaDataLoader
"""
return PinaDataLoader(self.splits['test'], self.batch_size,
self.condition_names)
def eval_dataloader(self):
"""
Return the evaluation dataloader for the dataset
:return: data loader
:rtype: PinaDataLoader
"""
return PinaDataLoader(self.splits['eval'], self.batch_size,
self.condition_names)
@staticmethod
def dataset_split(dataset, lengths, seed=None, shuffle=True):
"""
@@ -141,30 +123,28 @@ class PinaDataModule(LightningDataModule):
:rtype: PinaSubset
"""
if sum(lengths) - 1 < 1e-3:
len_dataset = len(dataset)
lengths = [
int(math.floor(len(dataset) * length)) for length in lengths
int(math.floor(len_dataset * length)) for length in lengths
]
remainder = len(dataset) - sum(lengths)
for i in range(remainder):
lengths[i % len(lengths)] += 1
elif sum(lengths) - 1 >= 1e-3:
raise ValueError(f"Sum of lengths is {sum(lengths)} less than 1")
if sum(lengths) != len(dataset):
raise ValueError("Sum of lengths is not equal to dataset length")
if shuffle:
if seed is not None:
generator = torch.Generator()
generator.manual_seed(seed)
indices = torch.randperm(sum(lengths),
generator=generator).tolist()
generator=generator)
else:
indices = torch.arange(sum(lengths)).tolist()
else:
indices = torch.arange(0, sum(lengths), 1,
dtype=torch.uint8).tolist()
indices = torch.randperm(sum(lengths))
dataset.apply_shuffle(indices)
indices = torch.arange(0, sum(lengths), 1,
dtype=torch.uint8).tolist()
offsets = [
sum(lengths[:i]) if i > 0 else 0 for i in range(len(lengths))
]
@@ -172,3 +152,29 @@ class PinaDataModule(LightningDataModule):
PinaSubset(dataset, indices[offset:offset + length])
for offset, length in zip(offsets, lengths)
]
def _create_datasets(self):
"""
Create the dataset objects and fill them with data
"""
logging.debug('Dataset creation in PinaDataModule obj')
collector = self.problem.collector
batching_dim = self.problem.batching_dimension
datasets_slots = [i.__slots__ for i in self.dataset_classes]
self.datasets = [dataset(device=self.device) for dataset in
self.dataset_classes]
logging.debug('Filling datasets in PinaDataModule obj')
for name, data in collector.data_collections.items():
keys = list(data.keys())
idx = [key for key, val in collector.conditions_name.items() if
val == name]
for i, slot in enumerate(datasets_slots):
if slot == keys:
self.datasets[i].add_points(data, idx[0], batching_dim)
continue
datasets = []
for dataset in self.datasets:
if not dataset.empty:
dataset.initialize()
datasets.append(dataset)
self.datasets = datasets
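
As a sanity check on the split arithmetic in dataset_split above, a standalone sketch of how fractional lengths become integer counts: each share is floored, then the leftover elements are handed out one at a time, round-robin (the split_counts helper name is hypothetical):

import math

def split_counts(n, fractions):
    # Floor each fractional share, then distribute the remainder
    # round-robin, exactly as dataset_split does.
    lengths = [int(math.floor(n * f)) for f in fractions]
    remainder = n - sum(lengths)
    for i in range(remainder):
        lengths[i % len(lengths)] += 1
    return lengths

# New default splits (train=.7, val=.2, test=.1) over 103 samples:
print(split_counts(103, [0.7, 0.2, 0.1]))  # [73, 20, 10]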

View File

@@ -10,13 +10,15 @@ class Batch:
optimization.
"""
def __init__(self, dataset_dict, idx_dict):
def __init__(self, dataset_dict, idx_dict, require_grad=True):
self.attributes = []
for k, v in dataset_dict.items():
setattr(self, k, v)
self.attributes.append(k)
for k, v in idx_dict.items():
setattr(self, k + '_idx', v)
self.require_grad = require_grad
def __len__(self):
"""
@@ -31,9 +33,18 @@ class Batch:
length += len(getattr(self, dataset))
return length
def __getattribute__(self, item):
if item in super().__getattribute__('attributes'):
dataset = super().__getattribute__(item)
index = super().__getattribute__(item + '_idx')
return PinaSubset(
dataset.dataset,
dataset.indices[index])
else:
return super().__getattribute__(item)
def __getattr__(self, item):
if not item in dir(self):
raise AttributeError(f'Batch instance has no attribute {item}')
return PinaSubset(
getattr(self, item).dataset,
getattr(self, item).indices[self.coordinates_dict[item]])
if item == 'data' and len(self.attributes) == 1:
item = self.attributes[0]
return getattr(self, item)
raise AttributeError(f"'Batch' object has no attribute '{item}'")

View File

@@ -2,21 +2,22 @@
Module for PinaSubset class
"""
from pina import LabelTensor
from torch import Tensor
from torch import Tensor, float32
class PinaSubset:
"""
Subset of a dataset, restricted to a given set of indices.
"""
__slots__ = ['dataset', 'indices']
__slots__ = ['dataset', 'indices', 'require_grad']
def __init__(self, dataset, indices):
def __init__(self, dataset, indices, require_grad=True):
"""
Initialize the subset from a dataset and the indices it covers.
"""
self.dataset = dataset
self.indices = indices
self.require_grad = require_grad
def __len__(self):
"""
@@ -27,7 +28,9 @@ class PinaSubset:
def __getattr__(self, name):
tensor = self.dataset.__getattribute__(name)
if isinstance(tensor, (LabelTensor, Tensor)):
return tensor[self.indices]
tensor = tensor[[self.indices]].to(self.dataset.device)
return tensor.requires_grad_(
self.require_grad) if tensor.dtype == float32 else tensor
if isinstance(tensor, list):
return [tensor[i] for i in self.indices]
raise AttributeError("No attribute named {}".format(name))
raise AttributeError(f"No attribute named {name}")

View File

@@ -1,8 +1,9 @@
"""
Sample dataset module
"""
from copy import deepcopy
from .base_dataset import BaseDataset
from ..condition.input_equation_condition import InputPointsEquationCondition
from ..condition import InputPointsEquationCondition
class SamplePointDataset(BaseDataset):
@@ -12,3 +13,21 @@ class SamplePointDataset(BaseDataset):
"""
data_type = 'physics'
__slots__ = InputPointsEquationCondition.__slots__
def add_points(self, data_dict, condition_idx, batching_dim=0):
data_dict = deepcopy(data_dict)
data_dict.pop('equation')
super().add_points(data_dict, condition_idx)
def _init_from_problem(self, collector_dict, batching_dim=0):
for name, data in collector_dict.items():
keys = list(data.keys())
if set(self.__slots__) == set(keys):
data = deepcopy(data)
data.pop('equation')
self._populate_init_list(data)
idx = [key for key, val in
self.problem.collector.conditions_name.items() if
val == name]
self.conditions_idx.append(idx)
self.initialize()
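
A toy illustration of why SamplePointDataset pops the 'equation' entry before delegating (the payload below is made up; in pina the value would be an Equation object): only tensor-like entries can be stacked by _populate_init_list, so the equation is dropped from a copy of the dict.

from copy import deepcopy
import torch

data = {
    'input_points': torch.rand(8, 2),   # stackable sample points
    'equation': lambda u: u,            # stand-in for an Equation object
}
filtered = deepcopy(data)               # leave the caller's dict intact
filtered.pop('equation')                # as SamplePointDataset.add_points does
print(list(filtered))                   # ['input_points']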