Update solvers (#434)
* Enable DDP training with batch_size=None and add validity check for split sizes
* Refactoring SolverInterfaces (#435)
* Solver update + weighting
* Updating PINN for 0.2
* Modify GAROM + tests
* Adding more versatile loggers
* Disable compilation when running on Windows
* Fix tests

---------

Co-authored-by: giovanni <giovanni.canali98@yahoo.it>
Co-authored-by: FilippoOlivo <filippo@filippoolivo.com>
committed by Nicola Demo
parent 780c4921eb
commit 9cae9a438f
@@ -1,7 +1,5 @@
"""PINA Callbacks Implementations"""
from lightning.pytorch.core.module import LightningModule
from lightning.pytorch.trainer.trainer import Trainer
import torch
import copy
@@ -16,30 +14,19 @@ class MetricTracker(Callback):

    def __init__(self, metrics_to_track=None):
        """
        PINA Implementation of a Lightning Callback for Metric Tracking.
        Lightning Callback for Metric Tracking.

        This class provides functionality to track relevant metrics during
        the training process.
        Tracks specific metrics during the training process.

        :ivar _collection: A list to store collected metrics after each
            training epoch.
        :ivar _collection: A list to store collected metrics after each epoch.

        :param trainer: The trainer object managing the training process.
        :type trainer: pytorch_lightning.Trainer

        :return: A dictionary containing aggregated metric values.
        :rtype: dict

        Example:
            >>> tracker = MetricTracker()
            >>> # ... Perform training ...
            >>> metrics = tracker.metrics
        :param metrics_to_track: List of metrics to track. Defaults to train/val loss.
        :type metrics_to_track: list, optional
        """
        super().__init__()
        self._collection = []
        if metrics_to_track is not None:
            metrics_to_track = ['train_loss_epoch', 'train_loss_step', 'val_loss']
        self.metrics_to_track = metrics_to_track
        # Default to tracking 'train_loss' and 'val_loss' if not specified
        self.metrics_to_track = metrics_to_track or ['train_loss', 'val_loss']

    def on_train_epoch_end(self, trainer, pl_module):
        """
@@ -47,35 +34,44 @@ class MetricTracker(Callback):

        :param trainer: The trainer object managing the training process.
        :type trainer: pytorch_lightning.Trainer
        :param pl_module: Placeholder argument.
        :param pl_module: The model being trained (not used here).
        """
        super().on_train_epoch_end(trainer, pl_module)
        # Track metrics after the first epoch onwards
        if trainer.current_epoch > 0:
            self._collection.append(
                copy.deepcopy(trainer.logged_metrics)
            )  # track them
            # Append only the tracked metrics to avoid unnecessary data
            tracked_metrics = {
                k: v for k, v in trainer.logged_metrics.items()
                if k in self.metrics_to_track
            }
            self._collection.append(copy.deepcopy(tracked_metrics))

    @property
    def metrics(self):
        """
        Aggregate collected metrics during training.
        Aggregate collected metrics over all epochs.

        :return: A dictionary containing aggregated metric values.
        :rtype: dict
        """
        common_keys = set.intersection(*map(set, self._collection))
        v = {
        if not self._collection:
            return {}

        # Get intersection of keys across all collected dictionaries
        common_keys = set(self._collection[0]).intersection(*self._collection[1:])

        # Stack the metric values for common keys and return
        return {
            k: torch.stack([dic[k] for dic in self._collection])
            for k in common_keys
            for k in common_keys if k in self.metrics_to_track
        }
        return v

class PINAProgressBar(TQDMProgressBar):

    BAR_FORMAT = "{l_bar}{bar}| {n_fmt}/{total_fmt} [{elapsed}<{remaining}, {rate_noinv_fmt}{postfix}]"

    def __init__(self, metrics="val_loss", **kwargs):
    def __init__(self, metrics="val", **kwargs):
        """
        PINA Implementation of a Lightning Callback for enriching the progress
        bar.
@@ -131,14 +127,6 @@ class PINAProgressBar(TQDMProgressBar):
        pbar_metrics = {
            key: pbar_metrics[key] for key in self._sorted_metrics
        }
        duplicates = list(standard_metrics.keys() & pbar_metrics.keys())
        if duplicates:
            rank_zero_warn(
                f"The progress bar already tracks a metric with the name(s) '{', '.join(duplicates)}' and"
                f" `self.log('{duplicates[0]}', ..., prog_bar=True)` will overwrite this value. "
                " If this is undesired, change the name or override `get_metrics()` in the progress bar callback.",
            )

        return {**standard_metrics, **pbar_metrics}

    def on_fit_start(self, trainer, pl_module):
@@ -154,7 +142,7 @@ class PINAProgressBar(TQDMProgressBar):
        for key in self._sorted_metrics:
            if (
                key not in trainer.solver.problem.conditions.keys()
                and key != "mean"
                and key != "train" and key != "val"
            ):
                raise KeyError(f"Key '{key}' is not present in the dictionary")
        # add the loss suffix

@@ -1,5 +1,4 @@
from . import LabelTensor
from .utils import check_consistency, merge_tensors
from .utils import check_consistency


class Collector:
@@ -8,11 +7,6 @@ class Collector:
        # creating a hook between collector and problem
        self.problem = problem

        # this variable is used to store the data in the form:
        # {'[condition_name]' :
        #      {'input_points' : Tensor,
        #       '[equation/output_points/conditional_variables]': Tensor}
        # }
        # those variables are used for the dataloading
        self._data_collections = {name: {} for name in self.problem.conditions}
        self.conditions_name = {
@@ -1,6 +1,5 @@
import logging
from lightning.pytorch import LightningDataModule
import math
import torch
from ..label_tensor import LabelTensor
from torch.utils.data import DataLoader, BatchSampler, SequentialSampler, \
@@ -10,8 +9,38 @@ from .dataset import PinaDatasetFactory
from ..collector import Collector
class DummyDataloader:
    def __init__(self, dataset, device):
        self.dataset = dataset.get_all_data()
    """
    Dummy dataloader used when batch size is None. It collects all the data
    in self.dataset and returns it as a single batch when called.
    """

    def __init__(self, dataset):
        """
        :param dataset: The dataset object to be processed.

        :notes:
            - **Distributed Environment**:
                - Divides the dataset across processes using the
                  rank and world size.
                - Fetches only the portion of data corresponding to
                  the current process.
            - **Non-Distributed Environment**:
                - Fetches the entire dataset.
        """
        if (torch.distributed.is_available() and
                torch.distributed.is_initialized()):
            rank = torch.distributed.get_rank()
            world_size = torch.distributed.get_world_size()
            if len(dataset) < world_size:
                raise RuntimeError(
                    "Dimension of the dataset smaller than world size."
                    " Increase the size of the partition or use a single GPU")
            idx, i = [], rank
            while i < len(dataset):
                idx.append(i)
                i += world_size
            self.dataset = dataset.fetch_from_idx_list(idx)
        else:
            self.dataset = dataset.get_all_data()

    def __iter__(self):
        return self
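
For illustration (not part of the commit): the rank striding in DummyDataloader partitions indices round-robin across DDP processes. A minimal standalone sketch, assuming a plain integer-indexed dataset:

    # hypothetical standalone sketch of the round-robin partitioning above
    def partition_indices(n, rank, world_size):
        # process `rank` takes indices rank, rank + world_size, ...
        return list(range(rank, n, world_size))

    # e.g. 10 samples on 4 processes: rank 0 gets [0, 4, 8], rank 1 gets [1, 5, 9]
    assert partition_indices(10, 0, 4) == [0, 4, 8]
    assert partition_indices(10, 1, 4) == [1, 5, 9]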
@@ -50,7 +79,7 @@ class Collator:
            for arg in condition_args:
                data_list = [batch[idx][condition_name][arg] for idx in range(
                    min(len(batch),
                        self.max_conditions_lengths[condition_name]))]
                if isinstance(data_list[0], LabelTensor):
                    single_cond_dict[arg] = LabelTensor.stack(data_list)
                elif isinstance(data_list[0], torch.Tensor):
@@ -61,7 +90,6 @@ class Collator:
            batch_dict[condition_name] = single_cond_dict
        return batch_dict

    def __call__(self, batch):
        return self.callable_function(batch)
@@ -99,6 +127,7 @@ class PinaDataModule(LightningDataModule):
    ):
        """
        Initialize the object, creating dataset based on input problem
        :param problem: Problem where data are defined
        :param train_size: number/percentage of elements in train split
        :param test_size: number/percentage of elements in test split
        :param val_size: number/percentage of elements in evaluation split
@@ -112,6 +141,9 @@ class PinaDataModule(LightningDataModule):
        self.shuffle = shuffle
        self.repeat = repeat

        # Check if the splits are correct
        self._check_slit_sizes(train_size, test_size, val_size, predict_size)

        # Begin Data splitting
        splits_dict = {}
        if train_size > 0:
@@ -179,23 +211,28 @@ class PinaDataModule(LightningDataModule):
        len_condition = len(condition_dict['input_points'])

        lengths = [
            int(math.floor(len_condition * length)) for length in
            int(len_condition * length) for length in
            splits_dict.values()
        ]

        remainder = len_condition - sum(lengths)
        for i in range(remainder):
            lengths[i % len(lengths)] += 1
        splits_dict = {k: v for k, v in zip(splits_dict.keys(), lengths)
        splits_dict = {k: max(1, v) for k, v in zip(splits_dict.keys(), lengths)
                       }
        to_return_dict = {}
        offset = 0

        for stage, stage_len in splits_dict.items():
            to_return_dict[stage] = {k: v[offset:offset + stage_len]
                                     for k, v in condition_dict.items() if
                                     k != 'equation'
                                     # Equations are NEVER dataloaded
                                     }
            if offset + stage_len > len_condition:
                offset = len_condition - 1
                continue
            offset += stage_len
        return to_return_dict
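
For illustration (not part of the commit): the split arithmetic above floors each fractional length and then distributes the remainder one index at a time, so the per-stage lengths always sum to the condition length. A minimal sketch with hypothetical numbers:

    import math

    def split_lengths(n, fractions):
        lengths = [int(math.floor(n * f)) for f in fractions]
        remainder = n - sum(lengths)
        for i in range(remainder):
            lengths[i % len(lengths)] += 1
        return lengths

    # 7 samples with a 70/20/10 split: floors give [4, 1, 0], the
    # remainder of 2 goes to the first two stages -> [5, 2, 0]
    assert split_lengths(7, [0.7, 0.2, 0.1]) == [5, 2, 0]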
@@ -234,6 +271,26 @@ class PinaDataModule(LightningDataModule):
                dataset_dict[key].update({condition_name: data})
        return dataset_dict

    def _create_dataloader(self, split, dataset):
        shuffle = self.shuffle if split == 'train' else False
        # Use custom batching (good if batch size is large)
        if self.batch_size is not None:
            sampler = PinaSampler(dataset, self.batch_size,
                                  shuffle, self.automatic_batching)
            if self.automatic_batching:
                collate = Collator(self.find_max_conditions_lengths(split))
            else:
                collate = Collator(None, dataset)
            return DataLoader(dataset, self.batch_size,
                              collate_fn=collate, sampler=sampler)
        dataloader = DummyDataloader(dataset)
        dataloader.dataset = self._transfer_batch_to_device(
            dataloader.dataset, self.trainer.strategy.root_device, 0)
        self.transfer_batch_to_device = self._transfer_batch_to_device_dummy
        return dataloader

    def find_max_conditions_lengths(self, split):
        max_conditions_lengths = {}
        for k, v in self.collector_splits[split].items():
@@ -250,52 +307,19 @@ class PinaDataModule(LightningDataModule):
        """
        Create the validation dataloader
        """
        # Use custom batching (good if batch size is large)
        if self.batch_size is not None:
            sampler = PinaSampler(self.val_dataset, self.batch_size,
                                  self.shuffle, self.automatic_batching)
            if self.automatic_batching:
                collate = Collator(self.find_max_conditions_lengths('val'))
            else:
                collate = Collator(None, self.val_dataset)
            return DataLoader(self.val_dataset, self.batch_size,
                              collate_fn=collate, sampler=sampler)
        dataloader = DummyDataloader(self.val_dataset,
                                     self.trainer.strategy.root_device)
        dataloader.dataset = self._transfer_batch_to_device(dataloader.dataset,
                                                            self.trainer.strategy.root_device,
                                                            0)
        self.transfer_batch_to_device = self._transfer_batch_to_device_dummy
        return dataloader
        return self._create_dataloader('val', self.val_dataset)

    def train_dataloader(self):
        """
        Create the training dataloader
        """
        # Use custom batching (good if batch size is large)
        if self.batch_size is not None:
            sampler = PinaSampler(self.train_dataset, self.batch_size,
                                  self.shuffle, self.automatic_batching)
            if self.automatic_batching:
                collate = Collator(self.find_max_conditions_lengths('train'))
            else:
                collate = Collator(None, self.train_dataset)
            return DataLoader(self.train_dataset, self.batch_size,
                              collate_fn=collate, sampler=sampler)
        dataloader = DummyDataloader(self.train_dataset,
                                     self.trainer.strategy.root_device)
        dataloader.dataset = self._transfer_batch_to_device(dataloader.dataset,
                                                            self.trainer.strategy.root_device,
                                                            0)
        self.transfer_batch_to_device = self._transfer_batch_to_device_dummy
        return dataloader
        return self._create_dataloader('train', self.train_dataset)

    def test_dataloader(self):
        """
        Create the testing dataloader
        """
        raise NotImplementedError("Test dataloader not implemented")
        return self._create_dataloader('test', self.test_dataset)

    def predict_dataloader(self):
        """
@@ -303,7 +327,8 @@ class PinaDataModule(LightningDataModule):
        """
        raise NotImplementedError("Predict dataloader not implemented")
    def _transfer_batch_to_device_dummy(self, batch, device, dataloader_idx):
    @staticmethod
    def _transfer_batch_to_device_dummy(batch, device, dataloader_idx):
        return batch

    def _transfer_batch_to_device(self, batch, device, dataloader_idx):
@@ -312,10 +337,34 @@ class PinaDataModule(LightningDataModule):
        training loop and is used to transfer the batch to the device.
        """
        batch = [
            (k, super(LightningDataModule, self).transfer_batch_to_device(v,
                                                                          device,
                                                                          dataloader_idx))
            (k,
             super(LightningDataModule, self).transfer_batch_to_device(
                 v, device, dataloader_idx))
            for k, v in batch.items()
        ]

        return batch

    @staticmethod
    def _check_slit_sizes(train_size, test_size, val_size, predict_size):
        """
        Check if the splits are correct
        """
        if train_size < 0 or test_size < 0 or val_size < 0 or predict_size < 0:
            raise ValueError("The splits must be positive")
        if abs(train_size + test_size + val_size + predict_size - 1) > 1e-6:
            raise ValueError("The sum of the splits must be 1")

    @property
    def input_points(self):
        """
        # TODO
        """
        to_return = {}
        if hasattr(self, "train_dataset") and self.train_dataset is not None:
            to_return["train"] = self.train_dataset.input_points
        if hasattr(self, "val_dataset") and self.val_dataset is not None:
            to_return["val"] = self.val_dataset.input_points
        if hasattr(self, "test_dataset") and self.test_dataset is not None:
            to_return = self.test_dataset.input_points
        return to_return
@@ -92,6 +92,15 @@ class PinaTensorDataset(PinaDataset):
    def __getitem__(self, idx):
        return self._getitem_func(idx)

    @property
    def input_points(self):
        """
        Method to return input points for training.
        """
        return {
            k: v['input_points'] for k, v in self.conditions_dict.items()
        }


class PinaGraphDataset(PinaDataset):

@@ -110,10 +119,12 @@ class PinaGraphDataset(PinaDataset):
        condition_len = self.conditions_length[condition]
        if self.length > condition_len:
            cond_idx = [idx % condition_len for idx in cond_idx]
        to_return_dict[condition] = {k: Batch.from_data_list([v[i]
                                                              for i in cond_idx])
                                     if isinstance(v, list)
                                     else v[cond_idx].reshape(-1, *v[cond_idx].shape[2:])
        to_return_dict[condition] = {k: Batch.from_data_list([
            v[i] for i in cond_idx])
            if isinstance(v, list)
            else v[
                cond_idx].reshape(
                -1, *v[cond_idx].shape[2:])
                                     for k, v in data.items()
                                     }
        return to_return_dict
@@ -1,11 +1,13 @@
__all__ = [
    'LossInterface',
    'LpLoss',
    'PowerLoss',
    'weightningInterface',
    'LossInterface'
    'WeightingInterface',
    'ScalarWeighting'
]

from .loss_interface import LossInterface
from .power_loss import PowerLoss
from .lp_loss import LpLoss
from .weightning_interface import weightningInterface
from .weighting_interface import WeightingInterface
from .scalar_weighting import ScalarWeighting
pina/loss/scalar_weighting.py (new file, 36 lines)
@@ -0,0 +1,36 @@
""" Module for Loss Interface """

from .weighting_interface import WeightingInterface
from ..utils import check_consistency


class _NoWeighting(WeightingInterface):
    def aggregate(self, losses):
        return sum(losses.values())


class ScalarWeighting(WeightingInterface):
    """
    TODO
    """
    def __init__(self, weights):
        super().__init__()
        check_consistency([weights], (float, dict, int))
        if isinstance(weights, (float, int)):
            self.default_value_weights = weights
            self.weights = {}
        else:
            self.default_value_weights = 1
            self.weights = weights

    def aggregate(self, losses):
        """
        Aggregate the losses.

        :param dict(torch.Tensor) losses: The dictionary of losses.
        :return: The losses aggregation. It should be a scalar Tensor.
        :rtype: torch.Tensor
        """
        return sum(
            self.weights.get(condition, self.default_value_weights) * loss for
            condition, loss in losses.items()
        )
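
For illustration (not part of the commit): a minimal sketch of how the scalar weighting above combines a dictionary of per-condition losses; the condition names and values are hypothetical.

    import torch

    losses = {'physics': torch.tensor(2.0), 'data': torch.tensor(0.5)}
    # dict weights: conditions not listed fall back to the default value (1)
    weights, default = {'physics': 0.1}, 1
    aggregated = sum(weights.get(c, default) * l for c, l in losses.items())
    print(aggregated)  # tensor(0.7000) = 0.1 * 2.0 + 1 * 0.5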
@@ -1,35 +0,0 @@
""" Module for Loss Interface """

from .weightning_interface import weightningInterface


class WeightedAggregation(WeightningInterface):
    """
    TODO
    """
    def __init__(self, aggr='mean', weights=None):
        self.aggr = aggr
        self.weights = weights

    def aggregate(self, losses):
        """
        Aggregate the losses.

        :param dict(torch.Tensor) input: The dictionary of losses.
        :return: The losses aggregation. It should be a scalar Tensor.
        :rtype: torch.Tensor
        """
        if self.weights:
            weighted_losses = {
                condition: self.weights[condition] * losses[condition]
                for condition in losses
            }
        else:
            weighted_losses = losses

        if self.aggr == 'mean':
            return sum(weighted_losses.values()) / len(weighted_losses)
        elif self.aggr == 'sum':
            return sum(weighted_losses.values())
        else:
            raise ValueError(self.aggr + " is not valid for aggregation.")
@@ -3,21 +3,20 @@

from abc import ABCMeta, abstractmethod


class weightningInterface(metaclass=ABCMeta):
class WeightingInterface(metaclass=ABCMeta):
    """
    The ``weightingInterface`` class. TODO
    """

    @abstractmethod
    def __init__(self, *args, **kwargs):
        pass
    def __init__(self):
        self.condition_names = None

    @abstractmethod
    def aggregate(self, losses):
        """
        Aggregate the losses.

        :param list(torch.Tensor) input: The list
        :param dict(torch.Tensor) input: The dictionary of losses.
        :return: The losses aggregation. It should be a scalar Tensor.
        :rtype: torch.Tensor
        """
@@ -1,111 +0,0 @@
import torch
import torch.nn as nn
from ..utils import check_consistency
from ..label_tensor import LabelTensor


class Network(torch.nn.Module):

    def __init__(
        self, model, input_variables, output_variables, extra_features=None
    ):
        """
        Network class with standard forward method
        and possibility to pass extra features. This
        class is used internally in PINA to convert
        any :class:`torch.nn.Module` s in a PINA module.

        :param model: The torch model to convert in a PINA model.
        :type model: torch.nn.Module
        :param list(str) input_variables: The input variables of the :class:`AbstractProblem`, whose type depends on the
            type of domain (spatial, temporal, and parameter).
        :param list(str) output_variables: The output variables of the :class:`AbstractProblem`, whose type depends on the
            problem setting.
        :param extra_features: List of torch models to augment the input, defaults to None.
        :type extra_features: list(torch.nn.Module)
        """
        super().__init__()

        # check model consistency
        check_consistency(model, nn.Module)
        check_consistency(input_variables, str)
        if output_variables is not None:
            check_consistency(output_variables, str)

        self._model = model
        self._input_variables = input_variables
        self._output_variables = output_variables

        # check consistency and assign extra features
        if extra_features is None:
            self._extra_features = []
        else:
            for feat in extra_features:
                check_consistency(feat, nn.Module)
            self._extra_features = nn.Sequential(*extra_features)

        # check model works with inputs
        # TODO

    def forward(self, x):
        """
        Forward method for Network class. This class
        implements the standard forward method, and
        it adds the possibility to pass extra features.
        All the PINA models ``forward`` s are overridden
        by this class, to enable :class:`pina.label_tensor.LabelTensor` labels
        extraction.

        :param torch.Tensor x: Input of the network.
        :return torch.Tensor: Output of the network.
        """
        # only labeltensors as input
        assert isinstance(
            x, LabelTensor
        ), "Expected LabelTensor as input to the model."

        # extract torch.Tensor from corresponding label
        # in case `input_variables = []` all points are used
        if self._input_variables:
            x = x.extract(self._input_variables)

        # extract features and append
        for feature in self._extra_features:
            x = x.append(feature(x))

        # perform forward pass + converting to LabelTensor
        x = x.as_subclass(torch.Tensor)
        output = self._model(x)
        if self._output_variables is not None:
            output = LabelTensor(output, self._output_variables)

        return output

    # TODO to remove in next releases (only used in GAROM solver)
    def forward_map(self, x):
        """
        Forward method for Network class when the input is
        a tuple. This class is simply a forward with the input cast as a
        tuple or list of :class:`torch.Tensor`.
        All the PINA models ``forward`` s are overridden
        by this class, to enable :class:`pina.label_tensor.LabelTensor` labels
        extraction.

        :param list(torch.Tensor) | tuple(torch.Tensor) x: Input of the network.
        :return torch.Tensor: Output of the network.

        .. note::
            This function does not extract the input variables, all the variables
            are used for both tensors. Output variables are correctly applied.
        """
        # perform forward pass (using torch.Tensor) + converting to LabelTensor
        output = LabelTensor(self._model(x.tensor), self._output_variables)
        return output

    @property
    def torchmodel(self):
        return self._model

    @property
    def extra_features(self):
        return self._extra_features
@@ -1,7 +1,15 @@
""" Module for PINA Optimizer """

from abc import ABCMeta
from abc import ABCMeta, abstractmethod


class Optimizer(metaclass=ABCMeta):  # TODO improve interface
    pass

    @property
    @abstractmethod
    def instance(self):
        pass

    @abstractmethod
    def hook(self):
        pass
@@ -1,7 +1,15 @@
""" Module for PINA Scheduler """

from abc import ABCMeta
from abc import ABCMeta, abstractmethod


class Scheduler(metaclass=ABCMeta):  # TODO improve interface
    pass

    @property
    @abstractmethod
    def instance(self):
        pass

    @abstractmethod
    def hook(self):
        pass
@@ -13,11 +13,14 @@ class TorchOptimizer(Optimizer):

        self.optimizer_class = optimizer_class
        self.kwargs = kwargs
        self.optimizer_instance = None
        self._optimizer_instance = None

    def hook(self, parameters):
        self.optimizer_instance = self.optimizer_class(parameters,
        self._optimizer_instance = self.optimizer_class(parameters,
                                                        **self.kwargs)
    @property
    def instance(self):
        return self.optimizer_instance
        """
        Optimizer instance.
        """
        return self._optimizer_instance
@@ -19,8 +19,16 @@ class TorchScheduler(Scheduler):

        self.scheduler_class = scheduler_class
        self.kwargs = kwargs
        self._scheduler_instance = None

    def hook(self, optimizer):
        check_consistency(optimizer, Optimizer)
        self.scheduler_instance = self.scheduler_class(
            optimizer.optimizer_instance, **self.kwargs)
        self._scheduler_instance = self.scheduler_class(
            optimizer.instance, **self.kwargs)

    @property
    def instance(self):
        """
        Scheduler instance.
        """
        return self._scheduler_instance
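
For illustration (not part of the commit): a minimal sketch of the hook/instance pattern above; the wrapper stores the class and kwargs, and the underlying torch object only exists after hook() binds it. The constructor signatures are assumed from this diff.

    import torch

    model = torch.nn.Linear(2, 1)
    optimizer = TorchOptimizer(torch.optim.Adam, lr=1e-3)
    optimizer.hook(model.parameters())  # builds the torch.optim.Adam instance
    scheduler = TorchScheduler(torch.optim.lr_scheduler.ConstantLR, factor=0.5)
    scheduler.hook(optimizer)           # builds the scheduler on optimizer.instance
    print(optimizer.instance, scheduler.instance)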
@@ -36,8 +36,7 @@ class AbstractProblem(metaclass=ABCMeta):
        if not hasattr(self, "domains"):
            self.domains = {}
            for cond_name, cond in self.conditions.items():
                if isinstance(cond, (DomainEquationCondition,
                                     InputPointsEquationCondition)):
                if isinstance(cond, DomainEquationCondition):
                    if isinstance(cond.domain, DomainInterface):
                        self.domains[cond_name] = cond.domain
                        cond.domain = cond_name
@@ -1,8 +1,13 @@
__all__ = [
    'Poisson2DSquareProblem',
    'SupervisedProblem'
    'SupervisedProblem',
    'InversePoisson2DSquareProblem',
    'DiffusionReactionProblem',
    'InverseDiffusionReactionProblem'
]

from .poisson_2d_square import Poisson2DSquareProblem
from .supervised_problem import SupervisedProblem
from .inverse_poisson_2d_square import InversePoisson2DSquareProblem
from .diffusion_reaction import DiffusionReactionProblem
from .inverse_diffusion_reaction import InverseDiffusionReactionProblem
pina/problem/zoo/diffusion_reaction.py (new file, 45 lines)
@@ -0,0 +1,45 @@
""" Definition of the diffusion-reaction problem."""

import torch
from pina import Condition
from pina.problem import SpatialProblem, TimeDependentProblem
from pina.equation.equation import Equation
from pina.domain import CartesianDomain
from pina.operators import grad


def diffusion_reaction(input_, output_):
    """
    Implementation of the diffusion-reaction equation.
    """
    x = input_.extract('x')
    t = input_.extract('t')
    u_t = grad(output_, input_, d='t')
    u_x = grad(output_, input_, d='x')
    u_xx = grad(u_x, input_, d='x')
    r = torch.exp(-t) * (1.5 * torch.sin(2*x) + (8/3) * torch.sin(3*x) +
                         (15/4) * torch.sin(4*x) + (63/8) * torch.sin(8*x))
    return u_t - u_xx - r


class DiffusionReactionProblem(TimeDependentProblem, SpatialProblem):
    """
    Implementation of the diffusion-reaction problem on the spatial interval
    [-pi, pi] and temporal interval [0,1].
    """
    output_variables = ['u']
    spatial_domain = CartesianDomain({'x': [-torch.pi, torch.pi]})
    temporal_domain = CartesianDomain({'t': [0, 1]})

    conditions = {
        'D': Condition(
            domain=CartesianDomain({'x': [-torch.pi, torch.pi], 't': [0, 1]}),
            equation=Equation(diffusion_reaction))
    }

    def _solution(self, pts):
        t = pts.extract('t')
        x = pts.extract('x')
        return torch.exp(-t) * (
            torch.sin(x) + (1/2)*torch.sin(2*x) + (1/3)*torch.sin(3*x) +
            (1/4)*torch.sin(4*x) + (1/8)*torch.sin(8*x)
        )
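
For illustration (not part of the commit): the forcing term r above is the manufactured one for the exact solution in _solution. With u = exp(-t) * sum_k a_k sin(k x) and a_k = 1/k for k in {1, 2, 3, 4, 8}, one gets u_t = -u and u_xx = -exp(-t) * sum_k a_k k^2 sin(k x), so u_t - u_xx = exp(-t) * sum_k a_k (k^2 - 1) sin(k x); the nonzero coefficients a_k (k^2 - 1) are exactly 3/2, 8/3, 15/4 and 63/8 (the k = 1 term vanishes).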
pina/problem/zoo/inverse_diffusion_reaction.py (new file, 51 lines)
@@ -0,0 +1,51 @@
""" Definition of the diffusion-reaction problem."""

import torch
from pina import Condition, LabelTensor
from pina.problem import SpatialProblem, TimeDependentProblem, InverseProblem
from pina.equation.equation import Equation
from pina.domain import CartesianDomain
from pina.operators import grad


def diffusion_reaction(input_, output_):
    """
    Implementation of the diffusion-reaction equation.
    """
    x = input_.extract('x')
    t = input_.extract('t')
    u_t = grad(output_, input_, d='t')
    u_x = grad(output_, input_, d='x')
    u_xx = grad(u_x, input_, d='x')
    r = torch.exp(-t) * (1.5 * torch.sin(2*x) + (8/3) * torch.sin(3*x) +
                         (15/4) * torch.sin(4*x) + (63/8) * torch.sin(8*x))
    return u_t - u_xx - r


class InverseDiffusionReactionProblem(TimeDependentProblem,
                                      SpatialProblem,
                                      InverseProblem):
    """
    Implementation of the diffusion-reaction inverse problem on the spatial
    interval [-pi, pi] and temporal interval [0,1], with unknown parameters
    in the interval [-1,1].
    """
    output_variables = ['u']
    spatial_domain = CartesianDomain({'x': [-torch.pi, torch.pi]})
    temporal_domain = CartesianDomain({'t': [0, 1]})
    unknown_parameter_domain = CartesianDomain({'mu': [-1, 1]})

    conditions = {
        'D': Condition(
            domain=CartesianDomain({'x': [-torch.pi, torch.pi], 't': [0, 1]}),
            equation=Equation(diffusion_reaction)),
        'data': Condition(
            input_points=LabelTensor(torch.randn(10, 2), ['x', 't']),
            output_points=LabelTensor(torch.randn(10, 1), ['u'])),
    }

    def _solution(self, pts):
        t = pts.extract('t')
        x = pts.extract('x')
        return torch.exp(-t) * (
            torch.sin(x) + (1/2)*torch.sin(2*x) + (1/3)*torch.sin(3*x) +
            (1/4)*torch.sin(4*x) + (1/8)*torch.sin(8*x)
        )
pina/problem/zoo/inverse_poisson_2d_square.py (new file, 50 lines)
@@ -0,0 +1,50 @@
""" Definition of the inverse Poisson problem on a square domain."""

import torch
from pina import Condition, LabelTensor
from pina.problem import SpatialProblem, InverseProblem
from pina.operators import laplacian
from pina.domain import CartesianDomain
from pina.equation.equation import Equation
from pina.equation.equation_factory import FixedValue


def laplace_equation(input_, output_, params_):
    """
    Implementation of the laplace equation.
    """
    force_term = torch.exp(- 2*(input_.extract(['x']) - params_['mu1'])**2
                           - 2*(input_.extract(['y']) - params_['mu2'])**2)
    delta_u = laplacian(output_, input_, components=['u'], d=['x', 'y'])
    return delta_u - force_term


class InversePoisson2DSquareProblem(SpatialProblem, InverseProblem):
    """
    Implementation of the inverse 2-dimensional Poisson problem
    on a square domain, with parameter domain [-1, 1] x [-1, 1].
    """
    output_variables = ['u']
    x_min, x_max = -2, 2
    y_min, y_max = -2, 2
    data_input = LabelTensor(torch.rand(10, 2), ['x', 'y'])
    data_output = LabelTensor(torch.rand(10, 1), ['u'])
    spatial_domain = CartesianDomain({'x': [x_min, x_max], 'y': [y_min, y_max]})
    unknown_parameter_domain = CartesianDomain({'mu1': [-1, 1], 'mu2': [-1, 1]})

    domains = {
        'g1': CartesianDomain({'x': [x_min, x_max], 'y': y_max}),
        'g2': CartesianDomain({'x': [x_min, x_max], 'y': y_min}),
        'g3': CartesianDomain({'x': x_max, 'y': [y_min, y_max]}),
        'g4': CartesianDomain({'x': x_min, 'y': [y_min, y_max]}),
        'D': CartesianDomain({'x': [x_min, x_max], 'y': [y_min, y_max]}),
    }

    conditions = {
        'nil_g1': Condition(domain='g1', equation=FixedValue(0.0)),
        'nil_g2': Condition(domain='g2', equation=FixedValue(0.0)),
        'nil_g3': Condition(domain='g3', equation=FixedValue(0.0)),
        'nil_g4': Condition(domain='g4', equation=FixedValue(0.0)),
        'laplace_D': Condition(domain='D', equation=Equation(laplace_equation)),
        'data': Condition(
            input_points=data_input.extract(['x', 'y']),
            output_points=data_output)
    }
@@ -2,23 +2,27 @@

from pina.problem import SpatialProblem
from pina.operators import laplacian
from pina import LabelTensor, Condition
from pina import Condition
from pina.domain import CartesianDomain
from pina.equation.equation import Equation
from pina.equation.equation_factory import FixedValue
import torch


def laplace_equation(input_, output_):
    """
    Implementation of the laplace equation.
    """
    force_term = (torch.sin(input_.extract(['x']) * torch.pi) *
                  torch.sin(input_.extract(['y']) * torch.pi))
    delta_u = laplacian(output_.extract(['u']), input_)
    return delta_u - force_term


my_laplace = Equation(laplace_equation)


class Poisson2DSquareProblem(SpatialProblem):
    """
    Implementation of the 2-dimensional Poisson problem on a square domain.
    """
    output_variables = ['u']
    spatial_domain = CartesianDomain({'x': [0, 1], 'y': [0, 1]})

@@ -31,10 +35,10 @@ class Poisson2DSquareProblem(SpatialProblem):
    }

    conditions = {
        'nil_g1': Condition(domain='D', equation=FixedValue(0.0)),
        'nil_g2': Condition(domain='D', equation=FixedValue(0.0)),
        'nil_g3': Condition(domain='D', equation=FixedValue(0.0)),
        'nil_g4': Condition(domain='D', equation=FixedValue(0.0)),
        'nil_g1': Condition(domain='g1', equation=FixedValue(0.0)),
        'nil_g2': Condition(domain='g2', equation=FixedValue(0.0)),
        'nil_g3': Condition(domain='g3', equation=FixedValue(0.0)),
        'nil_g4': Condition(domain='g4', equation=FixedValue(0.0)),
        'laplace_D': Condition(domain='D', equation=my_laplace),
    }
@@ -1,18 +1,20 @@
__all__ = [
    "SolverInterface",
    "SingleSolverInterface",
    "MultiSolverInterface",
    "PINNInterface",
    "PINN",
    "GPINN",
    "GradientPINN",
    "CausalPINN",
    "CompetitivePINN",
    "SAPINN",
    "SelfAdaptivePINN",
    "RBAPINN",
    "SupervisedSolver",
    "ReducedOrderModelSolver",
    "GAROM",
]

from .solver import SolverInterface
from .solver import SolverInterface, SingleSolverInterface, MultiSolverInterface
from .pinns import *
from .supervised import SupervisedSolver
from .rom import ReducedOrderModelSolver
@@ -1,23 +1,17 @@
""" Module for GAROM """

import torch
import sys

try:
    from torch.optim.lr_scheduler import LRScheduler  # torch >= 2.0
except ImportError:
    from torch.optim.lr_scheduler import (
        _LRScheduler as LRScheduler,
    )  # torch < 2.0

from torch.optim.lr_scheduler import ConstantLR
from .solver import SolverInterface
from .solver import MultiSolverInterface
from ..utils import check_consistency
from ..loss.loss_interface import LossInterface
from ..condition import InputOutputPointsCondition
from ..utils import check_consistency
from ..loss import LossInterface, PowerLoss
from torch.nn.modules.loss import _Loss

class GAROM(SolverInterface):
class GAROM(MultiSolverInterface):
    """
    GAROM solver class. This class implements Generative Adversarial
    Reduced Order Model solver, using user specified ``models`` to solve
@@ -31,20 +25,18 @@ class GAROM(SolverInterface):
    <https://doi.org/10.48550/arXiv.2305.15881>`_.
    """

    accepted_conditions_types = InputOutputPointsCondition

    def __init__(
        self,
        problem,
        generator,
        discriminator,
        loss=None,
        optimizer_generator=torch.optim.Adam,
        optimizer_generator_kwargs={"lr": 0.001},
        optimizer_discriminator=torch.optim.Adam,
        optimizer_discriminator_kwargs={"lr": 0.001},
        scheduler_generator=ConstantLR,
        scheduler_generator_kwargs={"factor": 1, "total_iters": 0},
        scheduler_discriminator=ConstantLR,
        scheduler_discriminator_kwargs={"factor": 1, "total_iters": 0},
        optimizer_generator=None,
        optimizer_discriminator=None,
        scheduler_generator=None,
        scheduler_discriminator=None,
        gamma=0.3,
        lambda_k=0.001,
        regularizer=False,
@@ -58,20 +50,15 @@ class GAROM(SolverInterface):
        :param torch.nn.Module loss: The loss function used as minimizer,
            default ``None``. If ``loss`` is ``None`` the default
            ``PowerLoss(p=1)`` is used, as in the original paper.
        :param torch.optim.Optimizer optimizer_generator: The neural
        :param Optimizer optimizer_generator: The neural
            network optimizer to use for the generator network
            , default is `torch.optim.Adam`.
        :param dict optimizer_generator_kwargs: Optimizer constructor keyword
            args. for the generator.
        :param torch.optim.Optimizer optimizer_discriminator: The neural
        :param Optimizer optimizer_discriminator: The neural
            network optimizer to use for the discriminator network
            , default is `torch.optim.Adam`.
        :param dict optimizer_discriminator_kwargs: Optimizer constructor keyword
            args. for the discriminator.
        :param torch.optim.LRScheduler scheduler_generator: Learning
        :param Scheduler scheduler_generator: Learning
            rate scheduler for the generator.
        :param dict scheduler_generator_kwargs: LR scheduler constructor keyword args.
        :param torch.optim.LRScheduler scheduler_discriminator: Learning
        :param Scheduler scheduler_discriminator: Learning
            rate scheduler for the discriminator.
        :param dict scheduler_discriminator_kwargs: LR scheduler constructor keyword args.
        :param gamma: Ratio of expected loss for generator and discriminator, defaults to 0.3.
@@ -87,53 +74,39 @@ class GAROM(SolverInterface):
            parameters), and ``output_points``.
        """

        super().__init__(
            models=[generator, discriminator],
            problem=problem,
            optimizers=[optimizer_generator, optimizer_discriminator],
            optimizers_kwargs=[
                optimizer_generator_kwargs,
                optimizer_discriminator_kwargs,
            ],
        )

        # set automatic optimization for GANs
        self.automatic_optimization = False

        # set loss
        if loss is None:
            loss = PowerLoss(p=1)

        super().__init__(
            models=[generator, discriminator],
            problem=problem,
            optimizers=[optimizer_generator, optimizer_discriminator],
            schedulers=[
                scheduler_generator,
                scheduler_discriminator,
            ],
            use_lt=False
        )

        # check consistency
        check_consistency(loss, (LossInterface, _Loss, torch.nn.Module),
                          subclass=False)
        self._loss = loss

        # set automatic optimization for GANs
        self.automatic_optimization = False

        # check consistency
        check_consistency(scheduler_generator, LRScheduler, subclass=True)
        check_consistency(scheduler_generator_kwargs, dict)
        check_consistency(scheduler_discriminator, LRScheduler, subclass=True)
        check_consistency(scheduler_discriminator_kwargs, dict)
        check_consistency(loss, (LossInterface, _Loss))
        check_consistency(gamma, float)
        check_consistency(lambda_k, float)
        check_consistency(regularizer, bool)

        # assign schedulers
        self._schedulers = [
            scheduler_generator(
                self.optimizers[0], **scheduler_generator_kwargs
            ),
            scheduler_discriminator(
                self.optimizers[1], **scheduler_discriminator_kwargs
            ),
        ]

        # loss and writer
        self._loss = loss

        # BEGAN hyperparameters
        self.k = 0
        self.gamma = gamma
        self.lambda_k = lambda_k
        self.regularizer = float(regularizer)
        self._generator = self.models[0]
        self._discriminator = self.models[1]

    def forward(self, x, mc_steps=20, variance=False):
        """
@@ -164,16 +137,6 @@ class GAROM(SolverInterface):

        return mean

    def configure_optimizers(self):
        """
        Optimizer configuration for the GAROM
        solver.

        :return: The optimizers and the schedulers
        :rtype: tuple(list, list)
        """
        return self.optimizers, self._schedulers

    def sample(self, x):
        # sampling
        return self.generator(x)
@@ -185,11 +148,11 @@ class GAROM(SolverInterface):
        optimizer = self.optimizer_generator
        optimizer.zero_grad()

        generated_snapshots = self.generator(parameters)
        generated_snapshots = self.sample(parameters)

        # generator loss
        r_loss = self._loss(snapshots, generated_snapshots)
        d_fake = self.discriminator.forward_map(
        d_fake = self.discriminator(
            [generated_snapshots, parameters]
        )
        g_loss = (
@@ -202,6 +165,27 @@ class GAROM(SolverInterface):

        return r_loss, g_loss

    def on_train_batch_end(self, outputs, batch, batch_idx):
        """
        This method is called at the end of each training batch, and overrides
        the PyTorch Lightning implementation for logging the checkpoints.

        :param torch.Tensor outputs: The output from the model for the
            current batch.
        :param tuple batch: The current batch of data.
        :param int batch_idx: The index of the current batch.
        :return: Whatever is returned by the parent
            method ``on_train_batch_end``.
        :rtype: Any
        """
        # increase by one the counter of optimization to save loggers
        (
            self.trainer.fit_loop.epoch_loop.manual_optimization
            .optim_step_progress.total.completed
        ) += 1

        return super().on_train_batch_end(outputs, batch, batch_idx)

    def _train_discriminator(self, parameters, snapshots):
        """
        Private method to train the discriminator network.
@@ -210,11 +194,11 @@ class GAROM(SolverInterface):
        optimizer.zero_grad()

        # Generate a batch of images
        generated_snapshots = self.generator(parameters)
        generated_snapshots = self.sample(parameters)

        # Discriminator pass
        d_real = self.discriminator.forward_map([snapshots, parameters])
        d_fake = self.discriminator.forward_map(
        d_real = self.discriminator([snapshots, parameters])
        d_fake = self.discriminator(
            [generated_snapshots, parameters]
        )

@@ -242,103 +226,82 @@ class GAROM(SolverInterface):
        self.k = min(max(self.k, 0), 1)  # Constraint to interval [0, 1]
        return diff

    def training_step(self, batch, batch_idx):
    def optimization_cycle(self, batch):
        """GAROM solver training step.

        :param batch: The batch element in the dataloader.
        :type batch: tuple
        :param batch_idx: The batch index.
        :type batch_idx: int
        :return: The sum of the loss functions.
        :rtype: LabelTensor
        """

        condition_idx = batch["condition"]

        for condition_id in range(condition_idx.min(), condition_idx.max() + 1):

            condition_name = self._dataloader.condition_names[condition_id]
            condition = self.problem.conditions[condition_name]
            pts = batch["pts"].detach()
            out = batch["output"]

            if condition_name not in self.problem.conditions:
                raise RuntimeError("Something wrong happened.")

            # for data driven mode
            if not hasattr(condition, "output_points"):
                raise NotImplementedError(
                    "GAROM works only in data-driven mode."
                )

            # get data
            snapshots = out[condition_idx == condition_id]
            parameters = pts[condition_idx == condition_id]

        condition_loss = {}
        for condition_name, points in batch:
            parameters, snapshots = points['input_points'], points['output_points']
            d_loss_real, d_loss_fake, d_loss = self._train_discriminator(
                parameters, snapshots
            )

            r_loss, g_loss = self._train_generator(parameters, snapshots)

            diff = self._update_weights(d_loss_real, d_loss_fake)
            condition_loss[condition_name] = r_loss

            # logging
            self.log(
                "mean_loss",
                float(r_loss),
                prog_bar=True,
                logger=True,
                on_epoch=True,
                on_step=False,
            )
            self.log(
            # some extra logging
            self.store_log(
                "d_loss",
                float(d_loss),
                prog_bar=True,
                logger=True,
                on_epoch=True,
                on_step=False,
            )
            self.log(
                "g_loss",
                float(g_loss),
                prog_bar=True,
                logger=True,
                on_epoch=True,
                on_step=False,
            )
            self.log(
                "stability_metric",
                float(d_loss_real + torch.abs(diff)),
                prog_bar=True,
                logger=True,
                on_epoch=True,
                on_step=False,
                self.get_batch_size(batch)
            )
            self.store_log(
                "g_loss",
                float(g_loss),
                self.get_batch_size(batch)
            )
            self.store_log(
                "stability_metric",
                float(d_loss_real + torch.abs(diff)),
                self.get_batch_size(batch)
            )
        return condition_loss

        return

    def validation_step(self, batch):
        condition_loss = {}
        for condition_name, points in batch:
            parameters, snapshots = points['input_points'], points['output_points']
            snapshots_gen = self.generator(parameters)
            condition_loss[condition_name] = self._loss(snapshots, snapshots_gen)
        loss = self.weighting.aggregate(condition_loss)
        self.store_log('val_loss', loss, self.get_batch_size(batch))
        return loss

    def test_step(self, batch):
        condition_loss = {}
        for condition_name, points in batch:
            parameters, snapshots = points['input_points'], points['output_points']
            snapshots_gen = self.generator(parameters)
            condition_loss[condition_name] = self._loss(snapshots, snapshots_gen)
        loss = self.weighting.aggregate(condition_loss)
        self.store_log('test_loss', loss, self.get_batch_size(batch))
        return loss

    @property
    def generator(self):
        return self._generator
        return self.models[0]

    @property
    def discriminator(self):
        return self._discriminator
        return self.models[1]

    @property
    def optimizer_generator(self):
        return self.optimizers[0]
        return self.optimizers[0].instance

    @property
    def optimizer_discriminator(self):
        return self.optimizers[1]
        return self.optimizers[1].instance

    @property
    def scheduler_generator(self):
        return self._schedulers[0]
        return self.schedulers[0].instance

    @property
    def scheduler_discriminator(self):
        return self._schedulers[1]
        return self.schedulers[1].instance
@@ -1,17 +1,17 @@
__all__ = [
    "PINNInterface",
    "PINN",
    "GPINN",
    "GradientPINN",
    "CausalPINN",
    "CompetitivePINN",
    "SAPINN",
    "SelfAdaptivePINN",
    "RBAPINN",
]

from .pinn_interface import PINNInterface
from .pinn import PINN
from .gpinn import GPINN
from .causalpinn import CausalPINN
from .rba_pinn import RBAPINN
from .causal_pinn import CausalPINN
from .gradient_pinn import GradientPINN
from .competitive_pinn import CompetitivePINN
from .sapinn import SAPINN
from .rbapinn import RBAPINN
from .self_adaptive_pinn import SelfAdaptivePINN
@@ -1,18 +1,15 @@
""" Module for CausalPINN """
""" Module for Causal PINN. """

import torch

from torch.optim.lr_scheduler import ConstantLR

from .pinn import PINN
from pina.problem import TimeDependentProblem
from .pinn import PINN
from pina.utils import check_consistency


class CausalPINN(PINN):
    r"""
    Causal Physics Informed Neural Network (PINN) solver class.
    Causal Physics Informed Neural Network (CausalPINN) solver class.
    This class implements Causal Physics Informed Neural
    Network solvers, using a user specified ``model`` to solve a specific
    ``problem``. It can be used for solving both forward and inverse problems.
@@ -70,45 +67,33 @@ class CausalPINN(PINN):
    :class:`~pina.problem.timedep_problem.TimeDependentProblem` class.
    """

    def __init__(
        self,
        problem,
        model,
        extra_features=None,
        loss=torch.nn.MSELoss(),
        optimizer=torch.optim.Adam,
        optimizer_kwargs={"lr": 0.001},
        scheduler=ConstantLR,
        scheduler_kwargs={"factor": 1, "total_iters": 0},
        eps=100,
    ):
    def __init__(self,
                 problem,
                 model,
                 optimizer=None,
                 scheduler=None,
                 weighting=None,
                 loss=None,
                 eps=100):
        """
        :param AbstractProblem problem: The formulation of the problem.
        :param torch.nn.Module model: The neural network model to use.
        :param torch.nn.Module loss: The loss function used as minimizer,
            default :class:`torch.nn.MSELoss`.
        :param torch.nn.Module extra_features: The additional input
            features to use as augmented input.
        :param AbstractProblem problem: The formulation of the problem.
        :param torch.optim.Optimizer optimizer: The neural network optimizer to
            use; default is :class:`torch.optim.Adam`.
        :param dict optimizer_kwargs: Optimizer constructor keyword args.
        :param torch.optim.LRScheduler scheduler: Learning
            rate scheduler.
        :param dict scheduler_kwargs: LR scheduler constructor keyword args.
        :param int | float eps: The exponential decay parameter. Note that this
            value is kept fixed during the training, but can be changed by means
            of a callback, e.g. for annealing.
            use; default `None`.
        :param torch.optim.LRScheduler scheduler: Learning rate scheduler;
            default `None`.
        :param WeightingInterface weighting: The weighting schema to use;
            default `None`.
        :param torch.nn.Module loss: The loss function to be minimized;
            default `None`.
        :param float eps: The exponential decay parameter; default `100`.
        """
        super().__init__(
            problem=problem,
            model=model,
            extra_features=extra_features,
            loss=loss,
            optimizer=optimizer,
            optimizer_kwargs=optimizer_kwargs,
            scheduler=scheduler,
            scheduler_kwargs=scheduler_kwargs,
        )
        super().__init__(model=model,
                         problem=problem,
                         optimizer=optimizer,
                         scheduler=scheduler,
                         weighting=weighting,
                         loss=loss)

        # checking consistency
        check_consistency(eps, (int, float))
@@ -116,7 +101,7 @@ class CausalPINN(PINN):
        if not isinstance(self.problem, TimeDependentProblem):
            raise ValueError(
                "Causal PINN works only for problems"
                "inheritig from TimeDependentProblem."
                "inheriting from TimeDependentProblem."
            )

    def loss_phys(self, samples, equation):
@@ -134,8 +119,8 @@ class CausalPINN(PINN):
        # split sequentially ordered time tensors into chunks
        chunks, labels = self._split_tensor_into_chunks(samples)
        # compute residuals - this corresponds to ordered loss function
        # values for each time step. We apply `flatten` such that after
        # concataning the residuals we obtain a tensor of shape #chunks
        # values for each time step. Apply `flatten` to ensure obtaining
        # a tensor of shape #chunks after concatenating the residuals
        time_loss = []
        for chunk in chunks:
            chunk.labels = labels
@@ -145,11 +130,10 @@ class CausalPINN(PINN):
                torch.zeros_like(residual, requires_grad=True), residual
            )
            time_loss.append(loss_val)
        # store results
        self.store_log(loss_value=float(sum(time_loss) / len(time_loss)))

        # concatenate residuals
        time_loss = torch.stack(time_loss)
        # compute weights (without the gradient storing)
        # compute weights without storing the gradient
        with torch.no_grad():
            weights = self._compute_weights(time_loss)
        return (weights * time_loss).mean()
@@ -197,17 +181,17 @@ class CausalPINN(PINN):
        :return: Tuple containing the chunks and the original labels.
        :rtype: Tuple[List[LabelTensor], List]
        """
        # labels input tensors
        # extract labels
        labels = tensor.labels
        # labels input tensors
        # sort input tensor based on time
        tensor = self._sort_label_tensor(tensor)
        # extract time tensor
        time_tensor = tensor.extract(self.problem.temporal_domain.variables)
        # count unique tensors in time
        _, idx_split = time_tensor.unique(return_counts=True)
        # splitting
        # split the tensor based on time
        chunks = torch.split(tensor, tuple(idx_split))
        return chunks, labels  # return chunks
        return chunks, labels

    def _compute_weights(self, loss):
        """
@@ -217,7 +201,7 @@ class CausalPINN(PINN):
        :return: The computed weights for the physics loss.
        :rtype: LabelTensor
        """
        # compute cumulative loss and multiply by epsilos
        # compute cumulative loss and multiply by epsilon
        cumulative_loss = self._eps * torch.cumsum(loss, dim=0)
        # return the exponential of the weghited negative cumulative sum
        # return the exponential of the negative weighted cumulative sum
        return torch.exp(-cumulative_loss)
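
For illustration (not part of the commit): a minimal numeric sketch of the causal weights above, w = exp(-eps * cumsum(loss)); later time chunks are down-weighted until earlier residuals shrink. The eps value here is hypothetical (the solver default is 100).

    import torch

    eps = 1.0
    time_loss = torch.tensor([0.5, 0.4, 0.3])  # per-time-chunk residual losses
    weights = torch.exp(-eps * torch.cumsum(time_loss, dim=0))
    print(weights)  # tensor([0.6065, 0.4066, 0.3012]): later chunks weigh less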
@@ -1,23 +1,14 @@
""" Module for CompetitivePINN """
""" Module for Competitive PINN. """

import torch
import copy

try:
from torch.optim.lr_scheduler import LRScheduler  # torch >= 2.0
except ImportError:
from torch.optim.lr_scheduler import (
_LRScheduler as LRScheduler,
)  # torch < 2.0

from torch.optim.lr_scheduler import ConstantLR

from .pinn_interface import PINNInterface
from pina.utils import check_consistency
from pina.problem import InverseProblem
from .pinn_interface import PINNInterface
from ..solver import MultiSolverInterface


class CompetitivePINN(PINNInterface):
class CompetitivePINN(PINNInterface, MultiSolverInterface):
r"""
Competitive Physics Informed Neural Network (PINN) solver class.
This class implements Competitive Physics Informed Neural
@@ -64,82 +55,49 @@ class CompetitivePINN(PINNInterface):
``extra_feature``.
"""

def __init__(
self,
problem,
model,
discriminator=None,
loss=torch.nn.MSELoss(),
optimizer_model=torch.optim.Adam,
optimizer_model_kwargs={"lr": 0.001},
optimizer_discriminator=torch.optim.Adam,
optimizer_discriminator_kwargs={"lr": 0.001},
scheduler_model=ConstantLR,
scheduler_model_kwargs={"factor": 1, "total_iters": 0},
scheduler_discriminator=ConstantLR,
scheduler_discriminator_kwargs={"factor": 1, "total_iters": 0},
):
def __init__(self,
problem,
model,
discriminator=None,
optimizer_model=None,
optimizer_discriminator=None,
scheduler_model=None,
scheduler_discriminator=None,
weighting=None,
loss=None):
"""
:param AbstractProblem problem: The formualation of the problem.
:param AbstractProblem problem: The formulation of the problem.
:param torch.nn.Module model: The neural network model to use
for the model.
:param torch.nn.Module discriminator: The neural network model to use
for the discriminator. If ``None``, the discriminator network will
have the same architecture as the model network.
:param torch.nn.Module loss: The loss function used as minimizer,
default :class:`torch.nn.MSELoss`.
:param torch.optim.Optimizer optimizer_model: The neural
network optimizer to use for the model network
, default is `torch.optim.Adam`.
:param dict optimizer_model_kwargs: Optimizer constructor keyword
args. for the model.
:param torch.optim.Optimizer optimizer_discriminator: The neural
network optimizer to use for the discriminator network
, default is `torch.optim.Adam`.
:param dict optimizer_discriminator_kwargs: Optimizer constructor
keyword args. for the discriminator.
:param torch.optim.LRScheduler scheduler_model: Learning
rate scheduler for the model.
:param dict scheduler_model_kwargs: LR scheduler constructor
keyword args.
:param torch.optim.LRScheduler scheduler_discriminator: Learning
rate scheduler for the discriminator.
:param torch.optim.Optimizer optimizer_model: The neural network
optimizer to use for the model network; default `None`.
:param torch.optim.Optimizer optimizer_discriminator: The neural network
optimizer to use for the discriminator network; default `None`.
:param torch.optim.LRScheduler scheduler_model: Learning rate scheduler
for the model; default `None`.
:param torch.optim.LRScheduler scheduler_discriminator: Learning rate
scheduler for the discriminator; default `None`.
:param WeightingInterface weighting: The weighting schema to use;
default `None`.
:param torch.nn.Module loss: The loss function to be minimized;
default `None`.
"""
if discriminator is None:
discriminator = copy.deepcopy(model)

super().__init__(
models=[model, discriminator],
problem=problem,
optimizers=[optimizer_model, optimizer_discriminator],
optimizers_kwargs=[
optimizer_model_kwargs,
optimizer_discriminator_kwargs,
],
extra_features=None,  # CompetitivePINN doesn't take extra features
loss=loss,
)
super().__init__(models=[model, discriminator],
problem=problem,
optimizers=[optimizer_model, optimizer_discriminator],
schedulers=[scheduler_model, scheduler_discriminator],
weighting=weighting,
loss=loss)

# set automatic optimization for GANs
# Set automatic optimization to False
self.automatic_optimization = False

# check consistency
check_consistency(scheduler_model, LRScheduler, subclass=True)
check_consistency(scheduler_model_kwargs, dict)
check_consistency(scheduler_discriminator, LRScheduler, subclass=True)
check_consistency(scheduler_discriminator_kwargs, dict)

# assign schedulers
self._schedulers = [
scheduler_model(self.optimizers[0], **scheduler_model_kwargs),
scheduler_discriminator(
self.optimizers[1], **scheduler_discriminator_kwargs
),
]

self._model = self.models[0]
self._discriminator = self.models[1]

def forward(self, x):
r"""
Forward pass implementation for the PINN solver. It returns the function
@@ -154,6 +112,22 @@ class CompetitivePINN(PINNInterface):
"""
return self.neural_net(x)

def training_step(self, batch):
"""
Solver training step, overridden to perform manual optimization.

:param batch: The batch element in the dataloader.
:type batch: tuple
:return: The sum of the loss functions.
:rtype: LabelTensor
"""
self.optimizer_model.instance.zero_grad()
self.optimizer_discriminator.instance.zero_grad()
loss = super().training_step(batch)
self.optimizer_model.instance.step()
self.optimizer_discriminator.instance.step()
return loss
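With ``automatic_optimization = False``, Lightning hands the backward/step bookkeeping to the solver, and ``training_step`` drives both optimizers around a single shared loss evaluation. A minimal sketch of the same two-optimizer pattern in plain PyTorch (the networks and loss below are placeholders, not the solver's actual models):

    import torch

    net = torch.nn.Linear(2, 1)
    disc = torch.nn.Linear(2, 1)
    opt_net = torch.optim.Adam(net.parameters(), lr=1e-3)
    opt_disc = torch.optim.Adam(disc.parameters(), lr=1e-3)

    x = torch.randn(8, 2)
    # zero both optimizers, accumulate gradients once, then step both,
    # mirroring the zero_grad/step bracket in training_step above
    opt_net.zero_grad()
    opt_disc.zero_grad()
    loss = (net(x) * disc(x)).pow(2).mean()  # stand-in for the competitive loss
    loss.backward()
    opt_net.step()
    opt_disc.step()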
def loss_phys(self, samples, equation):
"""
Computes the physics loss for the Competitive PINN solver based on given
@@ -166,25 +140,26 @@ class CompetitivePINN(PINNInterface):
samples and equation.
:rtype: LabelTensor
"""
# train one step of the model
# Train the model for one step
with torch.no_grad():
discriminator_bets = self.discriminator(samples)
loss_val = self._train_model(samples, equation, discriminator_bets)
self.store_log(loss_value=float(loss_val))
# detaching samples from the computational graph to erase it and setting
# the gradient to true to create a new computational graph.

# Detach samples from the existing computational graph and
# create a new one by setting requires_grad to True.
# In alternative set `retain_graph=True`.
samples = samples.detach()
samples.requires_grad = True
# train one step of discriminator
samples.requires_grad_()

# Train the discriminator for one step
discriminator_bets = self.discriminator(samples)
self._train_discriminator(samples, equation, discriminator_bets)
return loss_val

def loss_data(self, input_tensor, output_tensor):
def loss_data(self, input_pts, output_pts):
"""
The data loss for the PINN solver. It computes the loss between the
network output against the true solution.
The data loss for the CompetitivePINN solver. It computes the loss
between the network output against the true solution.

:param LabelTensor input_tensor: The input to the neural networks.
:param LabelTensor output_tensor: The true solution to compare the
@@ -192,14 +167,9 @@ class CompetitivePINN(PINNInterface):
:return: The computed data loss.
:rtype: torch.Tensor
"""
self.optimizer_model.zero_grad()
loss_val = (
super()
.loss_data(input_tensor, output_tensor)
.as_subclass(torch.Tensor)
)
loss_val = (super().loss_data(input_pts, output_pts))
# prepare for optimizer step called in training step
loss_val.backward()
self.optimizer_model.step()
return loss_val

def configure_optimizers(self):
@@ -209,10 +179,12 @@ class CompetitivePINN(PINNInterface):
:return: The optimizers and the schedulers
:rtype: tuple(list, list)
"""
# if the problem is an InverseProblem, add the unknown parameters
# to the parameters that the optimizer needs to optimize
# If the problem is an InverseProblem, add the unknown parameters
# to the parameters to be optimized
self.optimizer_model.hook(self.neural_net.parameters())
self.optimizer_discriminator.hook(self.discriminator.parameters())
if isinstance(self.problem, InverseProblem):
self.optimizer_model.add_param_group(
self.optimizer_model.instance.add_param_group(
{
"params": [
self._params[var]
@@ -220,7 +192,14 @@ class CompetitivePINN(PINNInterface):
]
}
)
return self.optimizers, self._schedulers
self.scheduler_model.hook(self.optimizer_model)
self.scheduler_discriminator.hook(self.optimizer_discriminator)
return (
[self.optimizer_model.instance,
self.optimizer_discriminator.instance],
[self.scheduler_model.instance,
self.scheduler_discriminator.instance]
)

def on_train_batch_end(self, outputs, batch, batch_idx):
"""
@@ -236,9 +215,11 @@ class CompetitivePINN(PINNInterface):
:rtype: Any
"""
# increase by one the counter of optimization to save loggers
self.trainer.fit_loop.epoch_loop.manual_optimization.optim_step_progress.total.completed += (
1
)
(
self.trainer.fit_loop.epoch_loop.manual_optimization
.optim_step_progress.total.completed
) += 1

return super().on_train_batch_end(outputs, batch, batch_idx)

def _train_discriminator(self, samples, equation, discriminator_bets):
@@ -251,22 +232,19 @@ class CompetitivePINN(PINNInterface):
:param Tensor discriminator_bets: Predictions made by the discriminator
network.
"""
# manual optimization
self.optimizer_discriminator.zero_grad()
# compute residual, we detach because the weights of the generator
# model are fixed
# Compute residual. Detach since the model weights are fixed
residual = self.compute_residual(
samples=samples, equation=equation
).detach()
# compute competitive residual, the minus is because we maximise

# Compute competitive residual, then maximise the loss
competitive_residual = residual * discriminator_bets
loss_val = -self.loss(
torch.zeros_like(competitive_residual, requires_grad=True),
competitive_residual,
).as_subclass(torch.Tensor)
# backprop
)
# prepare for optimizer step called in training step
self.manual_backward(loss_val)
self.optimizer_discriminator.step()
return

def _train_model(self, samples, equation, discriminator_bets):
@@ -281,23 +259,20 @@ class CompetitivePINN(PINNInterface):
:return: The computed data loss.
:rtype: torch.Tensor
"""
# manual optimization
self.optimizer_model.zero_grad()
# compute residual (detached for discriminator) and log
# Compute residual
residual = self.compute_residual(samples=samples, equation=equation)
# store logging
with torch.no_grad():
loss_residual = self.loss(torch.zeros_like(residual), residual)
# compute competitive residual, discriminator_bets are detached becase
# we optimize only the generator model

# Compute competitive residual. Detach discriminator_bets
# to optimize only the generator model
competitive_residual = residual * discriminator_bets.detach()
loss_val = self.loss(
torch.zeros_like(competitive_residual, requires_grad=True),
competitive_residual,
).as_subclass(torch.Tensor)
# backprop
)
# prepare for optimizer step called in training step
self.manual_backward(loss_val)
self.optimizer_model.step()
return loss_residual

@property
@@ -308,7 +283,7 @@ class CompetitivePINN(PINNInterface):
:return: The neural network model.
:rtype: torch.nn.Module
"""
return self._model
return self.models[0]

@property
def discriminator(self):
@@ -318,7 +293,7 @@ class CompetitivePINN(PINNInterface):
:return: The discriminator model.
:rtype: torch.nn.Module
"""
return self._discriminator
return self.models[1]

@property
def optimizer_model(self):
@@ -348,7 +323,7 @@ class CompetitivePINN(PINNInterface):
:return: The scheduler for the neural network model.
:rtype: torch.optim.lr_scheduler._LRScheduler
"""
return self._schedulers[0]
return self.schedulers[0]

@property
def scheduler_discriminator(self):
@@ -358,4 +333,4 @@ class CompetitivePINN(PINNInterface):
:return: The scheduler for the discriminator.
:rtype: torch.optim.lr_scheduler._LRScheduler
"""
return self._schedulers[1]
return self.schedulers[1]
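Taken together, `_train_model` and `_train_discriminator` implement a zero-sum game: the discriminator ascends the weighted residual loss (hence the leading minus sign) while the model descends it, and `detach()` freezes the opponent during each half-step. A plain-torch sketch of one round of that game, under a hypothetical residual function:

    import torch

    net = torch.nn.Linear(1, 1)       # generator / model
    disc = torch.nn.Linear(1, 1)      # discriminator
    opt_net = torch.optim.Adam(net.parameters(), lr=1e-3)
    opt_disc = torch.optim.Adam(disc.parameters(), lr=1e-3)
    mse = torch.nn.MSELoss()
    x = torch.rand(16, 1)

    def residual(u):
        # stand-in for equation.residual
        return u - x ** 2

    # model half-step: discriminator bets are frozen
    opt_net.zero_grad()
    comp = residual(net(x)) * disc(x).detach()
    mse(torch.zeros_like(comp), comp).backward()
    opt_net.step()

    # discriminator half-step: residual is frozen, loss is maximised
    opt_disc.zero_grad()
    comp = residual(net(x)).detach() * disc(x)
    (-mse(torch.zeros_like(comp), comp)).backward()
    opt_disc.step()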
@@ -1,18 +1,15 @@
""" Module for GPINN """
""" Module for Gradient PINN. """

import torch


from torch.optim.lr_scheduler import ConstantLR

from .pinn import PINN
from pina.operators import grad
from pina.problem import SpatialProblem


class GPINN(PINN):
class GradientPINN(PINN):
r"""
Gradient Physics Informed Neural Network (GPINN) solver class.
Gradient Physics Informed Neural Network (GradientPINN) solver class.
This class implements Gradient Physics Informed Neural
Network solvers, using a user specified ``model`` to solve a specific
``problem``. It can be used for solving both forward and inverse problems.
@@ -42,7 +39,8 @@ class GPINN(PINN):
\nabla_{\mathbf{x}}\mathcal{L}(\mathcal{B}[\mathbf{u}](\mathbf{x}_i))


where :math:`\mathcal{L}` is a specific loss function, default Mean Square Error:
where :math:`\mathcal{L}` is a specific loss function,
default Mean Square Error:

.. math::
\mathcal{L}(v) = \| v \|^2_2.
@@ -61,44 +59,35 @@ class GPINN(PINN):
class.
"""

def __init__(
self,
problem,
model,
extra_features=None,
loss=torch.nn.MSELoss(),
optimizer=torch.optim.Adam,
optimizer_kwargs={"lr": 0.001},
scheduler=ConstantLR,
scheduler_kwargs={"factor": 1, "total_iters": 0},
):
def __init__(self,
problem,
model,
optimizer=None,
scheduler=None,
weighting=None,
loss=None):
"""
:param torch.nn.Module model: The neural network model to use.
:param AbstractProblem problem: The formulation of the problem. It must
inherit from at least
:class:`~pina.problem.spatial_problem.SpatialProblem` in order to
compute the gradient of the loss.
:param torch.nn.Module model: The neural network model to use.
:param torch.nn.Module loss: The loss function used as minimizer,
default :class:`torch.nn.MSELoss`.
:param torch.nn.Module extra_features: The additional input
features to use as augmented input.
:class:`~pina.problem.spatial_problem.SpatialProblem` to compute
the gradient of the loss.
:param torch.optim.Optimizer optimizer: The neural network optimizer to
use; default is :class:`torch.optim.Adam`.
:param dict optimizer_kwargs: Optimizer constructor keyword args.
:param torch.optim.LRScheduler scheduler: Learning
rate scheduler.
:param dict scheduler_kwargs: LR scheduler constructor keyword args.
use; default `None`.
:param torch.optim.LRScheduler scheduler: Learning rate scheduler;
default `None`.
:param WeightingInterface weighting: The weighting schema to use;
default `None`.
:param torch.nn.Module loss: The loss function to be minimized;
default `None`.
"""
super().__init__(
problem=problem,
model=model,
extra_features=extra_features,
loss=loss,
optimizer=optimizer,
optimizer_kwargs=optimizer_kwargs,
scheduler=scheduler,
scheduler_kwargs=scheduler_kwargs,
)
super().__init__(model=model,
problem=problem,
optimizer=optimizer,
scheduler=scheduler,
weighting=weighting,
loss=loss)

if not isinstance(self.problem, SpatialProblem):
raise ValueError(
"Gradient PINN computes the gradient of the "
@@ -124,10 +113,10 @@ class GPINN(PINN):
loss_value = self.loss(
torch.zeros_like(residual, requires_grad=True), residual
)
self.store_log(loss_value=float(loss_value))

# gradient PINN loss
loss_value = loss_value.reshape(-1, 1)
loss_value.labels = ["__LOSS"]
loss_value.labels = ["__loss"]
loss_grad = grad(loss_value, samples, d=self.problem.spatial_variables)
g_loss_phys = self.loss(
torch.zeros_like(loss_grad, requires_grad=True), loss_grad
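The gradient-enhanced term penalises not only the residual but also the spatial derivative of the residual loss, which is what `grad(loss_value, samples, ...)` computes above. A minimal sketch of the same idea with raw `torch.autograd` standing in for `pina.operators.grad`:

    import torch

    model = torch.nn.Sequential(
        torch.nn.Linear(1, 16), torch.nn.Tanh(), torch.nn.Linear(16, 1)
    )
    x = torch.rand(32, 1, requires_grad=True)

    residual = model(x) - torch.sin(x)   # stand-in PDE residual
    loss = residual.pow(2).mean()

    # dL/dx: the extra GradientPINN penalty, driven to zero with the residual
    loss_grad, = torch.autograd.grad(loss, x, create_graph=True)
    g_loss = loss_grad.pow(2).mean()
    total = loss + g_loss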
@@ -2,19 +2,12 @@

import torch

try:
from torch.optim.lr_scheduler import LRScheduler  # torch >= 2.0
except ImportError:
from torch.optim.lr_scheduler import (
_LRScheduler as LRScheduler,
)  # torch < 2.0


from .pinn_interface import PINNInterface
from ..solver import SingleSolverInterface
from ...problem import InverseProblem


class PINN(PINNInterface):
class PINN(PINNInterface, SingleSolverInterface):
r"""
Physics Informed Neural Network (PINN) solver class.
This class implements Physics Informed Neural
@@ -41,7 +34,8 @@ class PINN(PINNInterface):
\frac{1}{N}\sum_{i=1}^N
\mathcal{L}(\mathcal{B}[\mathbf{u}](\mathbf{x}_i))

where :math:`\mathcal{L}` is a specific loss function, default Mean Square Error:
where :math:`\mathcal{L}` is a specific loss function,
default Mean Square Error:

.. math::
\mathcal{L}(v) = \| v \|^2_2.
@@ -54,54 +48,31 @@ class PINN(PINNInterface):
DOI: `10.1038 <https://doi.org/10.1038/s42254-021-00314-5>`_.
"""

__name__ = 'PINN'

def __init__(
self,
problem,
model,
loss=None,
optimizer=None,
scheduler=None,
):
def __init__(self,
problem,
model,
optimizer=None,
scheduler=None,
weighting=None,
loss=None):
"""
:param AbstractProblem problem: The formulation of the problem.
:param torch.nn.Module model: The neural network model to use.
:param torch.nn.Module loss: The loss function used as minimizer,
default :class:`torch.nn.MSELoss`.
:param torch.nn.Module extra_features: The additional input
features to use as augmented input.
:param AbstractProblem problem: The formulation of the problem.
:param torch.optim.Optimizer optimizer: The neural network optimizer to
use; default is :class:`torch.optim.Adam`.
:param dict optimizer_kwargs: Optimizer constructor keyword args.
:param torch.optim.LRScheduler scheduler: Learning
rate scheduler.
:param dict scheduler_kwargs: LR scheduler constructor keyword args.
use; default `None`.
:param torch.optim.LRScheduler scheduler: Learning rate scheduler;
default `None`.
:param WeightingInterface weighting: The weighting schema to use;
default `None`.
:param torch.nn.Module loss: The loss function to be minimized;
default `None`.
"""
super().__init__(
models=model,
problem=problem,
loss=loss,
optimizers=optimizer,
schedulers=scheduler,
)

# assign variables
self._neural_net = self.models[0]

def forward(self, x):
r"""
Forward pass implementation for the PINN solver. It returns the function
evaluation :math:`\mathbf{u}(\mathbf{x})` at the control points
:math:`\mathbf{x}`.

:param LabelTensor x: Input tensor for the PINN solver. It expects
a tensor :math:`N \times D`, where :math:`N` the number of points
in the mesh, :math:`D` the dimension of the problem,
:return: PINN solution evaluated at contro points.
:rtype: LabelTensor
"""
return self.neural_net(x)
super().__init__(model=model,
problem=problem,
optimizer=optimizer,
scheduler=scheduler,
weighting=weighting,
loss=loss)

def loss_phys(self, samples, equation):
"""
@@ -117,46 +88,31 @@ class PINN(PINNInterface):
"""
residual = self.compute_residual(samples=samples, equation=equation)
loss_value = self.loss(
torch.zeros_like(residual), residual
torch.zeros_like(residual, requires_grad=True), residual
)
return loss_value

def configure_optimizers(self):
"""
Optimizer configuration for the PINN
solver.
Optimizer configuration for the PINN solver.

:return: The optimizers and the schedulers
:rtype: tuple(list, list)
"""
# if the problem is an InverseProblem, add the unknown parameters
# to the parameters that the optimizer needs to optimize


self._optimizer.hook(self._model.parameters())
# If the problem is an InverseProblem, add the unknown parameters
# to the parameters to be optimized.
self.optimizer.hook(self.model.parameters())
if isinstance(self.problem, InverseProblem):
self._optimizer.optimizer_instance.add_param_group(
{
"params": [
self._params[var]
for var in self.problem.unknown_variables
]
}
)
self._scheduler.hook(self._optimizer)
return ([self._optimizer.optimizer_instance],
[self._scheduler.scheduler_instance])

@property
def scheduler(self):
"""
Scheduler for the PINN training.
"""
return self._scheduler

@property
def neural_net(self):
"""
Neural network for the PINN training.
"""
return self._neural_net
self.optimizer.instance.add_param_group(
{
"params": [
self._params[var]
for var in self.problem.unknown_variables
]
}
)
self.scheduler.hook(self.optimizer)
return (
[self.optimizer.instance],
[self.scheduler.instance]
)
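`configure_optimizers` hooks the (possibly defaulted) optimizer to the model parameters, optionally appends the inverse-problem unknowns, and returns the tuple-of-lists shape Lightning expects. A minimal Lightning sketch of that contract, outside PINA and with placeholder networks:

    import torch
    from lightning.pytorch import LightningModule

    class TinySolver(LightningModule):
        """Sketch of the optimizer/scheduler contract used above."""

        def __init__(self):
            super().__init__()
            self.net = torch.nn.Linear(2, 1)

        def training_step(self, batch, batch_idx):
            x, y = batch
            return torch.nn.functional.mse_loss(self.net(x), y)

        def configure_optimizers(self):
            opt = torch.optim.Adam(self.net.parameters(), lr=1e-3)
            sched = torch.optim.lr_scheduler.ConstantLR(opt, factor=1.0, total_iters=0)
            # same ([optimizers], [schedulers]) shape returned by PINN
            return [opt], [sched]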
@@ -1,17 +1,18 @@
""" Module for PINN """
""" Module for Physics Informed Neural Network Interface."""

from abc import ABCMeta, abstractmethod
import torch
from torch.nn.modules.loss import _Loss

from ..solver import SolverInterface
from ...utils import check_consistency
from ...loss.loss_interface import LossInterface
from ...problem import InverseProblem
from ...optim import TorchOptimizer, TorchScheduler
from ...condition import InputOutputPointsCondition, \
InputPointsEquationCondition, DomainEquationCondition

torch.pi = torch.acos(torch.zeros(1)).item() * 2  # which is 3.1415927410125732
from ...condition import (
InputOutputPointsCondition,
InputPointsEquationCondition,
DomainEquationCondition
)


class PINNInterface(SolverInterface, metaclass=ABCMeta):
@@ -19,57 +20,34 @@ class PINNInterface(SolverInterface, metaclass=ABCMeta):
Base PINN solver class. This class implements the Solver Interface
for Physics Informed Neural Network solvers.

This class can be used to
define PINNs with multiple ``optimizers``, and/or ``models``.
By default it takes
an :class:`~pina.problem.abstract_problem.AbstractProblem`, so it is up
to the user to choose which problem the implemented solver inheriting from
this class is suitable for.
This class can be used to define PINNs with multiple ``optimizers``,
and/or ``models``.
By default it takes :class:`~pina.problem.abstract_problem.AbstractProblem`,
so the user can choose what type of problem the implemented solver,
inheriting from this class, is designed to solve.
"""
accepted_conditions_types = (InputOutputPointsCondition,
InputPointsEquationCondition, DomainEquationCondition)
accepted_conditions_types = (
InputOutputPointsCondition,
InputPointsEquationCondition,
DomainEquationCondition
)

def __init__(
self,
models,
problem,
loss=None,
optimizers=None,
schedulers=None,
):
def __init__(self,
problem,
loss=None,
**kwargs):
"""
:param models: Multiple torch neural network models instances.
:type models: list(torch.nn.Module)
:param problem: A problem definition instance.
:type problem: AbstractProblem
:param list(torch.optim.Optimizer) optimizer: A list of neural network
optimizers to use.
:param list(dict) optimizer_kwargs: A list of optimizer constructor
keyword args.
:param list(torch.nn.Module) extra_features: The additional input
features to use as augmented input. If ``None`` no extra features
are passed. If it is a list of :class:`torch.nn.Module`,
the extra feature list is passed to all models. If it is a list
of extra features' lists, each single list of extra feature
is passed to a model.
:param torch.nn.Module loss: The loss function used as minimizer,
default :class:`torch.nn.MSELoss`.
:param AbstractProblem problem: A problem definition instance.
:param torch.nn.Module loss: The loss function to be minimized,
default `None`.
"""
if optimizers is None:
optimizers = TorchOptimizer(torch.optim.Adam, lr=0.001)

if schedulers is None:
schedulers = TorchScheduler(torch.optim.lr_scheduler.ConstantLR)

if loss is None:
loss = torch.nn.MSELoss()

super().__init__(
models=models,
problem=problem,
optimizers=optimizers,
schedulers=schedulers,
)
super().__init__(problem=problem,
use_lt=True,
**kwargs)

# check consistency
check_consistency(loss, (LossInterface, _Loss), subclass=False)
@@ -85,86 +63,24 @@ class PINNInterface(SolverInterface, metaclass=ABCMeta):
self._params = None
self._clamp_params = lambda: None

# variable used internally to store residual losses at each epoch
# this variable save the residual at each iteration (not weighted)
self.__logged_res_losses = []
self.__metric = None

# variable used internally in pina for logging. This variable points to
# the current condition during the training step and returns the
# condition name. Whenever :meth:`store_log` is called the logged
# variable will be stored with name = self.__logged_metric
self.__logged_metric = None

self._model = self._pina_models[0]
self._optimizer = self._pina_optimizers[0]
self._scheduler = self._pina_schedulers[0]

def training_step(self, batch):
"""
The Physics Informed Solver Training Step. This function takes care
of the physics informed training step, and it must not be override
if not intentionally. It handles the batching mechanism, the workload
division for the various conditions, the inverse problem clamping,
and loggers.

:param tuple batch: The batch element in the dataloader.
:param int batch_idx: The batch index.
:return: The sum of the loss functions.
:rtype: LabelTensor
"""

condition_loss = []
for condition_name, points in batch:
if 'output_points' in points:
input_pts, output_pts = points['input_points'], points['output_points']

loss_ = self.loss_data(
input_pts=input_pts, output_pts=output_pts)
condition_loss.append(loss_.as_subclass(torch.Tensor))
else:
input_pts = points['input_points']

condition = self.problem.conditions[condition_name]

loss_ = self.loss_phys(
input_pts.requires_grad_(), condition.equation)
condition_loss.append(loss_.as_subclass(torch.Tensor))
condition_loss.append(loss_.as_subclass(torch.Tensor))
# clamp unknown parameters in InverseProblem (if needed)
self._clamp_params()
loss = sum(condition_loss)
self.log('train_loss', loss, prog_bar=True, on_epoch=True,
logger=True, batch_size=self.get_batch_size(batch),
sync_dist=True)
def optimization_cycle(self, batch):
return self._run_optimization_cycle(batch, self.loss_phys)

@torch.set_grad_enabled(True)
def validation_step(self, batch):
losses = self._run_optimization_cycle(batch, self._residual_loss)
loss = self.weighting.aggregate(losses).as_subclass(torch.Tensor)
self.store_log('val_loss', loss, self.get_batch_size(batch))
return loss

def validation_step(self, batch):
"""
TODO: add docstring
"""
condition_loss = []
for condition_name, points in batch:
if 'output_points' in points:
input_pts, output_pts = points['input_points'], points['output_points']
loss_ = self.loss_data(
input_pts=input_pts, output_pts=output_pts)
condition_loss.append(loss_.as_subclass(torch.Tensor))
else:
input_pts = points['input_points']

condition = self.problem.conditions[condition_name]
with torch.set_grad_enabled(True):
loss_ = self.loss_phys(
input_pts.requires_grad_(), condition.equation)
condition_loss.append(loss_.as_subclass(torch.Tensor))
condition_loss.append(loss_.as_subclass(torch.Tensor))
# clamp unknown parameters in InverseProblem (if needed)

loss = sum(condition_loss)
self.log('val_loss', loss, on_epoch=True, prog_bar=True,
logger=True, batch_size=self.get_batch_size(batch),
sync_dist=True)
@torch.set_grad_enabled(True)
def test_step(self, batch):
losses = self._run_optimization_cycle(batch, self._residual_loss)
loss = self.weighting.aggregate(losses).as_subclass(torch.Tensor)
self.store_log('test_loss', loss, self.get_batch_size(batch))
return loss

def loss_data(self, input_pts, output_pts):
"""
@@ -196,11 +112,6 @@ class PINNInterface(SolverInterface, metaclass=ABCMeta):
"""
pass

def configure_optimizers(self):
self._optimizer.hook(self._model)
self.schedulers.hook(self._optimizer)
return [self.optimizers.instance]#, self.schedulers.scheduler_instance

def compute_residual(self, samples, equation):
"""
Compute the residual for Physics Informed learning. This function
@@ -215,52 +126,44 @@ class PINNInterface(SolverInterface, metaclass=ABCMeta):
"""
try:
residual = equation.residual(samples, self.forward(samples))
except (
TypeError
):  # this occurs when the function has three inputs, i.e. inverse problem
except TypeError:
# this occurs when the function has three inputs (inverse problem)
residual = equation.residual(
samples, self.forward(samples), self._params
samples,
self.forward(samples),
self._params
)
return residual

def store_log(self, loss_value):
"""
Stores the loss value in the logger. This function should be
called for all conditions. It automatically handles the storing
conditions names. It must be used
anytime a specific variable wants to be stored for a specific condition.
A simple example is to use the variable to store the residual.
def _residual_loss(self, samples, equation):
residuals = self.compute_residual(samples, equation)
return self.loss(residuals, torch.zeros_like(residuals))

:param str name: The name of the loss.
:param torch.Tensor loss_value: The value of the loss.
"""
batch_size = self.trainer.data_module.batch_size \
if self.trainer.data_module.batch_size is not None else 999

self.log(
self.__logged_metric + "_loss",
loss_value,
prog_bar=True,
logger=True,
on_epoch=True,
on_step=True,
batch_size=batch_size,
)
self.__logged_res_losses.append(loss_value)

def save_logs_and_release(self):
"""
At the end of each epoch we free the stored losses. This function
should not be override if not intentionally.
"""
if self.__logged_res_losses:
# storing mean loss
self.__logged_metric = "mean"
self.store_log(
sum(self.__logged_res_losses) / len(self.__logged_res_losses)
)
# free the logged losses
self.__logged_res_losses = []
def _run_optimization_cycle(self, batch, loss_residuals):
condition_loss = {}
for condition_name, points in batch:
self.__metric = condition_name
# if equations are passed
if 'output_points' not in points:
input_pts = points['input_points']
condition = self.problem.conditions[condition_name]
loss = loss_residuals(
input_pts.requires_grad_(),
condition.equation
)
# if data are passed
else:
input_pts = points['input_points']
output_pts = points['output_points']
loss = self.loss_data(
input_pts=input_pts.requires_grad_(),
output_pts=output_pts
)
# append loss
condition_loss[condition_name] = loss
# clamp unknown parameters in InverseProblem (if needed)
self._clamp_params()
return condition_loss
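`_run_optimization_cycle` returns a dictionary of per-condition losses; the weighting schema then aggregates them into the scalar that each `*_step` logs and backpropagates. A toy sketch of that contract, with a plain unweighted sum standing in for `WeightingInterface.aggregate`:

    import torch

    condition_loss = {
        "boundary": torch.tensor(0.42),
        "interior": torch.tensor(0.10),
    }

    def aggregate(losses):
        # stand-in for self.weighting.aggregate: unweighted sum
        return sum(losses.values())

    total = aggregate(condition_loss)  # tensor(0.5200), the value that gets logged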
def _clamp_inverse_problem_params(self):
"""
@@ -283,8 +186,6 @@ class PINNInterface(SolverInterface, metaclass=ABCMeta):
@property
def current_condition_name(self):
"""
Returns the condition name. This function can be used inside the
:meth:`loss_phys` to extract the condition at which the loss is
computed.
The current condition name.
"""
return self.__logged_metric
return self.__metric
@@ -1,8 +1,8 @@
""" Module for RBAPINN. """
""" Module for Residual-Based Attention PINN. """

from copy import deepcopy
import torch
from torch.optim.lr_scheduler import ConstantLR

from .pinn import PINN
from ...utils import check_consistency

@@ -66,51 +66,44 @@ class RBAPINN(PINN):
j.cma.2024.116805 <https://doi.org/10.1016/j.cma.2024.116805>`_.
"""

def __init__(
self,
problem,
model,
extra_features=None,
loss=torch.nn.MSELoss(),
optimizer=torch.optim.Adam,
optimizer_kwargs={"lr": 0.001},
scheduler=ConstantLR,
scheduler_kwargs={"factor": 1, "total_iters": 0},
eta=0.001,
gamma=0.999,
):
def __init__(self,
problem,
model,
optimizer=None,
scheduler=None,
weighting=None,
loss=None,
eta=0.001,
gamma=0.999):
"""
:param AbstractProblem problem: The formulation of the problem.
:param torch.nn.Module model: The neural network model to use.
:param torch.nn.Module extra_features: The additional input
features to use as augmented input.
:param torch.nn.Module loss: The loss function used as minimizer,
default :class:`torch.nn.MSELoss`.
:param AbstractProblem problem: The formulation of the problem.
:param torch.optim.Optimizer optimizer: The neural network optimizer to
use; default is :class:`torch.optim.Adam`.
:param dict optimizer_kwargs: Optimizer constructor keyword args.
:param torch.optim.LRScheduler scheduler: Learning
rate scheduler.
:param dict scheduler_kwargs: LR scheduler constructor keyword args.
:param float | int eta: The learning rate for the
weights of the residual.
:param float gamma: The decay parameter in the update of the weights
of the residual.
use; default `None`.
:param torch.optim.LRScheduler scheduler: Learning rate scheduler;
default `None`.
:param WeightingInterface weighting: The weighting schema to use;
default `None`.
:param torch.nn.Module loss: The loss function to be minimized;
default `None`.
:param float | int eta: The learning rate for the weights of the
residual; default 0.001.
:param float gamma: The decay parameter in the update of the weights
of the residual. Must be between 0 and 1; default 0.999.
"""
super().__init__(
problem=problem,
model=model,
extra_features=extra_features,
loss=loss,
optimizer=optimizer,
optimizer_kwargs=optimizer_kwargs,
scheduler=scheduler,
scheduler_kwargs=scheduler_kwargs,
)
super().__init__(model=model,
problem=problem,
optimizer=optimizer,
scheduler=scheduler,
weighting=weighting,
loss=loss)

# check consistency
check_consistency(eta, (float, int))
check_consistency(gamma, float)
assert (
0 < gamma < 1
), f"Invalid range: expected 0 < gamma < 1, got {gamma=}"
self.eta = eta
self.gamma = gamma

@@ -120,9 +113,17 @@ class RBAPINN(PINN):
self.weights[condition_name] = 0

# define vectorial loss
self._vectorial_loss = deepcopy(loss)
self._vectorial_loss = deepcopy(self.loss)
self._vectorial_loss.reduction = "none"

# for now RBAPINN is implemented only for batch_size = None
def on_train_start(self):
if self.trainer.batch_size is not None:
raise NotImplementedError("RBAPINN only works with full batch "
"size, set batch_size=None inside the "
"Trainer to use the solver.")
return super().on_train_start()

def _vect_to_scalar(self, loss_value):
"""
Elaboration of the pointwise loss.
@@ -159,16 +160,13 @@ class RBAPINN(PINN):
cond = self.current_condition_name

r_norm = (
self.eta
* torch.abs(residual)
self.eta * torch.abs(residual)
/ (torch.max(torch.abs(residual)) + 1e-12)
)
self.weights[cond] = (self.gamma * self.weights[cond] + r_norm).detach()
self.weights[cond] = (self.gamma*self.weights[cond] + r_norm).detach()

loss_value = self._vectorial_loss(
torch.zeros_like(residual, requires_grad=True), residual
)

self.store_log(loss_value=float(self._vect_to_scalar(loss_value)))

return self._vect_to_scalar(self.weights[cond] ** 2 * loss_value)
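The residual-based attention rule keeps one multiplicative weight per collocation point, decayed by `gamma` and nudged toward the normalised residual magnitude by `eta`; detaching makes the weights act as constants in the model's loss. A standalone sketch of the same update:

    import torch

    eta, gamma = 0.001, 0.999
    residual = torch.randn(100, 1)
    weights = torch.zeros_like(residual)

    # w <- gamma * w + eta * |r| / (max|r| + 1e-12), detached from the graph
    r_norm = eta * torch.abs(residual) / (torch.max(torch.abs(residual)) + 1e-12)
    weights = (gamma * weights + r_norm).detach()

    pointwise_loss = residual.pow(2)
    weighted_loss = (weights ** 2 * pointwise_loss).mean()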
@@ -1,30 +1,23 @@
""" Module for Self-Adaptive PINN. """

import torch
from copy import deepcopy

try:
from torch.optim.lr_scheduler import LRScheduler  # torch >= 2.0
except ImportError:
from torch.optim.lr_scheduler import (
_LRScheduler as LRScheduler,
)  # torch < 2.0

from .pinn_interface import PINNInterface
from pina.utils import check_consistency
from pina.problem import InverseProblem

from torch.optim.lr_scheduler import ConstantLR
from ..solver import MultiSolverInterface
from .pinn_interface import PINNInterface


class Weights(torch.nn.Module):
"""
This class aims to implements the mask model for
self adaptive weights of the Self-Adaptive
PINN solver.
This class aims to implement the mask model for the
self-adaptive weights of the Self-Adaptive PINN solver.
"""

def __init__(self, func):
"""
:param torch.nn.Module func: the mask module of SAPINN
:param torch.nn.Module func: the mask module of SAPINN.
"""
super().__init__()
check_consistency(func, torch.nn.Module)
@@ -34,8 +27,7 @@ class Weights(torch.nn.Module):
def forward(self):
"""
Forward pass implementation for the mask module.
It returns the function on the weights
evaluation.
It returns the function on the weights evaluation.

:return: evaluation of self adaptive weights through the mask.
:rtype: torch.Tensor
@@ -43,10 +35,10 @@ class Weights(torch.nn.Module):
return self.func(self.sa_weights)
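Each `Weights` module owns one trainable tensor of self-adaptive weights and pushes it through a monotone mask such as a sigmoid. A minimal sketch of the pattern; note the `n_points` argument is hypothetical, since in the solver the `sa_weights` tensor is sized later from the training points:

    import torch

    class WeightsSketch(torch.nn.Module):
        """Trainable per-point weights squashed by a mask function."""

        def __init__(self, func, n_points):
            super().__init__()
            self.func = func
            self.sa_weights = torch.nn.Parameter(torch.rand(n_points, 1))

        def forward(self):
            return self.func(self.sa_weights)

    mask = WeightsSketch(torch.nn.Sigmoid(), n_points=100)
    w = mask()  # per-point weights in (0, 1)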
class SAPINN(PINNInterface):
|
||||
class SelfAdaptivePINN(PINNInterface, MultiSolverInterface):
|
||||
r"""
|
||||
Self Adaptive Physics Informed Neural Network (SAPINN) solver class.
|
||||
This class implements Self-Adaptive Physics Informed Neural
|
||||
Self Adaptive Physics Informed Neural Network (SelfAdaptivePINN)
|
||||
solver class. This class implements Self-Adaptive Physics Informed Neural
|
||||
Network solvers, using a user specified ``model`` to solve a specific
|
||||
``problem``. It can be used for solving both forward and inverse problems.
|
||||
|
||||
@@ -107,97 +99,55 @@ class SAPINN(PINNInterface):
|
||||
j.jcp.2022.111722 <https://doi.org/10.1016/j.jcp.2022.111722>`_.
|
||||
"""
|
||||
|
||||
def __init__(
|
||||
self,
|
||||
problem,
|
||||
model,
|
||||
weights_function=torch.nn.Sigmoid(),
|
||||
extra_features=None,
|
||||
loss=torch.nn.MSELoss(),
|
||||
optimizer_model=torch.optim.Adam,
|
||||
optimizer_model_kwargs={"lr": 0.001},
|
||||
optimizer_weights=torch.optim.Adam,
|
||||
optimizer_weights_kwargs={"lr": 0.001},
|
||||
scheduler_model=ConstantLR,
|
||||
scheduler_model_kwargs={"factor": 1, "total_iters": 0},
|
||||
scheduler_weights=ConstantLR,
|
||||
scheduler_weights_kwargs={"factor": 1, "total_iters": 0},
|
||||
):
|
||||
def __init__(self,
|
||||
problem,
|
||||
model,
|
||||
weight_function=torch.nn.Sigmoid(),
|
||||
optimizer_model=None,
|
||||
optimizer_weights=None,
|
||||
scheduler_model=None,
|
||||
scheduler_weights=None,
|
||||
weighting=None,
|
||||
loss=None):
|
||||
"""
|
||||
:param AbstractProblem problem: The formualation of the problem.
|
||||
:param torch.nn.Module model: The neural network model to use
|
||||
for the model.
|
||||
:param torch.nn.Module weights_function: The neural network model
|
||||
related to the mask of SAPINN.
|
||||
default :obj:`~torch.nn.Sigmoid`.
|
||||
:param list(torch.nn.Module) extra_features: The additional input
|
||||
features to use as augmented input. If ``None`` no extra features
|
||||
are passed. If it is a list of :class:`torch.nn.Module`,
|
||||
the extra feature list is passed to all models. If it is a list
|
||||
of extra features' lists, each single list of extra feature
|
||||
is passed to a model.
|
||||
:param torch.nn.Module loss: The loss function used as minimizer,
|
||||
default :class:`torch.nn.MSELoss`.
|
||||
:param torch.optim.Optimizer optimizer_model: The neural
|
||||
network optimizer to use for the model network
|
||||
, default is `torch.optim.Adam`.
|
||||
:param dict optimizer_model_kwargs: Optimizer constructor keyword
|
||||
args. for the model.
|
||||
:param torch.optim.Optimizer optimizer_weights: The neural
|
||||
network optimizer to use for mask model model,
|
||||
default is `torch.optim.Adam`.
|
||||
:param dict optimizer_weights_kwargs: Optimizer constructor
|
||||
keyword args. for the mask module.
|
||||
:param torch.optim.LRScheduler scheduler_model: Learning
|
||||
rate scheduler for the model.
|
||||
:param dict scheduler_model_kwargs: LR scheduler constructor
|
||||
keyword args.
|
||||
:param torch.optim.LRScheduler scheduler_weights: Learning
|
||||
rate scheduler for the mask model.
|
||||
:param dict scheduler_model_kwargs: LR scheduler constructor
|
||||
keyword args.
|
||||
:param AbstractProblem problem: The formulation of the problem.
|
||||
:param torch.nn.Module model: The neural network model to use for
|
||||
the model.
|
||||
:param torch.nn.Module weight_function: The neural network model
|
||||
related to the Self-Adaptive PINN mask; default `torch.nn.Sigmoid()`
|
||||
:param torch.optim.Optimizer optimizer_model: The neural network
|
||||
optimizer to use for the model network; default `None`.
|
||||
:param torch.optim.Optimizer optimizer_weights: The neural network
|
||||
optimizer to use for mask model; default `None`.
|
||||
:param torch.optim.LRScheduler scheduler_model: Learning rate scheduler
|
||||
for the model; default `None`.
|
||||
:param torch.optim.LRScheduler scheduler_weights: Learning rate
|
||||
scheduler for the mask model; default `None`.
|
||||
:param WeightingInterface weighting: The weighting schema to use;
|
||||
default `None`.
|
||||
:param torch.nn.Module loss: The loss function to be minimized;
|
||||
default `None`.
|
||||
"""
|
||||
|
||||
# check consistency weitghs_function
|
||||
check_consistency(weights_function, torch.nn.Module)
|
||||
check_consistency(weight_function, torch.nn.Module)
|
||||
|
||||
# create models for weights
|
||||
weights_dict = {}
|
||||
for condition_name in problem.conditions:
|
||||
weights_dict[condition_name] = Weights(weights_function)
|
||||
weights_dict[condition_name] = Weights(weight_function)
|
||||
weights_dict = torch.nn.ModuleDict(weights_dict)
|
||||
|
||||
super().__init__(
|
||||
models=[model, weights_dict],
|
||||
problem=problem,
|
||||
optimizers=[optimizer_model, optimizer_weights],
|
||||
optimizers_kwargs=[
|
||||
optimizer_model_kwargs,
|
||||
optimizer_weights_kwargs,
|
||||
],
|
||||
extra_features=extra_features,
|
||||
loss=loss,
|
||||
)
|
||||
super().__init__(models=[model, weights_dict],
|
||||
problem=problem,
|
||||
optimizers=[optimizer_model, optimizer_weights],
|
||||
schedulers=[scheduler_model, scheduler_weights],
|
||||
weighting=weighting,
|
||||
loss=loss)
|
||||
|
||||
# set automatic optimization
|
||||
# Set automatic optimization to False
|
||||
self.automatic_optimization = False
|
||||
|
||||
# check consistency
|
||||
check_consistency(scheduler_model, LRScheduler, subclass=True)
|
||||
check_consistency(scheduler_model_kwargs, dict)
|
||||
check_consistency(scheduler_weights, LRScheduler, subclass=True)
|
||||
check_consistency(scheduler_weights_kwargs, dict)
|
||||
|
||||
# assign schedulers
|
||||
self._schedulers = [
|
||||
scheduler_model(self.optimizers[0], **scheduler_model_kwargs),
|
||||
scheduler_weights(self.optimizers[1], **scheduler_weights_kwargs),
|
||||
]
|
||||
|
||||
self._model = self.models[0]
|
||||
self._weights = self.models[1]
|
||||
|
||||
self._vectorial_loss = deepcopy(loss)
|
||||
self._vectorial_loss = deepcopy(self.loss)
|
||||
self._vectorial_loss.reduction = "none"
|
||||
|
||||
def forward(self, x):
|
||||
@@ -213,7 +163,23 @@ class SAPINN(PINNInterface):
|
||||
:return: PINN solution.
|
||||
:rtype: LabelTensor
|
||||
"""
|
||||
return self.neural_net(x)
|
||||
return self.model(x)
|
||||
|
||||
def training_step(self, batch):
|
||||
"""
|
||||
Solver training step, overridden to perform manual optimization.
|
||||
|
||||
:param batch: The batch element in the dataloader.
|
||||
:type batch: tuple
|
||||
:return: The sum of the loss functions.
|
||||
:rtype: LabelTensor
|
||||
"""
|
||||
self.optimizer_model.instance.zero_grad()
|
||||
self.optimizer_weights.instance.zero_grad()
|
||||
loss = super().training_step(batch)
|
||||
self.optimizer_model.instance.step()
|
||||
self.optimizer_weights.instance.step()
|
||||
return loss
|
||||
|
||||
def loss_phys(self, samples, equation):
|
||||
"""
|
||||
@@ -227,86 +193,72 @@ class SAPINN(PINNInterface):
|
||||
samples and equation.
|
||||
:rtype: torch.Tensor
|
||||
"""
|
||||
# train weights
|
||||
self.optimizer_weights.zero_grad()
|
||||
weighted_loss, _ = self._loss_phys(samples, equation)
|
||||
# Train the weights
|
||||
weighted_loss = self._loss_phys(samples, equation)
|
||||
loss_value = -weighted_loss.as_subclass(torch.Tensor)
|
||||
self.manual_backward(loss_value)
|
||||
self.optimizer_weights.step()
|
||||
|
||||
# detaching samples from the computational graph to erase it and setting
|
||||
# the gradient to true to create a new computational graph.
|
||||
# Detach samples from the existing computational graph and
|
||||
# create a new one by setting requires_grad to True.
|
||||
# In alternative set `retain_graph=True`.
|
||||
samples = samples.detach()
|
||||
samples.requires_grad = True
|
||||
samples.requires_grad_()# = True
|
||||
|
||||
# train model
|
||||
self.optimizer_model.zero_grad()
|
||||
weighted_loss, loss = self._loss_phys(samples, equation)
|
||||
# Train the model
|
||||
weighted_loss = self._loss_phys(samples, equation)
|
||||
loss_value = weighted_loss.as_subclass(torch.Tensor)
|
||||
self.manual_backward(loss_value)
|
||||
self.optimizer_model.step()
|
||||
|
||||
# store loss without weights
|
||||
self.store_log(loss_value=float(loss))
|
||||
return loss_value
|
||||
|
||||
def loss_data(self, input_tensor, output_tensor):
|
||||
def loss_data(self, input_pts, output_pts):
|
||||
"""
|
||||
Computes the data loss for the SAPINN solver based on input and
|
||||
output. It computes the loss between the
|
||||
network output against the true solution.
|
||||
|
||||
:param LabelTensor input_tensor: The input to the neural networks.
|
||||
:param LabelTensor output_tensor: The true solution to compare the
|
||||
:param LabelTensor input_pts: The input to the neural networks.
|
||||
:param LabelTensor output_pts: The true solution to compare the
|
||||
network solution.
|
||||
:return: The computed data loss.
|
||||
:rtype: torch.Tensor
|
||||
"""
|
||||
# train weights
|
||||
self.optimizer_weights.zero_grad()
|
||||
weighted_loss, _ = self._loss_data(input_tensor, output_tensor)
|
||||
loss_value = -weighted_loss.as_subclass(torch.Tensor)
|
||||
residual = self.forward(input_pts) - output_pts
|
||||
loss = self._vectorial_loss(
|
||||
torch.zeros_like(residual, requires_grad=True), residual
|
||||
)
|
||||
loss_value = self._vect_to_scalar(loss).as_subclass(torch.Tensor)
|
||||
self.manual_backward(loss_value)
|
||||
self.optimizer_weights.step()
|
||||
|
||||
# detaching samples from the computational graph to erase it and setting
|
||||
# the gradient to true to create a new computational graph.
|
||||
# In alternative set `retain_graph=True`.
|
||||
input_tensor = input_tensor.detach()
|
||||
input_tensor.requires_grad = True
|
||||
|
||||
# train model
|
||||
self.optimizer_model.zero_grad()
|
||||
weighted_loss, loss = self._loss_data(input_tensor, output_tensor)
|
||||
loss_value = weighted_loss.as_subclass(torch.Tensor)
|
||||
self.manual_backward(loss_value)
|
||||
self.optimizer_model.step()
|
||||
|
||||
# store loss without weights
|
||||
self.store_log(loss_value=float(loss))
|
||||
return loss_value
|
||||
|
||||
def configure_optimizers(self):
|
||||
"""
|
||||
Optimizer configuration for the SAPINN
|
||||
solver.
|
||||
Optimizer configuration for the SelfAdaptive PINN solver.
|
||||
|
||||
:return: The optimizers and the schedulers
|
||||
:rtype: tuple(list, list)
|
||||
"""
|
||||
# if the problem is an InverseProblem, add the unknown parameters
|
||||
# to the parameters that the optimizer needs to optimize
|
||||
# If the problem is an InverseProblem, add the unknown parameters
|
||||
# to the parameters to be optimized
|
||||
self.optimizer_model.hook(self.model.parameters())
|
||||
self.optimizer_weights.hook(self.weights_dict.parameters())
|
||||
if isinstance(self.problem, InverseProblem):
|
||||
self.optimizers[0].add_param_group(
|
||||
{
|
||||
"params": [
|
||||
self._params[var]
|
||||
for var in self.problem.unknown_variables
|
||||
]
|
||||
}
|
||||
)
|
||||
return self.optimizers, self._schedulers
|
||||
self.optimizer_model.instance.add_param_group(
|
||||
{
|
||||
"params": [
|
||||
self._params[var]
|
||||
for var in self.problem.unknown_variables
|
||||
]
|
||||
}
|
||||
)
|
||||
self.scheduler_model.hook(self.optimizer_model)
|
||||
self.scheduler_weights.hook(self.optimizer_weights)
|
||||
return (
|
||||
[self.optimizer_model.instance,
|
||||
self.optimizer_weights.instance],
|
||||
[self.scheduler_model.instance,
|
||||
self.scheduler_weights.instance]
|
||||
)
|
||||
|
||||
def on_train_batch_end(self, outputs, batch, batch_idx):
|
||||
"""
|
||||
@@ -322,9 +274,11 @@ class SAPINN(PINNInterface):
|
||||
:rtype: Any
|
||||
"""
|
||||
# increase by one the counter of optimization to save loggers
|
||||
self.trainer.fit_loop.epoch_loop.manual_optimization.optim_step_progress.total.completed += (
|
||||
1
|
||||
)
|
||||
(
|
||||
self.trainer.fit_loop.epoch_loop.manual_optimization
|
||||
.optim_step_progress.total.completed
|
||||
) += 1
|
||||
|
||||
return super().on_train_batch_end(outputs, batch, batch_idx)
|
||||
|
||||
def on_train_start(self):
|
||||
@@ -336,32 +290,45 @@ class SAPINN(PINNInterface):
|
||||
method ``on_train_start``.
|
||||
:rtype: Any
|
||||
"""
|
||||
if self.trainer.batch_size is not None:
|
||||
raise NotImplementedError("SelfAdaptivePINN only works with full "
|
||||
"batch size, set batch_size=None inside "
|
||||
"the Trainer to use the solver.")
|
||||
device = torch.device(
|
||||
self.trainer._accelerator_connector._accelerator_flag
|
||||
)
|
||||
for condition_name, tensor in self.problem.input_pts.items():
|
||||
self.weights_dict.torchmodel[condition_name].sa_weights.data = (
|
||||
|
||||
# Initialize the self adaptive weights only for training points
|
||||
for condition_name, tensor in (
|
||||
self.trainer.data_module.train_dataset.input_points.items()
|
||||
):
|
||||
self.weights_dict[condition_name].sa_weights.data = (
|
||||
torch.rand((tensor.shape[0], 1), device=device)
|
||||
)
|
||||
return super().on_train_start()
|
||||
|
||||
def on_load_checkpoint(self, checkpoint):
|
||||
"""
|
||||
Overriding the Pytorch Lightning ``on_load_checkpoint`` to handle
|
||||
checkpoints for Self Adaptive Weights. This method should not be
|
||||
Override the Pytorch Lightning ``on_load_checkpoint`` to handle
|
||||
checkpoints for Self-Adaptive Weights. This method should not be
|
||||
overridden if not intentionally.
|
||||
|
||||
:param dict checkpoint: Pytorch Lightning checkpoint dict.
|
||||
"""
|
||||
for condition_name, tensor in self.problem.input_pts.items():
|
||||
self.weights_dict.torchmodel[condition_name].sa_weights.data = (
|
||||
torch.rand((tensor.shape[0], 1))
|
||||
# First initialize self-adaptive weights with correct shape,
|
||||
# then load the values from the checkpoint.
|
||||
for condition_name, _ in self.problem.input_pts.items():
|
||||
shape = checkpoint['state_dict'][
|
||||
f"_pina_models.1.{condition_name}.sa_weights"
|
||||
].shape
|
||||
self.weights_dict[condition_name].sa_weights.data = (
|
||||
torch.rand(shape)
|
||||
)
|
||||
return super().on_load_checkpoint(checkpoint)
|
||||
|
||||
    def _loss_phys(self, samples, equation):
        """
        Elaboration of the physical loss for the SAPINN solver.
        Computation of the physical loss for the SelfAdaptive PINN solver.

        :param LabelTensor samples: Input samples to evaluate the physics loss.
        :param EquationInterface equation: the governing equation representing
@@ -371,43 +338,11 @@ class SAPINN(PINNInterface):
        :rtype: List[LabelTensor, LabelTensor]
        """
        residual = self.compute_residual(samples, equation)
        return self._compute_loss(residual)

    def _loss_data(self, input_tensor, output_tensor):
        """
        Computation of the data loss for the SAPINN solver.

        :param LabelTensor input_tensor: The input to the neural networks.
        :param LabelTensor output_tensor: The true solution to compare with
            the network solution.

        :return: tuple with the weighted and unweighted scalar loss
        :rtype: List[LabelTensor, LabelTensor]
        """
        residual = self.forward(input_tensor) - output_tensor
        return self._compute_loss(residual)

    def _compute_loss(self, residual):
        """
        Computation of the pointwise loss through the mask model and the
        self-adaptive weights.

        :param LabelTensor residual: the matrix of residuals to be weighted

        :return: tuple with the weighted and unweighted loss
        :rtype: List[LabelTensor, LabelTensor]
        """
        weights = self.weights_dict.torchmodel[
            self.current_condition_name
        ].forward()
        weights = self.weights_dict[self.current_condition_name].forward()
        loss_value = self._vectorial_loss(
            torch.zeros_like(residual, requires_grad=True), residual
        )
        return (
            self._vect_to_scalar(weights * loss_value),
            self._vect_to_scalar(loss_value),
        )
        return self._vect_to_scalar(weights * loss_value)

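# --- Usage sketch (not part of the original diff) --------------------------
# Minimal illustration of the self-adaptive weighting idea implemented by
# ``_compute_loss`` above: pointwise residuals are multiplied by trainable
# weights before being reduced to a scalar. All names below are toy
# stand-ins, not the PINA API.
import torch

residual = torch.rand(100, 1)                        # pointwise residuals
sa_weights = torch.rand(100, 1, requires_grad=True)  # self-adaptive weights

pointwise_loss = (torch.zeros_like(residual) - residual) ** 2
weighted_loss = (sa_weights * pointwise_loss).mean()  # scalar training loss
unweighted_loss = pointwise_loss.mean()               # logged for monitoring
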
    def _vect_to_scalar(self, loss_value):
        """
@@ -431,12 +366,14 @@ class SAPINN(PINNInterface):
        return ret

    @property
    def neural_net(self):
    def model(self):
        """
        Returns the neural network model.
        Return the mask models associated with the application of
        the mask to the self-adaptive weights for each loss that
        composes the global loss of the problem.

        :return: The neural network model.
        :rtype: torch.nn.Module
        :return: The ModuleDict for mask models.
        :rtype: torch.nn.ModuleDict
        """
        return self.models[0]

@@ -460,7 +397,7 @@ class SAPINN(PINNInterface):
        :return: The scheduler for the neural network model.
        :rtype: torch.optim.lr_scheduler._LRScheduler
        """
        return self._scheduler[0]
        return self.schedulers[0]

    @property
    def scheduler_weights(self):
@@ -470,7 +407,7 @@ class SAPINN(PINNInterface):
        :return: The scheduler for the mask model.
        :rtype: torch.optim.lr_scheduler._LRScheduler
        """
        return self._scheduler[1]
        return self.schedulers[1]

    @property
    def optimizer_model(self):
@@ -88,11 +88,11 @@ class ReducedOrderModelSolver(SupervisedSolver):
        problem,
        reduction_network,
        interpolation_network,
        loss=torch.nn.MSELoss(),
        optimizer=torch.optim.Adam,
        optimizer_kwargs={"lr": 0.001},
        scheduler=torch.optim.lr_scheduler.ConstantLR,
        scheduler_kwargs={"factor": 1, "total_iters": 0},
        loss=None,
        optimizer=None,
        scheduler=None,
        weighting=None,
        use_lt=True,
    ):
        """
        :param AbstractProblem problem: The formulation of the problem.
@@ -105,15 +105,12 @@ class ReducedOrderModelSolver(SupervisedSolver):
            the ``reduction_network`` encoding.
        :param torch.nn.Module loss: The loss function used as minimizer,
            default :class:`torch.nn.MSELoss`.
        :param torch.nn.Module extra_features: The additional input
            features to use as augmented input.
        :param torch.optim.Optimizer optimizer: The neural network optimizer to
            use; default is :class:`torch.optim.Adam`.
        :param dict optimizer_kwargs: Optimizer constructor keyword args.
        :param float lr: The learning rate; default is 0.001.
        :param torch.optim.LRScheduler scheduler: Learning
            rate scheduler.
        :param dict scheduler_kwargs: LR scheduler constructor keyword args.
        :param WeightingInterface weighting: The loss weighting to use.
        :param bool use_lt: Using LabelTensors as input during training.
        """
        model = torch.nn.ModuleDict(
            {
@@ -127,19 +124,19 @@ class ReducedOrderModelSolver(SupervisedSolver):
            problem=problem,
            loss=loss,
            optimizer=optimizer,
            optimizer_kwargs=optimizer_kwargs,
            scheduler=scheduler,
            scheduler_kwargs=scheduler_kwargs,
            weighting=weighting,
            use_lt=use_lt
        )

        # assert the reduction object contains encode/decode
        if not hasattr(self.neural_net["reduction_network"], "encode"):
        if not hasattr(self.model["reduction_network"], "encode"):
            raise SyntaxError(
                "reduction_network must have encode method. "
                "The encode method should return a lower "
                "dimensional representation of the input."
            )
        if not hasattr(self.neural_net["reduction_network"], "decode"):
        if not hasattr(self.model["reduction_network"], "decode"):
            raise SyntaxError(
                "reduction_network must have decode method. "
                "The decode method should return a high "
@@ -157,8 +154,8 @@ class ReducedOrderModelSolver(SupervisedSolver):
        :return: Solver solution.
        :rtype: torch.Tensor
        """
        reduction_network = self.neural_net["reduction_network"]
        interpolation_network = self.neural_net["interpolation_network"]
        reduction_network = self.model["reduction_network"]
        interpolation_network = self.model["interpolation_network"]
        return reduction_network.decode(interpolation_network(x))

    def loss_data(self, input_pts, output_pts):
@@ -175,8 +172,8 @@ class ReducedOrderModelSolver(SupervisedSolver):
        :rtype: torch.Tensor
        """
        # extract networks
        reduction_network = self.neural_net["reduction_network"]
        interpolation_network = self.neural_net["interpolation_network"]
        reduction_network = self.model["reduction_network"]
        interpolation_network = self.model["interpolation_network"]
        # encoded representations loss
        encode_repr_inter_net = interpolation_network(input_pts)
        encode_repr_reduction_network = reduction_network.encode(output_pts)
@@ -189,11 +186,3 @@ class ReducedOrderModelSolver(SupervisedSolver):
        )

        return loss_encode + loss_reconstruction
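# --- Usage sketch (not part of the original diff) --------------------------
# The two-term loss computed by ``loss_data`` above, on toy modules: one
# term matches the interpolated latent code against the encoder output, the
# other matches the decoded latent code against the full-order snapshot.
# ``ToyAutoencoder`` is a hypothetical stand-in, not the PINA API.
import torch


class ToyAutoencoder(torch.nn.Module):
    def __init__(self, dim=8, latent=2):
        super().__init__()
        self.enc = torch.nn.Linear(dim, latent)
        self.dec = torch.nn.Linear(latent, dim)

    def encode(self, x):
        return self.enc(x)

    def decode(self, z):
        return self.dec(z)


reduction_network = ToyAutoencoder()
interpolation_network = torch.nn.Linear(1, 2)  # parameters -> latent codes
loss_fn = torch.nn.MSELoss()

params = torch.rand(10, 1)     # input parameters
snapshots = torch.rand(10, 8)  # full-order solutions

latent_true = reduction_network.encode(snapshots)
loss_encode = loss_fn(interpolation_network(params), latent_true)
loss_reconstruction = loss_fn(reduction_network.decode(latent_true),
                              snapshots)
total_loss = loss_encode + loss_reconstruction
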
    @property
    def neural_net(self):
        """
        Neural network for training. It returns a :obj:`~torch.nn.ModuleDict`
        containing the ``reduction_network`` and ``interpolation_network``.
        """
        return self._neural_net.torchmodel

@@ -1,102 +1,420 @@
""" Solver module. """

from abc import ABCMeta, abstractmethod
from ..model.network import Network
import lightning
from ..utils import check_consistency
from ..problem import AbstractProblem
from ..optim import Optimizer, Scheduler
import torch
import sys

from abc import ABCMeta, abstractmethod
from ..problem import AbstractProblem
from ..optim import Optimizer, Scheduler, TorchOptimizer, TorchScheduler
from ..loss import WeightingInterface
from ..loss.scalar_weighting import _NoWeighting
from ..utils import check_consistency, labelize_forward
from torch._dynamo.eval_frame import OptimizedModule


class SolverInterface(lightning.pytorch.LightningModule, metaclass=ABCMeta):
    """
    Solver base class. This class is a wrapper of the
    LightningModule class, inheriting all the
    LightningModule methods.
    SolverInterface base class. This class is a wrapper of LightningModule.
    """

    def __init__(self,
                 models,
                 problem,
                 optimizers,
                 schedulers,
                 use_lt=True):
                 weighting,
                 use_lt):
        """
        :param model: A torch neural network model instance.
        :type model: torch.nn.Module
        :param problem: A problem definition instance.
        :type problem: AbstractProblem
        :param list(torch.optim.Optimizer) optimizer: A list of neural network
            optimizers to use.
        :param weighting: The loss weighting to use.
        :type weighting: WeightingInterface
        :param use_lt: Using LabelTensors as input during training.
        :type use_lt: bool
        """
        super().__init__()

        # check consistency of the inputs
        # check consistency of the problem
        check_consistency(problem, AbstractProblem)
        self._check_solver_consistency(problem)

        # check consistency of the models argument and encapsulate in a list
        if not isinstance(models, list):
            check_consistency(models, torch.nn.Module)
            # put everything in a list if only one input
            models = [models]
        else:
            for idx in range(len(models)):
                # check consistency
                check_consistency(models[idx], torch.nn.Module)
        len_model = len(models)

        # if use_lt is true add the extract operation on the input
        if use_lt is True:
            for idx, model in enumerate(models):
                models[idx] = Network(
                    model=model,
                    input_variables=problem.input_variables,
                    output_variables=problem.output_variables,
                )

        # check scheduler consistency + encapsulation
        if not isinstance(schedulers, list):
            check_consistency(schedulers, Scheduler)
            schedulers = [schedulers]
        else:
            for scheduler in schedulers:
                check_consistency(scheduler, Scheduler)

        # check optimizer consistency + encapsulation
        if not isinstance(optimizers, list):
            check_consistency(optimizers, Optimizer)
            optimizers = [optimizers]
        else:
            for optimizer in optimizers:
                check_consistency(optimizer, Optimizer)
        len_optimizer = len(optimizers)

        # check length consistency of the optimizers
        if len_model != len_optimizer:
            raise ValueError("You must define one optimizer for each model. "
                             f"Got {len_model} models, and {len_optimizer}"
                             " optimizers.")

        # extra features handling
        self._pina_models = models
        self._pina_optimizers = optimizers
        self._pina_schedulers = schedulers
        self._pina_problem = problem

        # check consistency of the weighting and hook the condition names
        if weighting is None:
            weighting = _NoWeighting()
        check_consistency(weighting, WeightingInterface)
        self._pina_weighting = weighting
        weighting.condition_names = list(self._pina_problem.conditions.keys())

        # check consistency of use_lt
        check_consistency(use_lt, bool)
        self._use_lt = use_lt

        # if use_lt is true add the extract operation on the input
        if use_lt is True:
            self.forward = labelize_forward(
                forward=self.forward,
                input_variables=problem.input_variables,
                output_variables=problem.output_variables,
            )

        # PINA private attributes (some are overridden by derived classes)
        self._pina_problem = problem
        self._pina_models = None
        self._pina_optimizers = None
        self._pina_schedulers = None

    def _check_solver_consistency(self, problem):
        for condition in problem.conditions.values():
            check_consistency(condition, self.accepted_conditions_types)

    def _optimization_cycle(self, batch):
        """
        Perform a private optimization cycle by computing the loss for each
        condition in the given batch. The losses are later aggregated using
        the specific weighting schema.

        :param batch: A batch of data, where each element is a tuple containing
            a condition name and a dictionary of points.
        :type batch: list of tuples (str, dict)
        :return: The computed loss for all the conditions in the batch,
            cast to a subclass of `torch.Tensor`. It should return a dict
            containing the condition name and the associated scalar loss.
        :rtype: dict(torch.Tensor)
        """
        losses = self.optimization_cycle(batch)
        for name, value in losses.items():
            self.store_log(
                f'{name}_loss', value.item(), self.get_batch_size(batch))
        loss = self.weighting.aggregate(losses).as_subclass(torch.Tensor)
        return loss
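# --- Usage sketch (not part of the original diff) --------------------------
# How the aggregation step of ``_optimization_cycle`` above combines
# per-condition losses into one scalar. ``ToyWeighting`` is a hypothetical
# stand-in for a ``WeightingInterface`` implementation.
import torch


class ToyWeighting:
    def __init__(self, weights):
        self.weights = weights  # condition name -> scalar weight

    def aggregate(self, losses):
        return sum(self.weights[name] * value
                   for name, value in losses.items())


losses = {'gamma1': torch.tensor(0.3), 'D': torch.tensor(1.2)}
weighting = ToyWeighting({'gamma1': 10.0, 'D': 1.0})
loss = weighting.aggregate(losses)  # single scalar used for backpropagation
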
    def training_step(self, batch):
        """
        Solver training step.

        :param batch: The batch element in the dataloader.
        :type batch: tuple
        :return: The sum of the loss functions.
        :rtype: LabelTensor
        """
        loss = self._optimization_cycle(batch=batch)
        self.store_log('train_loss', loss, self.get_batch_size(batch))
        return loss

    def validation_step(self, batch):
        """
        Solver validation step.

        :param batch: The batch element in the dataloader.
        :type batch: tuple
        """
        loss = self._optimization_cycle(batch=batch)
        self.store_log('val_loss', loss, self.get_batch_size(batch))

    def test_step(self, batch):
        """
        Solver test step.

        :param batch: The batch element in the dataloader.
        :type batch: tuple
        """
        loss = self._optimization_cycle(batch=batch)
        self.store_log('test_loss', loss, self.get_batch_size(batch))

    def store_log(self, name, value, batch_size):
        self.log(name=name,
                 value=value,
                 batch_size=batch_size,
                 **self.trainer.logging_kwargs
                 )

    @abstractmethod
    def forward(self, *args, **kwargs):
        pass

    @abstractmethod
    def training_step(self, batch):
    def optimization_cycle(self, batch):
        """
        Perform an optimization cycle by computing the loss for each condition
        in the given batch.

        :param batch: A batch of data, where each element is a tuple containing
            a condition name and a dictionary of points.
        :type batch: list of tuples (str, dict)
        :return: The computed loss for all the conditions in the batch,
            cast to a subclass of `torch.Tensor`. It should return a dict
            containing the condition name and the associated scalar loss.
        :rtype: dict(torch.Tensor)
        """
        pass

    @abstractmethod
    @property
    def problem(self):
        """
        The problem formulation.
        """
        return self._pina_problem

    @property
    def use_lt(self):
        """
        Using LabelTensor in training.
        """
        return self._use_lt

    @property
    def weighting(self):
        """
        The weighting mechanism.
        """
        return self._pina_weighting

    @staticmethod
    def get_batch_size(batch):
        # assuming batch is a custom Batch object
        batch_size = 0
        for data in batch:
            batch_size += len(data[1]['input_points'])
        return batch_size
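# --- Usage sketch (not part of the original diff) --------------------------
# ``get_batch_size`` above expects a batch shaped as a list of
# (condition_name, points_dict) tuples; the toy batch below only shows the
# assumed structure and is not produced by the PINA data module itself.
import torch


def get_batch_size(batch):
    return sum(len(data[1]['input_points']) for data in batch)


toy_batch = [
    ('gamma1', {'input_points': torch.rand(16, 2)}),
    ('D', {'input_points': torch.rand(32, 2)}),
]
# total number of points across conditions: 16 + 32 = 48
assert get_batch_size(toy_batch) == 48
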
    @staticmethod
    def default_torch_optimizer():
        return TorchOptimizer(torch.optim.Adam, lr=0.001)

    @staticmethod
    def default_torch_scheduler():
        return TorchScheduler(torch.optim.lr_scheduler.ConstantLR)

    def on_train_start(self):
        """
        Hook that is called before training begins.
        Used to compile the model if the trainer is set to compile.
        """
        super().on_train_start()
        if self.trainer.compile:
            self._compile_model()

    def on_test_start(self):
        """
        Hook that is called before testing begins.
        Used to compile the model if the trainer is set to compile.
        """
        super().on_test_start()
        if self.trainer.compile and not self._check_already_compiled():
            self._compile_model()

    def _check_already_compiled(self):
        models = self._pina_models
        if len(models) == 1 and isinstance(self._pina_models[0],
                                           torch.nn.ModuleDict):
            models = list(self._pina_models[0].values())
        for model in models:
            if not isinstance(model, (OptimizedModule, torch.nn.ModuleDict)):
                return False
        return True

    @staticmethod
    def _perform_compilation(model):
        model_device = next(model.parameters()).device
        try:
            if model_device == torch.device("mps:0"):
                model = torch.compile(model, backend="eager")
            else:
                model = torch.compile(model, backend="inductor")
        except Exception as e:
            print("Compilation failed, running in normal mode:\n", e)
        return model

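# --- Usage sketch (not part of the original diff) --------------------------
# What ``_perform_compilation`` above does, on a standalone module: use the
# "eager" backend on Apple MPS (inductor is unsupported there), "inductor"
# elsewhere, and fall back to the uncompiled model on failure.
import torch

net = torch.nn.Linear(2, 1)
device = next(net.parameters()).device
backend = "eager" if device.type == "mps" else "inductor"
try:
    net = torch.compile(net, backend=backend)
except Exception as exc:
    print("Compilation failed, running in normal mode:\n", exc)
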
class SingleSolverInterface(SolverInterface):

    def __init__(self,
                 problem,
                 model,
                 optimizer=None,
                 scheduler=None,
                 weighting=None,
                 use_lt=True):
        """
        :param problem: A problem definition instance.
        :type problem: AbstractProblem
        :param model: A torch nn.Module instance.
        :type model: torch.nn.Module
        :param Optimizer optimizer: A neural network optimizer to use.
        :param Scheduler scheduler: A neural network scheduler to use.
        :param WeightingInterface weighting: The loss weighting to use.
        :param bool use_lt: Using LabelTensors as input during training.
        """
        if optimizer is None:
            optimizer = self.default_torch_optimizer()

        if scheduler is None:
            scheduler = self.default_torch_scheduler()

        super().__init__(problem=problem,
                         use_lt=use_lt,
                         weighting=weighting)

        # check consistency of the model argument
        check_consistency(model, torch.nn.Module)
        # check scheduler consistency
        check_consistency(scheduler, Scheduler)
        # check optimizer consistency
        check_consistency(optimizer, Optimizer)

        # initialize the model (needed by Lightning to move across devices)
        self._pina_models = torch.nn.ModuleList([model])
        self._pina_optimizers = [optimizer]
        self._pina_schedulers = [scheduler]

    def forward(self, x):
        """
        Forward pass implementation for the solver.

        :param torch.Tensor x: Input tensor.
        :return: Solver solution.
        :rtype: torch.Tensor
        """
        x = self.model(x)
        return x

    def configure_optimizers(self):
        raise NotImplementedError
        """
        Optimizer configuration for the solver.

        :return: The optimizers and the schedulers
        :rtype: tuple(list, list)
        """
        self.optimizer.hook(self.model.parameters())
        self.scheduler.hook(self.optimizer)
        return (
            [self.optimizer.instance],
            [self.scheduler.instance]
        )
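# --- Usage sketch (not part of the original diff) --------------------------
# The hook pattern in ``configure_optimizers`` above delays optimizer
# construction until the model parameters are known. ``LazyOptimizer`` is a
# hypothetical stand-in for the ``TorchOptimizer`` wrapper.
import torch


class LazyOptimizer:
    def __init__(self, optimizer_class, **kwargs):
        self.optimizer_class = optimizer_class
        self.kwargs = kwargs
        self.instance = None

    def hook(self, parameters):
        # instantiate the torch optimizer on the given parameters
        self.instance = self.optimizer_class(parameters, **self.kwargs)


net = torch.nn.Linear(2, 1)
opt = LazyOptimizer(torch.optim.Adam, lr=0.001)
opt.hook(net.parameters())  # opt.instance is now a torch.optim.Adam
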
    def _compile_model(self):
        if isinstance(self._pina_models[0], torch.nn.ModuleDict):
            self._compile_module_dict()
        else:
            self._compile_single_model()

    def _compile_module_dict(self):
        for name, model in self._pina_models[0].items():
            self._pina_models[0][name] = self._perform_compilation(model)

    def _compile_single_model(self):
        self._pina_models[0] = self._perform_compilation(self._pina_models[0])

    @property
    def model(self):
        """
        Model for training.
        """
        return self._pina_models[0]

    @property
    def scheduler(self):
        """
        Scheduler for training.
        """
        return self._pina_schedulers[0]

    @property
    def optimizer(self):
        """
        Optimizer for training.
        """
        return self._pina_optimizers[0]


class MultiSolverInterface(SolverInterface):
    """
    Multiple Solver base class. This class is a wrapper of the
    SolverInterface class.
    """

    def __init__(self,
                 problem,
                 models,
                 optimizers=None,
                 schedulers=None,
                 weighting=None,
                 use_lt=True):
        """
        :param problem: A problem definition instance.
        :type problem: AbstractProblem
        :param models: Multiple torch nn.Module instances.
        :type models: list[torch.nn.Module] | tuple[torch.nn.Module]
        :param list(Optimizer) optimizers: A list of neural network
            optimizers to use.
        :param list(Scheduler) schedulers: A list of neural network
            schedulers to use.
        :param WeightingInterface weighting: The loss weighting to use.
        :param bool use_lt: Using LabelTensors as input during training.
        """
        if not isinstance(models, (list, tuple)) or len(models) < 2:
            raise ValueError(
                'models should be list[torch.nn.Module] or '
                'tuple[torch.nn.Module] with len greater than '
                'one.'
            )

        # fill missing optimizers/schedulers with the defaults
        if optimizers is None:
            optimizers = [None] * len(models)
        if any(opt is None for opt in optimizers):
            optimizers = [
                self.default_torch_optimizer() if opt is None else opt
                for opt in optimizers
            ]

        if schedulers is None:
            schedulers = [None] * len(models)
        if any(sched is None for sched in schedulers):
            schedulers = [
                self.default_torch_scheduler() if sched is None else sched
                for sched in schedulers
            ]

        super().__init__(problem=problem,
                         use_lt=use_lt,
                         weighting=weighting)

        # check consistency of the models argument
        check_consistency(models, torch.nn.Module)

        # check scheduler consistency
        check_consistency(schedulers, Scheduler)

        # check optimizer consistency
        check_consistency(optimizers, Optimizer)

        # check length consistency of the optimizers
        if len(models) != len(optimizers):
            raise ValueError(
                "You must define one optimizer for each model. "
                f"Got {len(models)} models, and {len(optimizers)}"
                " optimizers."
            )

        # initialize the models
        self._pina_models = torch.nn.ModuleList(models)
        self._pina_optimizers = optimizers
        self._pina_schedulers = schedulers

    def configure_optimizers(self):
        """Optimizer configuration for the solver.

        :return: The optimizers and the schedulers
        :rtype: tuple(list, list)
        """
        for optimizer, scheduler, model in zip(self.optimizers,
                                               self.schedulers,
                                               self.models):
            optimizer.hook(model.parameters())
            scheduler.hook(optimizer)

        return (
            [optimizer.instance for optimizer in self.optimizers],
            [scheduler.instance for scheduler in self.schedulers]
        )

    def _compile_model(self):
        for i, model in enumerate(self._pina_models):
            if not isinstance(model, torch.nn.ModuleDict):
                self._pina_models[i] = self._perform_compilation(model)

    @property
    def models(self):
@@ -111,33 +429,7 @@ class SolverInterface(lightning.pytorch.LightningModule, metaclass=ABCMeta):
        return self._pina_optimizers

    @property
    def problem(self):
        """
        The problem formulation."""
        return self._pina_problem

    def on_train_start(self):
        """
        On training epoch start this function is called to do global checks
        for the different solvers.
        """

        # 1. check the version for the dataloader
        dataloader = self.trainer.train_dataloader
        if sys.version_info < (3, 8):
            dataloader = dataloader.loaders
        self._dataloader = dataloader

        return super().on_train_start()

    @staticmethod
    def get_batch_size(batch):
        # assuming batch is your custom Batch object
        batch_size = 0
        for data in batch:
            batch_size += len(data[1]['input_points'])
        return batch_size

    def _check_solver_consistency(self, problem):
        for condition in problem.conditions.values():
            check_consistency(condition, self.accepted_conditions_types)

    @property
    def schedulers(self):
        """
        The torch model."""
        return self._pina_schedulers

@@ -1,15 +1,13 @@
""" Module for SupervisedSolver """
import torch
from torch.nn.modules.loss import _Loss
from ..optim import TorchOptimizer, TorchScheduler
from .solver import SolverInterface
from ..label_tensor import LabelTensor
from .solver import SingleSolverInterface
from ..utils import check_consistency
from ..loss.loss_interface import LossInterface
from ..condition import InputOutputPointsCondition


class SupervisedSolver(SolverInterface):
class SupervisedSolver(SingleSolverInterface):
    r"""
    SupervisedSolver solver class. This class implements a SupervisedSolver,
    using a user specified ``model`` to solve a specific ``problem``.
@@ -46,110 +44,54 @@ class SupervisedSolver(SolverInterface):
                 loss=None,
                 optimizer=None,
                 scheduler=None,
                 extra_features=None,
                 weighting=None,
                 use_lt=True):
        """
        :param AbstractProblem problem: The formulation of the problem.
        :param torch.nn.Module model: The neural network model to use.
        :param torch.nn.Module loss: The loss function used as minimizer,
            default :class:`torch.nn.MSELoss`.
        :param torch.nn.Module extra_features: The additional input
            features to use as augmented input.
        :param torch.optim.Optimizer optimizer: The neural network optimizer to
            use; default is :class:`torch.optim.Adam`.
        :param torch.optim.LRScheduler scheduler: Learning
            rate scheduler.
        :param WeightingInterface weighting: The loss weighting to use.
        :param bool use_lt: Using LabelTensors as input during training.
        """
        if loss is None:
            loss = torch.nn.MSELoss()

        if optimizer is None:
            optimizer = TorchOptimizer(torch.optim.Adam, lr=0.001)

        if scheduler is None:
            scheduler = TorchScheduler(torch.optim.lr_scheduler.ConstantLR)

        super().__init__(models=model,
        super().__init__(model=model,
                         problem=problem,
                         optimizers=optimizer,
                         schedulers=scheduler,
                         extra_features=extra_features,
                         optimizer=optimizer,
                         scheduler=scheduler,
                         weighting=weighting,
                         use_lt=use_lt)

        # check consistency
        check_consistency(loss, (LossInterface, _Loss, torch.nn.Module),
                          subclass=False)
        self._loss = loss
        self._model = self._pina_models[0]
        self._optimizer = self._pina_optimizers[0]
        self._scheduler = self._pina_schedulers[0]
        self.validation_condition_losses = {
            k: {'loss': [],
                'count': []} for k in self.problem.conditions.keys()}

    def forward(self, x):
        """Forward pass implementation for the solver.

        :param torch.Tensor x: Input tensor.
        :return: Solver solution.
        :rtype: torch.Tensor
        """
        output = self._model(x)
        output.labels = self.problem.output_variables
        return output

    def configure_optimizers(self):
        """Optimizer configuration for the solver.

        :return: The optimizers and the schedulers
        :rtype: tuple(list, list)
        """
        self._optimizer.hook(self._model.parameters())
        self._scheduler.hook(self._optimizer)
        return ([self._optimizer.optimizer_instance],
                [self._scheduler.scheduler_instance])

    def training_step(self, batch):
        """Solver training step.

        :param batch: The batch element in the dataloader.
        :type batch: tuple
        :param batch_idx: The batch index.
        :type batch_idx: int
        :return: The sum of the loss functions.
        :rtype: LabelTensor
        """
        condition_loss = []
        for condition_name, points in batch:
            input_pts, output_pts = points['input_points'], points['output_points']
            loss_ = self.loss_data(input_pts=input_pts, output_pts=output_pts)
            condition_loss.append(loss_.as_subclass(torch.Tensor))
        loss = sum(condition_loss)
        self.log('train_loss', loss, on_step=True, on_epoch=True,
                 prog_bar=True, logger=True,
                 batch_size=self.get_batch_size(batch), sync_dist=True)
        return loss

    def validation_step(self, batch):
        """
        Solver validation step.
        """
        condition_loss = []
        for condition_name, points in batch:
            input_pts, output_pts = points['input_points'], points['output_points']
            loss_ = self.loss_data(input_pts=input_pts, output_pts=output_pts)
            condition_loss.append(loss_.as_subclass(torch.Tensor))
        loss = sum(condition_loss)
        self.log('val_loss', loss, prog_bar=True, logger=True,
                 batch_size=self.get_batch_size(batch), sync_dist=True)

    def test_step(self, batch, batch_idx):
        """
        Solver test step.
        """
        raise NotImplementedError("Test step not implemented yet.")

    def optimization_cycle(self, batch):
        """
        Perform an optimization cycle by computing the loss for each condition
        in the given batch.

        :param batch: A batch of data, where each element is a tuple containing
            a condition name and a dictionary of points.
        :type batch: list of tuples (str, dict)
        :return: The computed loss for all the conditions in the batch,
            cast to a subclass of `torch.Tensor`. It should return a dict
            containing the condition name and the associated scalar loss.
        :rtype: dict(torch.Tensor)
        """
        condition_loss = {}
        for condition_name, points in batch:
            input_pts, output_pts = points['input_points'], points['output_points']
            condition_loss[condition_name] = self.loss_data(
                input_pts=input_pts, output_pts=output_pts)
        return condition_loss

    def loss_data(self, input_pts, output_pts):
        """
@@ -157,35 +99,16 @@ class SupervisedSolver(SolverInterface):
        the network output against the true solution. This function
        should not be overridden unless intentional.

        :param LabelTensor input_pts: The input to the neural networks.
        :param LabelTensor output_pts: The true solution to compare the
        :param input_pts: The input to the neural networks.
        :type input_pts: LabelTensor | torch.Tensor
        :param output_pts: The true solution to compare with the
            network solution.
        :return: The residual loss averaged on the input coordinates
        :type output_pts: LabelTensor | torch.Tensor
        :return: The residual loss.
        :rtype: torch.Tensor
        """
        return self._loss(self.forward(input_pts), output_pts)
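# --- Usage sketch (not part of the original diff) --------------------------
# What ``loss_data`` above computes, in isolation: the solver loss (MSE by
# default) applied to the network prediction and the target. The model and
# loss below are plain torch stand-ins.
import torch

net = torch.nn.Linear(3, 1)
loss_fn = torch.nn.MSELoss()

input_pts = torch.rand(20, 3)
output_pts = torch.rand(20, 1)
loss = loss_fn(net(input_pts), output_pts)
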
    @property
    def scheduler(self):
        """
        Scheduler for training.
        """
        return self._scheduler

    @property
    def optimizer(self):
        """
        Optimizer for training.
        """
        return self._optimizer

    @property
    def model(self):
        """
        Neural network for training.
        """
        return self._model

    @property
    def loss(self):
        """

pina/trainer.py
@@ -1,9 +1,10 @@
""" Trainer module. """
import sys
import torch
import lightning
from .utils import check_consistency
from .data import PinaDataModule
from .solvers.solver import SolverInterface
from .solvers import SolverInterface, PINNInterface


class Trainer(lightning.pytorch.Trainer):
@@ -14,29 +15,88 @@ class Trainer(lightning.pytorch.Trainer):
                 train_size=.7,
                 test_size=.2,
                 val_size=.1,
                 predict_size=.0,
                 predict_size=0.,
                 compile=None,
                 automatic_batching=None,
                 **kwargs):
        """
        PINA Trainer class for customizing every aspect of training via flags.

        :param solver: A pina:class:`SolverInterface` solver for the differential problem.
        :param solver: A pina:class:`SolverInterface` solver for the
            differential problem.
        :type solver: SolverInterface
        :param batch_size: How many samples per batch to load. If ``batch_size=None`` all
        :param batch_size: How many samples per batch to load.
            If ``batch_size=None`` all
            samples are loaded and data are not batched, defaults to None.
        :type batch_size: int | None
        :param train_size: percentage of elements in the train dataset
        :type train_size: float
        :param test_size: percentage of elements in the test dataset
        :type test_size: float
        :param val_size: percentage of elements in the val dataset
        :type val_size: float
        :param predict_size: percentage of elements in the predict dataset
        :type predict_size: float
        :param compile: if True the model is compiled before training,
            default False. For Windows users compilation is always disabled.
        :type compile: bool
        :param automatic_batching: if True automatic PyTorch batching is
            performed. Please avoid using automatic batching when batch_size
            is large, default False.
        :type automatic_batching: bool

        :Keyword Arguments:
            The additional keyword arguments specify the training setup
            and can be chosen from the `pytorch-lightning
            Trainer API <https://lightning.ai/docs/pytorch/stable/common/trainer.html#trainer-class-api>`_
        """
        # check consistency for init types
        check_consistency(solver, SolverInterface)
        check_consistency(train_size, float)
        check_consistency(test_size, float)
        check_consistency(val_size, float)
        check_consistency(predict_size, float)
        if automatic_batching is not None:
            check_consistency(automatic_batching, bool)
        if compile is not None:
            check_consistency(compile, bool)
        if train_size + test_size + val_size + predict_size > 1:
            raise ValueError('train_size, test_size, val_size and '
                             'predict_size must sum to at most 1.')
        for size in [train_size, test_size, val_size, predict_size]:
            if size < 0 or size > 1:
                raise ValueError('splitting sizes for train, validation, '
                                 'test and prediction must be between '
                                 '[0, 1].')
        if batch_size is not None:
            check_consistency(batch_size, int)

        # inference mode set to false when validating/testing PINNs, otherwise
        # the gradient is not tracked and optimization_cycle fails
        if isinstance(solver, PINNInterface):
            kwargs['inference_mode'] = False

        # logging depends on the batch size; when batch_size is None then
        # log_every_n_steps should be zero
        if batch_size is None:
            kwargs['log_every_n_steps'] = 0
        else:
            kwargs.setdefault('log_every_n_steps', 50)  # default for lightning

        # setting default kwargs, overriding lightning defaults
        kwargs.setdefault('enable_progress_bar', True)
        kwargs.setdefault('logger', None)

        super().__init__(**kwargs)

        # check inheritance consistency for solver and batch size
        check_consistency(solver, SolverInterface)
        if batch_size is not None:
            check_consistency(batch_size, int)
        # checking compilation and automatic batching
        if compile is None or sys.platform == "win32":
            compile = False
        if automatic_batching is None:
            automatic_batching = False

        # set attributes
        self.compile = compile
        self.automatic_batching = automatic_batching
        self.train_size = train_size
        self.test_size = test_size
        self.val_size = val_size
@@ -47,9 +107,19 @@ class Trainer(lightning.pytorch.Trainer):
        self.data_module = None
        self._create_loader()

        # logging
        self.logging_kwargs = {
            'logger': bool(
                kwargs['logger'] is None or kwargs['logger'] is True),
            'sync_dist': bool(
                len(self._accelerator_connector._parallel_devices) > 1),
            'on_step': bool(kwargs['log_every_n_steps'] > 0),
            'prog_bar': bool(kwargs['enable_progress_bar']),
            'on_epoch': True
        }

    def _move_to_device(self):
        device = self._accelerator_connector._parallel_devices[0]

        # move parameters to device
        pb = self.solver.problem
        if hasattr(pb, "unknown_parameters"):
@@ -65,37 +135,34 @@ class Trainer(lightning.pytorch.Trainer):
        """
        if not self.solver.problem.are_all_domains_discretised:
            error_message = '\n'.join([
                f"""{" " * 13} ---> Domain {key} {"sampled" if key in self.solver.problem.discretised_domains else "not sampled"}""" for key in
                f"""{" " * 13} ---> Domain {key} {
                    "sampled" if key in self.solver.problem.discretised_domains else
                    "not sampled"}""" for key in
                self.solver.problem.domains.keys()
            ])
            raise RuntimeError('Cannot create Trainer if not all conditions '
                               'are sampled. The Trainer got the following:\n'
                               f'{error_message}')
        automatic_batching = False
        self.data_module = PinaDataModule(self.solver.problem,
                                          train_size=self.train_size,
                                          test_size=self.test_size,
                                          val_size=self.val_size,
                                          predict_size=self.predict_size,
                                          batch_size=self.batch_size,
                                          automatic_batching=automatic_batching)
        self.data_module = PinaDataModule(
            self.solver.problem,
            train_size=self.train_size,
            test_size=self.test_size,
            val_size=self.val_size,
            predict_size=self.predict_size,
            batch_size=self.batch_size,
            automatic_batching=self.automatic_batching)

    def train(self, **kwargs):
        """
        Train the solver method.
        """
        return super().fit(self.solver,
                           datamodule=self.data_module,
                           **kwargs)
        return super().fit(self.solver, datamodule=self.data_module, **kwargs)

    def test(self, **kwargs):
        """
        Test the solver method.
        """
        return super().test(self.solver,
                            datamodule=self.data_module,
                            **kwargs)
        return super().test(self.solver, datamodule=self.data_module, **kwargs)

    @property
    def solver(self):
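# --- Usage sketch (not part of the original diff) --------------------------
# Putting the Trainer flags above together; the problem and model mirror the
# test files later in this commit, and the exact values are illustrative.
from pina.trainer import Trainer
from pina.solvers import PINN
from pina.model import FeedForward
from pina.problem.zoo import Poisson2DSquareProblem as Poisson

problem = Poisson()
problem.discretise_domain(100)
model = FeedForward(len(problem.input_variables),
                    len(problem.output_variables))
solver = PINN(problem=problem, model=model)
trainer = Trainer(solver=solver,
                  max_epochs=5,
                  accelerator='cpu',
                  batch_size=None,   # full-batch training
                  train_size=0.7,
                  val_size=0.2,
                  test_size=0.1,
                  compile=False)
trainer.train()
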
pina/utils.py
@@ -1,15 +1,11 @@
"""Utils module"""
"""Utils module."""

from torch.utils.data import Dataset, DataLoader
from functools import reduce
import types

import torch
from torch.utils.data import DataLoader, default_collate, ConcatDataset

from functools import reduce
from .label_tensor import LabelTensor

import torch


def check_consistency(object, object_instance, subclass=False):
@@ -39,35 +35,27 @@ def check_consistency(object, object_instance, subclass=False):
    except AssertionError:
        raise ValueError(f"{type(obj).__name__} must be {object_instance}.")


def number_parameters(model,
                      aggregate=True,
                      only_trainable=True):  # TODO: check
    """
    Return the number of parameters of a given `model`.

    :param torch.nn.Module model: the torch module to inspect.
    :param bool aggregate: if True the return value is an integer
        corresponding to the total number of parameters of the whole model.
        If False, it returns a dictionary whose keys are the names of the
        layers and the values the corresponding number of parameters.
        Default is True.
    :param bool only_trainable: if True, only trainable parameters are
        counted, otherwise all parameters are. Default is True.
    :return: the number of parameters of the model
    :rtype: dict or int
    """
    tmp = {}
    for name, parameter in model.named_parameters():
        if only_trainable and not parameter.requires_grad:
            continue

        tmp[name] = parameter.numel()

    if aggregate:
        tmp = sum(tmp.values())

    return tmp


def labelize_forward(forward, input_variables, output_variables):
    """
    Wrapper decorator to allow users to enable or disable the use of
    LabelTensors during the forward pass.

    :param forward: The torch.nn.Module forward function.
    :type forward: Callable
    :param input_variables: The problem input variables.
    :type input_variables: list[str] | tuple[str]
    :param output_variables: The problem output variables.
    :type output_variables: list[str] | tuple[str]
    """
    def wrapper(x):
        x = x.extract(input_variables)
        output = forward(x)
        # keep it like this, directly using LabelTensor(...) raises errors
        # when compiling the code
        output = output.as_subclass(LabelTensor)
        output.labels = output_variables
        return output
    return wrapper
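# --- Usage sketch (not part of the original diff) --------------------------
# ``labelize_forward`` above wraps a plain forward so that inputs are
# extracted by variable name and outputs are re-labelled:
import torch
from pina import LabelTensor
from pina.utils import labelize_forward

net = torch.nn.Linear(2, 1)
forward = labelize_forward(net.forward,
                           input_variables=['x', 'y'],
                           output_variables=['u'])

pts = LabelTensor(torch.rand(10, 2), ['x', 'y'])
out = forward(pts)  # LabelTensor with labels ['u']
assert out.labels == ['u']
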
def merge_tensors(tensors):  # name to be changed
    if tensors:
@@ -142,56 +130,3 @@ def chebyshev_roots(n):
    k = torch.arange(n)
    nodes = torch.sort(torch.cos(pi * (k + 0.5) / n))[0]
    return nodes

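# --- Usage sketch (not part of the original diff) --------------------------
# Quick numerical check of ``chebyshev_roots`` above: for n = 2 the nodes
# are cos(3*pi/4) and cos(pi/4), i.e. roughly -0.7071 and 0.7071.
import torch

pi = torch.pi
k = torch.arange(2)
nodes = torch.sort(torch.cos(pi * (k + 0.5) / 2))[0]
assert torch.allclose(nodes, torch.tensor([-0.7071, 0.7071]), atol=1e-4)
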
# class PinaDataset():

#     def __init__(self, pinn) -> None:
#         self.pinn = pinn

#     @property
#     def dataloader(self):
#         return self._create_dataloader()

#     @property
#     def dataset(self):
#         return [self.SampleDataset(key, val)
#                 for key, val in self.input_pts.items()]

#     def _create_dataloader(self):
#         """Private method for creating dataloader

#         :return: dataloader
#         :rtype: torch.utils.data.DataLoader
#         """
#         if self.pinn.batch_size is None:
#             return {key: [{key: val}] for key, val in self.pinn.input_pts.items()}

#         def custom_collate(batch):
#             # extracting pts labels
#             _, pts = list(batch[0].items())[0]
#             labels = pts.labels
#             # calling default torch collate
#             collate_res = default_collate(batch)
#             # save collate result in dict
#             res = {}
#             for key, val in collate_res.items():
#                 val.labels = labels
#                 res[key] = val
#     __init__(self, location, tensor):
#         self._tensor = tensor
#         self._location = location
#         self._len = len(tensor)

#     def __getitem__(self, index):
#         tensor = self._tensor.select(0, index)
#         return {self._location: tensor}

#     def __len__(self):
#         return self._len


class LabelTensorDataLoader(DataLoader):

    def collate_fn(self, data):
        pass

@@ -2,9 +2,9 @@ import torch
from pina.model import FNO

output_channels = 5
batch_size = 15
resolution = [30, 40, 50]
lifting_dim = 128
batch_size = 4
resolution = [4, 6, 8]
lifting_dim = 24


def test_constructor():

@@ -1,49 +0,0 @@
import torch
import pytest

from pina.model.network import Network
from pina.model import FeedForward
from pina import LabelTensor

data = torch.rand((20, 3))
data_lt = LabelTensor(data, ['x', 'y', 'z'])
input_dim = 3
output_dim = 4
torchmodel = FeedForward(input_dim, output_dim)
extra_feat = []


def test_constructor():
    Network(model=torchmodel,
            input_variables=['x', 'y', 'z'],
            output_variables=['a', 'b', 'c', 'd'],
            extra_features=None)

def test_forward():
    net = Network(model=torchmodel,
                  input_variables=['x', 'y', 'z'],
                  output_variables=['a', 'b', 'c', 'd'],
                  extra_features=None)
    out = net.torchmodel(data)
    out_lt = net(data_lt)
    assert isinstance(out, torch.Tensor)
    assert isinstance(out_lt, LabelTensor)
    assert out.shape == (20, 4)
    assert out_lt.shape == (20, 4)
    assert torch.allclose(out_lt, out)
    assert out_lt.labels == ['a', 'b', 'c', 'd']

    with pytest.raises(AssertionError):
        net(data)

def test_backward():
    net = Network(model=torchmodel,
                  input_variables=['x', 'y', 'z'],
                  output_variables=['a', 'b', 'c', 'd'],
                  extra_features=None)
    data = torch.rand((20, 3))
    data.requires_grad = True
    out = net.torchmodel(data)
    l = torch.mean(out)
    l.backward()
    assert data._grad.shape == torch.Size([20, 3])
@@ -1,111 +0,0 @@
import torch
import pytest

from pina import Condition, LabelTensor, Trainer
from pina.problem import SpatialProblem
from pina.operators import laplacian
from pina.domain import CartesianDomain
from pina.model import FeedForward
from pina.solvers import PINNInterface
from pina.problem.zoo import Poisson2DSquareProblem as Poisson
# from pina.equation import Equation
# from pina.equation.equation_factory import FixedValue

# def laplace_equation(input_, output_):
#     force_term = (torch.sin(input_.extract(['x']) * torch.pi) *
#                   torch.sin(input_.extract(['y']) * torch.pi))
#     delta_u = laplacian(output_.extract(['u']), input_)
#     return delta_u - force_term


# my_laplace = Equation(laplace_equation)
# in_ = LabelTensor(torch.tensor([[0., 1.]]), ['x', 'y'])
# out_ = LabelTensor(torch.tensor([[0.]]), ['u'])
# in2_ = LabelTensor(torch.rand(60, 2), ['x', 'y'])
# out2_ = LabelTensor(torch.rand(60, 1), ['u'])


# class Poisson(SpatialProblem):
#     output_variables = ['u']
#     spatial_domain = CartesianDomain({'x': [0, 1], 'y': [0, 1]})

#     conditions = {
#         'gamma1': Condition(
#             location=CartesianDomain({'x': [0, 1], 'y': 1}),
#             equation=FixedValue(0.0)),
#         'gamma2': Condition(
#             location=CartesianDomain({'x': [0, 1], 'y': 0}),
#             equation=FixedValue(0.0)),
#         'gamma3': Condition(
#             location=CartesianDomain({'x': 1, 'y': [0, 1]}),
#             equation=FixedValue(0.0)),
#         'gamma4': Condition(
#             location=CartesianDomain({'x': 0, 'y': [0, 1]}),
#             equation=FixedValue(0.0)),
#         'D': Condition(
#             input_points=LabelTensor(torch.rand(size=(100, 2)), ['x', 'y']),
#             equation=my_laplace),
#         'data': Condition(
#             input_points=in_,
#             output_points=out_),
#         'data2': Condition(
#             input_points=in2_,
#             output_points=out2_)
#     }

#     def poisson_sol(self, pts):
#         return -(torch.sin(pts.extract(['x']) * torch.pi) *
#                  torch.sin(pts.extract(['y']) * torch.pi)) / (2 * torch.pi**2)

#     truth_solution = poisson_sol

# from pina import TorchOptimizer

# class FOOPINN(PINNInterface):
#     def __init__(self, model, problem):
#         super().__init__(models=[model], problem=problem,
#                          optimizers=TorchOptimizer(torch.optim.Adam, lr=1e-3),
#                          loss=torch.nn.MSELoss())
#     def forward(self, x):
#         return self.models[0](x)

#     def loss_phys(self, samples, equation):
#         residual = self.compute_residual(samples=samples, equation=equation)
#         loss_value = self.loss(
#             torch.zeros_like(residual, requires_grad=True), residual
#         )
#         self.store_log(loss_value=float(loss_value))
#         return loss_value

# # make the problem
# poisson_problem = Poisson()
# poisson_problem.discretise_domain(100)
# model = FeedForward(len(poisson_problem.input_variables),
#                     len(poisson_problem.output_variables))
# model_extra_feats = FeedForward(
#     len(poisson_problem.input_variables) + 1,
#     len(poisson_problem.output_variables))


# def test_constructor():
#     with pytest.raises(TypeError):
#         PINNInterface()
#     # a simple pinn built with PINNInterface
#     FOOPINN(model, poisson_problem)

# def test_train_step():
#     solver = FOOPINN(model, poisson_problem)
#     trainer = Trainer(solver, max_epochs=2, accelerator='cpu')
#     trainer.train()

# def test_log():
#     solver = FOOPINN(model, poisson_problem)
#     trainer = Trainer(solver, max_epochs=2, accelerator='cpu')
#     trainer.train()
#     # assert the logged metrics are correct
#     logged_metrics = sorted(list(trainer.logged_metrics.keys()))
#     total_metrics = sorted(
#         list([key + '_loss' for key in poisson_problem.conditions.keys()])
#         + ['mean_loss'])
#     assert logged_metrics == total_metrics
tests/test_solvers/test_causal_pinn.py
@@ -0,0 +1,156 @@
import torch
import pytest

from pina import LabelTensor, Condition
from pina.problem import SpatialProblem
from pina.solvers import CausalPINN
from pina.trainer import Trainer
from pina.model import FeedForward
from pina.problem.zoo import (
    DiffusionReactionProblem,
    InverseDiffusionReactionProblem
)
from pina.condition import (
    InputOutputPointsCondition,
    InputPointsEquationCondition,
    DomainEquationCondition
)
from torch._dynamo.eval_frame import OptimizedModule


class DummySpatialProblem(SpatialProblem):
    '''
    A mock spatial problem for testing purposes.
    '''
    output_variables = ['u']
    conditions = {}
    spatial_domain = None


# define problems and model
problem = DiffusionReactionProblem()
problem.discretise_domain(50)
inverse_problem = InverseDiffusionReactionProblem()
inverse_problem.discretise_domain(50)
model = FeedForward(
    len(problem.input_variables),
    len(problem.output_variables)
)

# add an input-output condition to test supervised learning
input_pts = torch.rand(50, len(problem.input_variables))
input_pts = LabelTensor(input_pts, problem.input_variables)
output_pts = torch.rand(50, len(problem.output_variables))
output_pts = LabelTensor(output_pts, problem.output_variables)
problem.conditions['data'] = Condition(
    input_points=input_pts,
    output_points=output_pts
)


@pytest.mark.parametrize("problem", [problem, inverse_problem])
@pytest.mark.parametrize("eps", [100, 100.1])
def test_constructor(problem, eps):
    with pytest.raises(ValueError):
        CausalPINN(model=model, problem=DummySpatialProblem())
    solver = CausalPINN(model=model, problem=problem, eps=eps)

    assert solver.accepted_conditions_types == (
        InputOutputPointsCondition,
        InputPointsEquationCondition,
        DomainEquationCondition
    )

@pytest.mark.parametrize("problem", [problem, inverse_problem])
@pytest.mark.parametrize("batch_size", [None, 1, 5, 20])
@pytest.mark.parametrize("compile", [True, False])
def test_solver_train(problem, batch_size, compile):
    solver = CausalPINN(model=model, problem=problem)
    trainer = Trainer(solver=solver,
                      max_epochs=2,
                      accelerator='cpu',
                      batch_size=batch_size,
                      train_size=1.,
                      val_size=0.,
                      test_size=0.,
                      compile=compile)
    trainer.train()
    if trainer.compile:
        assert isinstance(solver.model, OptimizedModule)


@pytest.mark.parametrize("problem", [problem, inverse_problem])
@pytest.mark.parametrize("batch_size", [None, 1, 5, 20])
@pytest.mark.parametrize("compile", [True, False])
def test_solver_validation(problem, batch_size, compile):
    solver = CausalPINN(model=model, problem=problem)
    trainer = Trainer(solver=solver,
                      max_epochs=2,
                      accelerator='cpu',
                      batch_size=batch_size,
                      train_size=0.9,
                      val_size=0.1,
                      test_size=0.,
                      compile=compile)
    trainer.train()
    if trainer.compile:
        assert isinstance(solver.model, OptimizedModule)


@pytest.mark.parametrize("problem", [problem, inverse_problem])
@pytest.mark.parametrize("batch_size", [None, 1, 5, 20])
@pytest.mark.parametrize("compile", [True, False])
def test_solver_test(problem, batch_size, compile):
    solver = CausalPINN(model=model, problem=problem)
    trainer = Trainer(solver=solver,
                      max_epochs=2,
                      accelerator='cpu',
                      batch_size=batch_size,
                      train_size=0.7,
                      val_size=0.2,
                      test_size=0.1,
                      compile=compile)
    trainer.test()
    if trainer.compile:
        assert isinstance(solver.model, OptimizedModule)


@pytest.mark.parametrize("problem", [problem, inverse_problem])
def test_train_load_restore(problem):
    dir = "tests/test_solvers/tmp"
    solver = CausalPINN(model=model, problem=problem)
    trainer = Trainer(solver=solver,
                      max_epochs=5,
                      accelerator='cpu',
                      batch_size=None,
                      train_size=0.7,
                      val_size=0.2,
                      test_size=0.1,
                      default_root_dir=dir)
    trainer.train()

    # restore
    new_trainer = Trainer(solver=solver, max_epochs=5, accelerator='cpu')
    new_trainer.train(
        ckpt_path=f'{dir}/lightning_logs/version_0/checkpoints/' +
        'epoch=4-step=5.ckpt')

    # loading
    new_solver = CausalPINN.load_from_checkpoint(
        f'{dir}/lightning_logs/version_0/checkpoints/epoch=4-step=5.ckpt',
        problem=problem, model=model)

    test_pts = LabelTensor(torch.rand(20, 2), problem.input_variables)
    assert new_solver.forward(test_pts).shape == (20, 1)
    assert new_solver.forward(test_pts).shape == (
        solver.forward(test_pts).shape
    )
    torch.testing.assert_close(
        new_solver.forward(test_pts),
        solver.forward(test_pts))

    # rm directories
    import shutil
    shutil.rmtree('tests/test_solvers/tmp')
@@ -1,278 +0,0 @@
import torch
import pytest

from pina.problem import TimeDependentProblem, InverseProblem, SpatialProblem
from pina.operators import grad
from pina.domain import CartesianDomain
from pina import Condition, LabelTensor
from pina.solvers import CausalPINN
from pina.trainer import Trainer
from pina.model import FeedForward
from pina.equation import Equation
from pina.equation.equation_factory import FixedValue
from pina.loss import LpLoss


# class FooProblem(SpatialProblem):
#     '''
#     Foo problem formulation.
#     '''
#     output_variables = ['u']
#     conditions = {}
#     spatial_domain = None


# class InverseDiffusionReactionSystem(TimeDependentProblem, SpatialProblem, InverseProblem):

#     def diffusionreaction(input_, output_, params_):
#         x = input_.extract('x')
#         t = input_.extract('t')
#         u_t = grad(output_, input_, d='t')
#         u_x = grad(output_, input_, d='x')
#         u_xx = grad(u_x, input_, d='x')
#         r = torch.exp(-t) * (1.5 * torch.sin(2*x) + (8/3)*torch.sin(3*x) +
#                              (15/4)*torch.sin(4*x) + (63/8)*torch.sin(8*x))
#         return u_t - params_['mu']*u_xx - r

#     def _solution(self, pts):
#         t = pts.extract('t')
#         x = pts.extract('x')
#         return torch.exp(-t) * (torch.sin(x) + (1/2)*torch.sin(2*x) +
#                                 (1/3)*torch.sin(3*x) + (1/4)*torch.sin(4*x) +
#                                 (1/8)*torch.sin(8*x))

#     # assign output/ spatial and temporal variables
#     output_variables = ['u']
#     spatial_domain = CartesianDomain({'x': [-torch.pi, torch.pi]})
#     temporal_domain = CartesianDomain({'t': [0, 1]})
#     unknown_parameter_domain = CartesianDomain({'mu': [-1, 1]})

#     # problem condition statement
#     conditions = {
#         'D': Condition(location=CartesianDomain({'x': [-torch.pi, torch.pi],
#                                                  't': [0, 1]}),
#                        equation=Equation(diffusionreaction)),
#         'data': Condition(input_points=LabelTensor(torch.tensor([[0., 0.]]), ['x', 't']),
#                           output_points=LabelTensor(torch.tensor([[0.]]), ['u'])),
#     }


# class DiffusionReactionSystem(TimeDependentProblem, SpatialProblem):

#     def diffusionreaction(input_, output_):
#         x = input_.extract('x')
#         t = input_.extract('t')
#         u_t = grad(output_, input_, d='t')
#         u_x = grad(output_, input_, d='x')
#         u_xx = grad(u_x, input_, d='x')
#         r = torch.exp(-t) * (1.5 * torch.sin(2*x) + (8/3)*torch.sin(3*x) +
#                              (15/4)*torch.sin(4*x) + (63/8)*torch.sin(8*x))
#         return u_t - u_xx - r

#     def _solution(self, pts):
#         t = pts.extract('t')
#         x = pts.extract('x')
#         return torch.exp(-t) * (torch.sin(x) + (1/2)*torch.sin(2*x) +
#                                 (1/3)*torch.sin(3*x) + (1/4)*torch.sin(4*x) +
#                                 (1/8)*torch.sin(8*x))

#     # assign output/ spatial and temporal variables
#     output_variables = ['u']
#     spatial_domain = CartesianDomain({'x': [-torch.pi, torch.pi]})
#     temporal_domain = CartesianDomain({'t': [0, 1]})

#     # problem condition statement
#     conditions = {
#         'D': Condition(location=CartesianDomain({'x': [-torch.pi, torch.pi],
#                                                  't': [0, 1]}),
#                        equation=Equation(diffusionreaction)),
#     }


# class myFeature(torch.nn.Module):
#     """
#     Feature: sin(x)
#     """

#     def __init__(self):
#         super(myFeature, self).__init__()

#     def forward(self, x):
#         t = (torch.sin(x.extract(['x']) * torch.pi))
#         return LabelTensor(t, ['sin(x)'])


# # make the problem
# problem = DiffusionReactionSystem()
# model = FeedForward(len(problem.input_variables),
#                     len(problem.output_variables))
# model_extra_feats = FeedForward(
#     len(problem.input_variables) + 1,
#     len(problem.output_variables))
# extra_feats = [myFeature()]


# def test_constructor():
#     CausalPINN(problem=problem, model=model, extra_features=None)

#     with pytest.raises(ValueError):
#         CausalPINN(FooProblem(), model=model, extra_features=None)


# def test_constructor_extra_feats():
#     model_extra_feats = FeedForward(
#         len(problem.input_variables) + 1,
#         len(problem.output_variables))
#     CausalPINN(problem=problem,
#                model=model_extra_feats,
#                extra_features=extra_feats)


# def test_train_cpu():
#     problem = DiffusionReactionSystem()
#     boundaries = ['D']
#     n = 10
#     problem.discretise_domain(n, 'grid', locations=boundaries)
#     pinn = CausalPINN(problem=problem,
#                       model=model, extra_features=None, loss=LpLoss())
#     trainer = Trainer(solver=pinn, max_epochs=1,
#                       accelerator='cpu', batch_size=20)
#     trainer.train()


# def test_log():
#     problem.discretise_domain(100)
#     solver = CausalPINN(problem=problem,
#                         model=model, extra_features=None, loss=LpLoss())
#     trainer = Trainer(solver, max_epochs=2, accelerator='cpu')
#     trainer.train()
#     # assert the logged metrics are correct
#     logged_metrics = sorted(list(trainer.logged_metrics.keys()))
#     total_metrics = sorted(
#         list([key + '_loss' for key in problem.conditions.keys()])
#         + ['mean_loss'])
#     assert logged_metrics == total_metrics


# def test_train_restore():
#     tmpdir = "tests/tmp_restore"
#     problem = DiffusionReactionSystem()
#     boundaries = ['D']
#     n = 10
#     problem.discretise_domain(n, 'grid', locations=boundaries)
#     pinn = CausalPINN(problem=problem,
#                       model=model,
#                       extra_features=None,
#                       loss=LpLoss())
#     trainer = Trainer(solver=pinn,
#                       max_epochs=5,
#                       accelerator='cpu',
#                       default_root_dir=tmpdir)
#     trainer.train()
#     ntrainer = Trainer(solver=pinn, max_epochs=15, accelerator='cpu')
#     t = ntrainer.train(
#         ckpt_path=f'{tmpdir}/lightning_logs/version_0/'
#         'checkpoints/epoch=4-step=5.ckpt')
#     import shutil
#     shutil.rmtree(tmpdir)


# def test_train_load():
#     tmpdir = "tests/tmp_load"
#     problem = DiffusionReactionSystem()
#     boundaries = ['D']
#     n = 10
#     problem.discretise_domain(n, 'grid', locations=boundaries)
#     pinn = CausalPINN(problem=problem,
#                       model=model,
#                       extra_features=None,
#                       loss=LpLoss())
#     trainer = Trainer(solver=pinn,
#                       max_epochs=15,
#                       accelerator='cpu',
#                       default_root_dir=tmpdir)
#     trainer.train()
#     new_pinn = CausalPINN.load_from_checkpoint(
#         f'{tmpdir}/lightning_logs/version_0/checkpoints/epoch=14-step=15.ckpt',
#         problem=problem, model=model)
#     test_pts = CartesianDomain({'x': [0, 1], 't': [0, 1]}).sample(10)
#     assert new_pinn.forward(test_pts).extract(['u']).shape == (10, 1)
#     assert new_pinn.forward(test_pts).extract(
#         ['u']).shape == pinn.forward(test_pts).extract(['u']).shape
#     torch.testing.assert_close(
#         new_pinn.forward(test_pts).extract(['u']),
#         pinn.forward(test_pts).extract(['u']))
#     import shutil
#     shutil.rmtree(tmpdir)


# def test_train_inverse_problem_cpu():
#     problem = InverseDiffusionReactionSystem()
#     boundaries = ['D']
#     n = 100
#     problem.discretise_domain(n, 'random', locations=boundaries)
#     pinn = CausalPINN(problem=problem,
#                       model=model, extra_features=None, loss=LpLoss())
#     trainer = Trainer(solver=pinn, max_epochs=1,
#                       accelerator='cpu', batch_size=20)
#     trainer.train()


# # # TODO does not currently work
# # def test_train_inverse_problem_restore():
# #     tmpdir = "tests/tmp_restore_inv"
# #     problem = InverseDiffusionReactionSystem()
# #     boundaries = ['D']
# #     n = 100
# #     problem.discretise_domain(n, 'random', locations=boundaries)
# #     pinn = CausalPINN(problem=problem,
# #                       model=model,
# #                       extra_features=None,
# #                       loss=LpLoss())
# #     trainer = Trainer(solver=pinn,
# #                       max_epochs=5,
# #                       accelerator='cpu',
# #                       default_root_dir=tmpdir)
# #     trainer.train()
# #     ntrainer = Trainer(solver=pinn, max_epochs=5, accelerator='cpu')
# #     t = ntrainer.train(
# #         ckpt_path=f'{tmpdir}/lightning_logs/version_0/checkpoints/epoch=4-step=5.ckpt')
# #     import shutil
# #     shutil.rmtree(tmpdir)


# def test_train_inverse_problem_load():
#     tmpdir = "tests/tmp_load_inv"
#     problem = InverseDiffusionReactionSystem()
#     boundaries = ['D']
#     n = 100
#     problem.discretise_domain(n, 'random', locations=boundaries)
#     pinn = CausalPINN(problem=problem,
#                       model=model,
#                       extra_features=None,
#                       loss=LpLoss())
#     trainer = Trainer(solver=pinn,
#                       max_epochs=15,
#                       accelerator='cpu',
#                       default_root_dir=tmpdir)
#     trainer.train()
#     new_pinn = CausalPINN.load_from_checkpoint(
#         f'{tmpdir}/lightning_logs/version_0/checkpoints/epoch=14-step=30.ckpt',
#         problem=problem, model=model)
#     test_pts = CartesianDomain({'x': [0, 1], 't': [0, 1]}).sample(10)
#     assert new_pinn.forward(test_pts).extract(['u']).shape == (10, 1)
#     assert new_pinn.forward(test_pts).extract(
#         ['u']).shape == pinn.forward(test_pts).extract(['u']).shape
#     torch.testing.assert_close(
#         new_pinn.forward(test_pts).extract(['u']),
#         pinn.forward(test_pts).extract(['u']))
#     import shutil
#     shutil.rmtree(tmpdir)


# def test_train_extra_feats_cpu():
#     problem = DiffusionReactionSystem()
#     boundaries = ['D']
#     n = 10
#     problem.discretise_domain(n, 'grid', locations=boundaries)
#     pinn = CausalPINN(problem=problem,
#                       model=model_extra_feats,
#                       extra_features=extra_feats)
#     trainer = Trainer(solver=pinn, max_epochs=5, accelerator='cpu')
#     trainer.train()
@@ -1,429 +1,145 @@
import torch
import pytest

from pina.problem import SpatialProblem, InverseProblem
from pina.operators import laplacian
from pina.domain import CartesianDomain
from pina import Condition, LabelTensor
from pina.solvers import CompetitivePINN as PINN
from pina import LabelTensor, Condition
from pina.solvers import CompetitivePINN as CompPINN
from pina.trainer import Trainer
from pina.model import FeedForward
from pina.equation import Equation
from pina.equation.equation_factory import FixedValue
from pina.loss import LpLoss
from pina.problem.zoo import (
    Poisson2DSquareProblem as Poisson,
    InversePoisson2DSquareProblem as InversePoisson
)
from pina.condition import (
    InputOutputPointsCondition,
    InputPointsEquationCondition,
    DomainEquationCondition
)
from torch._dynamo.eval_frame import OptimizedModule


# def laplace_equation(input_, output_):
#     force_term = (torch.sin(input_.extract(['x']) * torch.pi) *
#                   torch.sin(input_.extract(['y']) * torch.pi))
#     delta_u = laplacian(output_.extract(['u']), input_)
#     return delta_u - force_term

# define problems and model
problem = Poisson()
problem.discretise_domain(50)
inverse_problem = InversePoisson()
inverse_problem.discretise_domain(50)
model = FeedForward(
    len(problem.input_variables),
    len(problem.output_variables)
)

# add input-output condition to test supervised learning
input_pts = torch.rand(50, len(problem.input_variables))
input_pts = LabelTensor(input_pts, problem.input_variables)
output_pts = torch.rand(50, len(problem.output_variables))
output_pts = LabelTensor(output_pts, problem.output_variables)
problem.conditions['data'] = Condition(
    input_points=input_pts,
    output_points=output_pts
)
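
# the tests below are parametrized to run on both the forward and the inverse
# problem, with several batch sizes, and with/without compilation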
@pytest.mark.parametrize("problem", [problem, inverse_problem])
@pytest.mark.parametrize("discr", [None, model])
def test_constructor(problem, discr):
    solver = CompPINN(problem=problem, model=model)
    solver = CompPINN(problem=problem, model=model, discriminator=discr)

    assert solver.accepted_conditions_types == (
        InputOutputPointsCondition,
        InputPointsEquationCondition,
        DomainEquationCondition
    )


@pytest.mark.parametrize("problem", [problem, inverse_problem])
@pytest.mark.parametrize("batch_size", [None, 1, 5, 20])
@pytest.mark.parametrize("compile", [True, False])
def test_solver_train(problem, batch_size, compile):
    solver = CompPINN(problem=problem, model=model)
    trainer = Trainer(solver=solver,
                      max_epochs=2,
                      accelerator='cpu',
                      batch_size=batch_size,
                      train_size=1.,
                      val_size=0.,
                      test_size=0.,
                      compile=compile)
    trainer.train()
    if trainer.compile:
        assert (all([isinstance(model, OptimizedModule)
                     for model in solver.models]))


# my_laplace = Equation(laplace_equation)
# in_ = LabelTensor(torch.tensor([[0., 1.]]), ['x', 'y'])
# out_ = LabelTensor(torch.tensor([[0.]]), ['u'])
# in2_ = LabelTensor(torch.rand(60, 2), ['x', 'y'])
# out2_ = LabelTensor(torch.rand(60, 1), ['u'])

@pytest.mark.parametrize("problem", [problem, inverse_problem])
@pytest.mark.parametrize("batch_size", [None, 1, 5, 20])
@pytest.mark.parametrize("compile", [True, False])
def test_solver_validation(problem, batch_size, compile):
    solver = CompPINN(problem=problem, model=model)
    trainer = Trainer(solver=solver,
                      max_epochs=2,
                      accelerator='cpu',
                      batch_size=batch_size,
                      train_size=0.9,
                      val_size=0.1,
                      test_size=0.,
                      compile=compile)
    trainer.train()
    if trainer.compile:
        assert (all([isinstance(model, OptimizedModule)
                     for model in solver.models]))

# class InversePoisson(SpatialProblem, InverseProblem):
#     '''
#     Problem definition for the Poisson equation.
#     '''
#     output_variables = ['u']
#     x_min = -2
#     x_max = 2
#     y_min = -2
#     y_max = 2
#     data_input = LabelTensor(torch.rand(10, 2), ['x', 'y'])
#     data_output = LabelTensor(torch.rand(10, 1), ['u'])
#     spatial_domain = CartesianDomain({'x': [x_min, x_max], 'y': [y_min, y_max]})
#     # define the ranges for the parameters
#     unknown_parameter_domain = CartesianDomain({'mu1': [-1, 1], 'mu2': [-1, 1]})

@pytest.mark.parametrize("problem", [problem, inverse_problem])
@pytest.mark.parametrize("batch_size", [None, 1, 5, 20])
@pytest.mark.parametrize("compile", [True, False])
def test_solver_test(problem, batch_size, compile):
    solver = CompPINN(problem=problem, model=model)
    trainer = Trainer(solver=solver,
                      max_epochs=2,
                      accelerator='cpu',
                      batch_size=batch_size,
                      train_size=0.7,
                      val_size=0.2,
                      test_size=0.1,
                      compile=compile)
    trainer.test()
    if trainer.compile:
        assert (all([isinstance(model, OptimizedModule)
                     for model in solver.models]))

# def laplace_equation(input_, output_, params_):
#     '''
#     Laplace equation with a force term.
#     '''
#     force_term = torch.exp(
#         - 2*(input_.extract(['x']) - params_['mu1'])**2
#         - 2*(input_.extract(['y']) - params_['mu2'])**2)
#     delta_u = laplacian(output_, input_, components=['u'], d=['x', 'y'])

@pytest.mark.parametrize("problem", [problem, inverse_problem])
def test_train_load_restore(problem):
    dir = "tests/test_solvers/tmp"
    problem = problem
    solver = CompPINN(problem=problem, model=model)
    trainer = Trainer(solver=solver,
                      max_epochs=5,
                      accelerator='cpu',
                      batch_size=None,
                      train_size=0.7,
                      val_size=0.2,
                      test_size=0.1,
                      default_root_dir=dir)
    trainer.train()

# return delta_u - force_term
    # restore
    new_trainer = Trainer(solver=solver, max_epochs=5, accelerator='cpu')
    new_trainer.train(
        ckpt_path=f'{dir}/lightning_logs/version_0/checkpoints/' +
        'epoch=4-step=5.ckpt')
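
    # note (assumption): Lightning's default checkpoint name is
    # 'epoch={n}-step={m}.ckpt', hence epoch=4-step=5 after 5 single-batch epochs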

# # define the conditions for the loss (boundary conditions, equation, data)
# conditions = {
#     'gamma1': Condition(location=CartesianDomain({'x': [x_min, x_max],
#                                                   'y': y_max}),
#                         equation=FixedValue(0.0, components=['u'])),
#     'gamma2': Condition(location=CartesianDomain(
#         {'x': [x_min, x_max], 'y': y_min
#          }),
#         equation=FixedValue(0.0, components=['u'])),
#     'gamma3': Condition(location=CartesianDomain(
#         {'x': x_max, 'y': [y_min, y_max]
#          }),
#         equation=FixedValue(0.0, components=['u'])),
#     'gamma4': Condition(location=CartesianDomain(
#         {'x': x_min, 'y': [y_min, y_max]
#          }),
#         equation=FixedValue(0.0, components=['u'])),
#     'D': Condition(location=CartesianDomain(
#         {'x': [x_min, x_max], 'y': [y_min, y_max]
#          }),
#         equation=Equation(laplace_equation)),
#     'data': Condition(input_points=data_input.extract(['x', 'y']),
#                       output_points=data_output)
# }

    # loading
    new_solver = CompPINN.load_from_checkpoint(
        f'{dir}/lightning_logs/version_0/checkpoints/epoch=4-step=5.ckpt',
        problem=problem, model=model)

    test_pts = LabelTensor(torch.rand(20, 2), problem.input_variables)
    assert new_solver.forward(test_pts).shape == (20, 1)
    assert new_solver.forward(test_pts).shape == (
        solver.forward(test_pts).shape
    )
    torch.testing.assert_close(
        new_solver.forward(test_pts),
        solver.forward(test_pts))

# class Poisson(SpatialProblem):
#     output_variables = ['u']
#     spatial_domain = CartesianDomain({'x': [0, 1], 'y': [0, 1]})

#     conditions = {
#         'gamma1': Condition(
#             location=CartesianDomain({'x': [0, 1], 'y': 1}),
#             equation=FixedValue(0.0)),
#         'gamma2': Condition(
#             location=CartesianDomain({'x': [0, 1], 'y': 0}),
#             equation=FixedValue(0.0)),
#         'gamma3': Condition(
#             location=CartesianDomain({'x': 1, 'y': [0, 1]}),
#             equation=FixedValue(0.0)),
#         'gamma4': Condition(
#             location=CartesianDomain({'x': 0, 'y': [0, 1]}),
#             equation=FixedValue(0.0)),
#         'D': Condition(
#             input_points=LabelTensor(torch.rand(size=(100, 2)), ['x', 'y']),
#             equation=my_laplace),
#         'data': Condition(
#             input_points=in_,
#             output_points=out_),
#         'data2': Condition(
#             input_points=in2_,
#             output_points=out2_)
#     }

#     def poisson_sol(self, pts):
#         return -(torch.sin(pts.extract(['x']) * torch.pi) *
#                  torch.sin(pts.extract(['y']) * torch.pi)) / (2 * torch.pi**2)

#     truth_solution = poisson_sol


# class myFeature(torch.nn.Module):
#     """
#     Feature: sin(x)
#     """

#     def __init__(self):
#         super(myFeature, self).__init__()

#     def forward(self, x):
#         t = (torch.sin(x.extract(['x']) * torch.pi) *
#              torch.sin(x.extract(['y']) * torch.pi))
#         return LabelTensor(t, ['sin(x)sin(y)'])


# # make the problem
# poisson_problem = Poisson()
# model = FeedForward(len(poisson_problem.input_variables),
#                     len(poisson_problem.output_variables))
# model_extra_feats = FeedForward(
#     len(poisson_problem.input_variables) + 1,
#     len(poisson_problem.output_variables))
# extra_feats = [myFeature()]


# def test_constructor():
#     PINN(problem=poisson_problem, model=model)
#     PINN(problem=poisson_problem, model=model, discriminator=model)


# def test_constructor_extra_feats():
#     with pytest.raises(TypeError):
#         model_extra_feats = FeedForward(
#             len(poisson_problem.input_variables) + 1,
#             len(poisson_problem.output_variables))
#         PINN(problem=poisson_problem,
#              model=model_extra_feats,
#              extra_features=extra_feats)


# def test_train_cpu():
#     poisson_problem = Poisson()
#     boundaries = ['gamma1', 'gamma2', 'gamma3', 'gamma4']
#     n = 10
#     poisson_problem.discretise_domain(n, 'grid', locations=boundaries)
#     pinn = PINN(problem=poisson_problem, model=model, loss=LpLoss())
#     trainer = Trainer(solver=pinn, max_epochs=1,
#                       accelerator='cpu', batch_size=20)
#     trainer.train()


# def test_log():
#     poisson_problem.discretise_domain(100)
#     solver = PINN(problem=poisson_problem, model=model, loss=LpLoss())
#     trainer = Trainer(solver, max_epochs=2, accelerator='cpu')
#     trainer.train()
#     # assert the logged metrics are correct
#     logged_metrics = sorted(list(trainer.logged_metrics.keys()))
#     total_metrics = sorted(
#         list([key + '_loss' for key in poisson_problem.conditions.keys()])
#         + ['mean_loss'])
#     assert logged_metrics == total_metrics


# def test_train_restore():
#     tmpdir = "tests/tmp_restore"
#     poisson_problem = Poisson()
#     boundaries = ['gamma1', 'gamma2', 'gamma3', 'gamma4']
#     n = 10
#     poisson_problem.discretise_domain(n, 'grid', locations=boundaries)
#     pinn = PINN(problem=poisson_problem,
#                 model=model,
#                 loss=LpLoss())
#     trainer = Trainer(solver=pinn,
#                       max_epochs=5,
#                       accelerator='cpu',
#                       default_root_dir=tmpdir)
#     trainer.train()
#     ntrainer = Trainer(solver=pinn, max_epochs=15, accelerator='cpu')
#     t = ntrainer.train(
#         ckpt_path=f'{tmpdir}/lightning_logs/version_0/'
#         'checkpoints/epoch=4-step=10.ckpt')
#     import shutil
#     shutil.rmtree(tmpdir)


# def test_train_load():
#     tmpdir = "tests/tmp_load"
#     poisson_problem = Poisson()
#     boundaries = ['gamma1', 'gamma2', 'gamma3', 'gamma4']
#     n = 10
#     poisson_problem.discretise_domain(n, 'grid', locations=boundaries)
#     pinn = PINN(problem=poisson_problem,
#                 model=model,
#                 loss=LpLoss())
#     trainer = Trainer(solver=pinn,
#                       max_epochs=15,
#                       accelerator='cpu',
#                       default_root_dir=tmpdir)
#     trainer.train()
#     new_pinn = PINN.load_from_checkpoint(
#         f'{tmpdir}/lightning_logs/version_0/checkpoints/epoch=14-step=30.ckpt',
#         problem=poisson_problem, model=model)
#     test_pts = CartesianDomain({'x': [0, 1], 'y': [0, 1]}).sample(10)
#     assert new_pinn.forward(test_pts).extract(['u']).shape == (10, 1)
#     assert new_pinn.forward(test_pts).extract(
#         ['u']).shape == pinn.forward(test_pts).extract(['u']).shape
#     torch.testing.assert_close(
#         new_pinn.forward(test_pts).extract(['u']),
#         pinn.forward(test_pts).extract(['u']))
#     import shutil
#     shutil.rmtree(tmpdir)


# def test_train_inverse_problem_cpu():
#     poisson_problem = InversePoisson()
#     boundaries = ['gamma1', 'gamma2', 'gamma3', 'gamma4', 'D']
#     n = 100
#     poisson_problem.discretise_domain(n, 'random', locations=boundaries)
#     pinn = PINN(problem=poisson_problem, model=model, loss=LpLoss())
#     trainer = Trainer(solver=pinn, max_epochs=1,
#                       accelerator='cpu', batch_size=20)
#     trainer.train()


# # # TODO does not currently work
# # def test_train_inverse_problem_restore():
# #     tmpdir = "tests/tmp_restore_inv"
# #     poisson_problem = InversePoisson()
# #     boundaries = ['gamma1', 'gamma2', 'gamma3', 'gamma4', 'D']
# #     n = 100
# #     poisson_problem.discretise_domain(n, 'random', locations=boundaries)
# #     pinn = PINN(problem=poisson_problem,
# #                 model=model,
# #                 loss=LpLoss())
# #     trainer = Trainer(solver=pinn,
# #                       max_epochs=5,
# #                       accelerator='cpu',
# #                       default_root_dir=tmpdir)
# #     trainer.train()
# #     ntrainer = Trainer(solver=pinn, max_epochs=5, accelerator='cpu')
# #     t = ntrainer.train(
# #         ckpt_path=f'{tmpdir}/lightning_logs/version_0/checkpoints/epoch=4-step=10.ckpt')
# #     import shutil
# #     shutil.rmtree(tmpdir)


# def test_train_inverse_problem_load():
#     tmpdir = "tests/tmp_load_inv"
#     poisson_problem = InversePoisson()
#     boundaries = ['gamma1', 'gamma2', 'gamma3', 'gamma4', 'D']
#     n = 100
#     poisson_problem.discretise_domain(n, 'random', locations=boundaries)
#     pinn = PINN(problem=poisson_problem,
#                 model=model,
#                 loss=LpLoss())
#     trainer = Trainer(solver=pinn,
#                       max_epochs=15,
#                       accelerator='cpu',
#                       default_root_dir=tmpdir)
#     trainer.train()
#     new_pinn = PINN.load_from_checkpoint(
#         f'{tmpdir}/lightning_logs/version_0/checkpoints/epoch=14-step=30.ckpt',
#         problem=poisson_problem, model=model)
#     test_pts = CartesianDomain({'x': [0, 1], 'y': [0, 1]}).sample(10)
#     assert new_pinn.forward(test_pts).extract(['u']).shape == (10, 1)
#     assert new_pinn.forward(test_pts).extract(
#         ['u']).shape == pinn.forward(test_pts).extract(['u']).shape
#     torch.testing.assert_close(
#         new_pinn.forward(test_pts).extract(['u']),
#         pinn.forward(test_pts).extract(['u']))
#     import shutil
#     shutil.rmtree(tmpdir)


# # # TODO fix asap. Basically sampling few variables
# # # works only if both variables are in a range.
# # # if one is fixed and the other not, this will
# # # not work. This test also needs to be fixed and
# # # insert in test problem not in test pinn.
# # def test_train_cpu_sampling_few_vars():
# #     poisson_problem = Poisson()
# #     boundaries = ['gamma1', 'gamma2', 'gamma3']
# #     n = 10
# #     poisson_problem.discretise_domain(n, 'grid', locations=boundaries)
# #     poisson_problem.discretise_domain(n, 'random', locations=['gamma4'], variables=['x'])
# #     poisson_problem.discretise_domain(n, 'random', locations=['gamma4'], variables=['y'])
# #     pinn = PINN(problem=poisson_problem, model=model, extra_features=None, loss=LpLoss())
# #     trainer = Trainer(solver=pinn, kwargs={'max_epochs': 5, 'accelerator': 'cpu'})
# #     trainer.train()


# # TODO, fix GitHub actions to run also on GPU
# # def test_train_gpu():
# #     poisson_problem = Poisson()
# #     boundaries = ['gamma1', 'gamma2', 'gamma3', 'gamma4']
# #     n = 10
# #     poisson_problem.discretise_domain(n, 'grid', locations=boundaries)
# #     pinn = PINN(problem=poisson_problem, model=model, extra_features=None, loss=LpLoss())
# #     trainer = Trainer(solver=pinn, kwargs={'max_epochs': 5, 'accelerator': 'gpu'})
# #     trainer.train()

# # def test_train_gpu(): #TODO fix ASAP
# #     poisson_problem = Poisson()
# #     boundaries = ['gamma1', 'gamma2', 'gamma3', 'gamma4']
# #     n = 10
# #     poisson_problem.discretise_domain(n, 'grid', locations=boundaries)
# #     poisson_problem.conditions.pop('data')  # The input/output pts are allocated on cpu
# #     pinn = PINN(problem=poisson_problem, model=model, extra_features=None, loss=LpLoss())
# #     trainer = Trainer(solver=pinn, kwargs={'max_epochs': 5, 'accelerator': 'gpu'})
# #     trainer.train()

# # def test_train_2():
# #     boundaries = ['gamma1', 'gamma2', 'gamma3', 'gamma4']
# #     n = 10
# #     expected_keys = [[], list(range(0, 50, 3))]
# #     param = [0, 3]
# #     for i, truth_key in zip(param, expected_keys):
# #         pinn = PINN(problem, model)
# #         pinn.discretise_domain(n, 'grid', locations=boundaries)
# #         pinn.discretise_domain(n, 'grid', locations=['D'])
# #         pinn.train(50, save_loss=i)
# #         assert list(pinn.history_loss.keys()) == truth_key


# # def test_train_extra_feats():
# #     pinn = PINN(problem, model_extra_feat, [myFeature()])
# #     boundaries = ['gamma1', 'gamma2', 'gamma3', 'gamma4']
# #     n = 10
# #     pinn.discretise_domain(n, 'grid', locations=boundaries)
# #     pinn.discretise_domain(n, 'grid', locations=['D'])
# #     pinn.train(5)


# # def test_train_2_extra_feats():
# #     boundaries = ['gamma1', 'gamma2', 'gamma3', 'gamma4']
# #     n = 10
# #     expected_keys = [[], list(range(0, 50, 3))]
# #     param = [0, 3]
# #     for i, truth_key in zip(param, expected_keys):
# #         pinn = PINN(problem, model_extra_feat, [myFeature()])
# #         pinn.discretise_domain(n, 'grid', locations=boundaries)
# #         pinn.discretise_domain(n, 'grid', locations=['D'])
# #         pinn.train(50, save_loss=i)
# #         assert list(pinn.history_loss.keys()) == truth_key


# # def test_train_with_optimizer_kwargs():
# #     boundaries = ['gamma1', 'gamma2', 'gamma3', 'gamma4']
# #     n = 10
# #     expected_keys = [[], list(range(0, 50, 3))]
# #     param = [0, 3]
# #     for i, truth_key in zip(param, expected_keys):
# #         pinn = PINN(problem, model, optimizer_kwargs={'lr': 0.3})
# #         pinn.discretise_domain(n, 'grid', locations=boundaries)
# #         pinn.discretise_domain(n, 'grid', locations=['D'])
# #         pinn.train(50, save_loss=i)
# #         assert list(pinn.history_loss.keys()) == truth_key


# # def test_train_with_lr_scheduler():
# #     boundaries = ['gamma1', 'gamma2', 'gamma3', 'gamma4']
# #     n = 10
# #     expected_keys = [[], list(range(0, 50, 3))]
# #     param = [0, 3]
# #     for i, truth_key in zip(param, expected_keys):
# #         pinn = PINN(
# #             problem,
# #             model,
# #             lr_scheduler_type=torch.optim.lr_scheduler.CyclicLR,
# #             lr_scheduler_kwargs={'base_lr': 0.1, 'max_lr': 0.3, 'cycle_momentum': False}
# #         )
# #         pinn.discretise_domain(n, 'grid', locations=boundaries)
# #         pinn.discretise_domain(n, 'grid', locations=['D'])
# #         pinn.train(50, save_loss=i)
# #         assert list(pinn.history_loss.keys()) == truth_key


# # # def test_train_batch():
# # #     pinn = PINN(problem, model, batch_size=6)
# # #     boundaries = ['gamma1', 'gamma2', 'gamma3', 'gamma4']
# # #     n = 10
# # #     pinn.discretise_domain(n, 'grid', locations=boundaries)
# # #     pinn.discretise_domain(n, 'grid', locations=['D'])
# # #     pinn.train(5)


# # # def test_train_batch_2():
# # #     boundaries = ['gamma1', 'gamma2', 'gamma3', 'gamma4']
# # #     n = 10
# # #     expected_keys = [[], list(range(0, 50, 3))]
# # #     param = [0, 3]
# # #     for i, truth_key in zip(param, expected_keys):
# # #         pinn = PINN(problem, model, batch_size=6)
# # #         pinn.discretise_domain(n, 'grid', locations=boundaries)
# # #         pinn.discretise_domain(n, 'grid', locations=['D'])
# # #         pinn.train(50, save_loss=i)
# # #         assert list(pinn.history_loss.keys()) == truth_key


# # if torch.cuda.is_available():

# #     # def test_gpu_train():
# #     #     pinn = PINN(problem, model, batch_size=20, device='cuda')
# #     #     boundaries = ['gamma1', 'gamma2', 'gamma3', 'gamma4']
# #     #     n = 100
# #     #     pinn.discretise_domain(n, 'grid', locations=boundaries)
# #     #     pinn.discretise_domain(n, 'grid', locations=['D'])
# #     #     pinn.train(5)

# #     def test_gpu_train_nobatch():
# #         pinn = PINN(problem, model, batch_size=None, device='cuda')
# #         boundaries = ['gamma1', 'gamma2', 'gamma3', 'gamma4']
# #         n = 100
# #         pinn.discretise_domain(n, 'grid', locations=boundaries)
# #         pinn.discretise_domain(n, 'grid', locations=['D'])
# #         pinn.train(5)

    # rm directories
    import shutil
    shutil.rmtree('tests/test_solvers/tmp')
@@ -1,167 +1,177 @@
import torch
import torch.nn as nn

from pina.problem import AbstractProblem
import pytest
from pina import Condition, LabelTensor
from pina.solvers import GAROM
from pina.condition import InputOutputPointsCondition
from pina.problem import AbstractProblem
from pina.model import FeedForward
from pina.trainer import Trainer
import torch.nn as nn
import matplotlib.tri as tri
from torch._dynamo.eval_frame import OptimizedModule


# def func(x, mu1, mu2):
#     import torch
#     x_m1 = (x[:, 0] - mu1).pow(2)
#     x_m2 = (x[:, 1] - mu2).pow(2)
#     norm = x[:, 0]**2 + x[:, 1]**2
#     return torch.exp(-(x_m1 + x_m2))

class TensorProblem(AbstractProblem):
    input_variables = ['u_0', 'u_1']
    output_variables = ['u']
    conditions = {
        'data': Condition(
            output_points=torch.randn(50, 2),
            input_points=torch.randn(50, 1))
    }


# class ParametricGaussian(AbstractProblem):
#     output_variables = [f'u_{i}' for i in range(900)]

# simple Generator Network
class Generator(nn.Module):

#     # params
#     xx = torch.linspace(-1, 1, 20)
#     yy = xx
#     params = LabelTensor(torch.cartesian_prod(xx, yy), labels=['mu1', 'mu2'])

    def __init__(self,
                 input_dimension=2,
                 parameters_dimension=1,
                 noise_dimension=2,
                 activation=torch.nn.SiLU):
        super().__init__()

#     # define domain
#     x = torch.linspace(-1, 1, 30)
#     domain = torch.cartesian_prod(x, x)
#     triang = tri.Triangulation(domain[:, 0], domain[:, 1])
#     sol = []
#     for p in params:
#         sol.append(func(domain, p[0], p[1]))
#     snapshots = LabelTensor(torch.stack(sol), labels=output_variables)

        self._noise_dimension = noise_dimension
        self._activation = activation
        self.model = FeedForward(6*noise_dimension, input_dimension)
        self.condition = FeedForward(parameters_dimension, 5 * noise_dimension)

#     # define conditions
#     conditions = {
#         'data': Condition(input_points=params, output_points=snapshots)
#     }

    def forward(self, param):
        # uniform sampling in [-1, 1]
        z = 2 * torch.rand(size=(param.shape[0], self._noise_dimension),
                           device=param.device,
                           dtype=param.dtype,
                           requires_grad=True) - 1
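        # conditioning by concatenation of the mapped parameters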
        return self.model(torch.cat((z, self.condition(param)), dim=-1))


# Simple Discriminator Network

# # simple Generator Network
# class Generator(nn.Module):
class Discriminator(nn.Module):

#     def __init__(self,
#                  input_dimension,
#                  parameters_dimension,
#                  noise_dimension,
#                  activation=torch.nn.SiLU):
#         super().__init__()
    def __init__(self,
                 input_dimension=2,
                 parameter_dimension=1,
                 hidden_dimension=2,
                 activation=torch.nn.ReLU):
        super().__init__()

#         self._noise_dimension = noise_dimension
#         self._activation = activation
        self._activation = activation
        self.encoding = FeedForward(input_dimension, hidden_dimension)
        self.decoding = FeedForward(2*hidden_dimension, input_dimension)
        self.condition = FeedForward(parameter_dimension, hidden_dimension)

#         self.model = torch.nn.Sequential(
#             torch.nn.Linear(6 * self._noise_dimension, input_dimension // 6),
#             self._activation(),
#             torch.nn.Linear(input_dimension // 6, input_dimension // 3),
#             self._activation(),
#             torch.nn.Linear(input_dimension // 3, input_dimension))
#         self.condition = torch.nn.Sequential(
#             torch.nn.Linear(parameters_dimension, 2 * self._noise_dimension),
#             self._activation(),
#             torch.nn.Linear(2 * self._noise_dimension,
#                             5 * self._noise_dimension))

#     def forward(self, param):
#         # uniform sampling in [-1, 1]
#         z = torch.rand(size=(param.shape[0], self._noise_dimension),
#                        device=param.device,
#                        dtype=param.dtype,
#                        requires_grad=True)
#         z = 2. * z - 1.

#         # conditioning by concatenation of mapped parameters
#         input_ = torch.cat((z, self.condition(param)), dim=-1)
#         out = self.model(input_)

#         return out

    def forward(self, data):
        x, condition = data
        encoding = self.encoding(x)
        conditioning = torch.cat((encoding, self.condition(condition)), dim=-1)
        decoding = self.decoding(conditioning)
        return decoding


# # Simple Discriminator Network
# class Discriminator(nn.Module):

#     def __init__(self,
#                  input_dimension,
#                  parameter_dimension,
#                  hidden_dimension,
#                  activation=torch.nn.ReLU):
#         super().__init__()

#         self._activation = activation
#         self.encoding = torch.nn.Sequential(
#             torch.nn.Linear(input_dimension, input_dimension // 3),
#             self._activation(),
#             torch.nn.Linear(input_dimension // 3, input_dimension // 6),
#             self._activation(),
#             torch.nn.Linear(input_dimension // 6, hidden_dimension))
#         self.decoding = torch.nn.Sequential(
#             torch.nn.Linear(2 * hidden_dimension, input_dimension // 6),
#             self._activation(),
#             torch.nn.Linear(input_dimension // 6, input_dimension // 3),
#             self._activation(),
#             torch.nn.Linear(input_dimension // 3, input_dimension),
#         )

#         self.condition = torch.nn.Sequential(
#             torch.nn.Linear(parameter_dimension, hidden_dimension // 2),
#             self._activation(),
#             torch.nn.Linear(hidden_dimension // 2, hidden_dimension))

#     def forward(self, data):
#         x, condition = data
#         encoding = self.encoding(x)
#         conditioning = torch.cat((encoding, self.condition(condition)), dim=-1)
#         decoding = self.decoding(conditioning)
#         return decoding


def test_constructor():
    GAROM(problem=TensorProblem(),
          generator=Generator(),
          discriminator=Discriminator())
    assert GAROM.accepted_conditions_types == (
        InputOutputPointsCondition
    )
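
# GAROM is data-driven: the only accepted condition type is input/output points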

# problem = ParametricGaussian()
@pytest.mark.parametrize("batch_size", [None, 1, 5, 20])
@pytest.mark.parametrize("compile", [True, False])
def test_solver_train(batch_size, compile):
    solver = GAROM(problem=TensorProblem(),
                   generator=Generator(),
                   discriminator=Discriminator())
    trainer = Trainer(solver=solver,
                      max_epochs=2,
                      accelerator='cpu',
                      batch_size=batch_size,
                      train_size=1.,
                      test_size=0.,
                      val_size=0.,
                      compile=compile)
    trainer.train()
    if trainer.compile:
        assert (all([isinstance(model, OptimizedModule)
                     for model in solver.models]))


# def test_constructor():
#     GAROM(problem=problem,
#           generator=Generator(input_dimension=900,
#                               parameters_dimension=2,
#                               noise_dimension=12),
#           discriminator=Discriminator(input_dimension=900,
#                                       parameter_dimension=2,
#                                       hidden_dimension=64))

@pytest.mark.parametrize("batch_size", [None, 1, 5, 20])
@pytest.mark.parametrize("compile", [True, False])
def test_solver_validation(batch_size, compile):
    solver = GAROM(problem=TensorProblem(),
                   generator=Generator(),
                   discriminator=Discriminator())

    trainer = Trainer(solver=solver,
                      max_epochs=2,
                      accelerator='cpu',
                      batch_size=batch_size,
                      train_size=0.9,
                      val_size=0.1,
                      test_size=0.,
                      compile=compile)
    trainer.train()
    if trainer.compile:
        assert (all([isinstance(model, OptimizedModule)
                     for model in solver.models]))


# def test_train_cpu():
#     solver = GAROM(problem=problem,
#                    generator=Generator(input_dimension=900,
#                                        parameters_dimension=2,
#                                        noise_dimension=12),
#                    discriminator=Discriminator(input_dimension=900,
#                                                parameter_dimension=2,
#                                                hidden_dimension=64))
#     trainer = Trainer(solver=solver, max_epochs=4, accelerator='cpu', batch_size=20)
#     trainer.train()

@pytest.mark.parametrize("batch_size", [None, 1, 5, 20])
@pytest.mark.parametrize("compile", [True, False])
def test_solver_test(batch_size, compile):
    solver = GAROM(problem=TensorProblem(),
                   generator=Generator(),
                   discriminator=Discriminator(),
                   )
    trainer = Trainer(solver=solver,
                      max_epochs=2,
                      accelerator='cpu',
                      batch_size=batch_size,
                      train_size=0.8,
                      val_size=0.1,
                      test_size=0.1,
                      compile=compile)
    trainer.test()
    if trainer.compile:
        assert (all([isinstance(model, OptimizedModule)
                     for model in solver.models]))


# def test_sample():
#     solver = GAROM(problem=problem,
#                    generator=Generator(input_dimension=900,
#                                        parameters_dimension=2,
#                                        noise_dimension=12),
#                    discriminator=Discriminator(input_dimension=900,
#                                                parameter_dimension=2,
#                                                hidden_dimension=64))
#     solver.sample(problem.params)
#     assert solver.sample(problem.params).shape == problem.snapshots.shape

def test_train_load_restore():
    dir = "tests/test_solvers/tmp/"
    problem = TensorProblem()
    solver = GAROM(problem=TensorProblem(),
                   generator=Generator(),
                   discriminator=Discriminator(),
                   )
    trainer = Trainer(solver=solver,
                      max_epochs=5,
                      accelerator='cpu',
                      batch_size=None,
                      train_size=0.9,
                      test_size=0.1,
                      val_size=0.,
                      default_root_dir=dir)
    trainer.train()

    # restore
    new_trainer = Trainer(solver=solver, max_epochs=5, accelerator='cpu')
    new_trainer.train(
        ckpt_path=f'{dir}/lightning_logs/version_0/checkpoints/' +
        'epoch=4-step=5.ckpt')

# def test_forward():
#     solver = GAROM(problem=problem,
#                    generator=Generator(input_dimension=900,
#                                        parameters_dimension=2,
#                                        noise_dimension=12),
#                    discriminator=Discriminator(input_dimension=900,
#                                                parameter_dimension=2,
#                                                hidden_dimension=64))
#     solver(problem.params, mc_steps=100, variance=True)
#     assert solver(problem.params).shape == problem.snapshots.shape

    # loading
    new_solver = GAROM.load_from_checkpoint(
        f'{dir}/lightning_logs/version_0/checkpoints/epoch=4-step=5.ckpt',
        problem=TensorProblem(), generator=Generator(),
        discriminator=Discriminator())

    test_pts = torch.rand(20, 1)
    assert new_solver.forward(test_pts).shape == (20, 2)
    assert new_solver.forward(test_pts).shape == solver.forward(test_pts).shape
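    # the generator samples fresh noise at every forward call, so only output
    # shapes are compared here (values are not reproducible across calls)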

    # rm directories
    import shutil
    shutil.rmtree('tests/test_solvers/tmp')

@@ -1,444 +0,0 @@
|
||||
import torch
|
||||
|
||||
from pina.problem import SpatialProblem, InverseProblem
|
||||
from pina.operators import laplacian
|
||||
from pina.domain import CartesianDomain
|
||||
from pina import Condition, LabelTensor
|
||||
from pina.solvers import GPINN
|
||||
from pina.trainer import Trainer
|
||||
from pina.model import FeedForward
|
||||
from pina.equation import Equation
|
||||
from pina.equation.equation_factory import FixedValue
|
||||
from pina.loss import LpLoss
|
||||
|
||||
|
||||
# def laplace_equation(input_, output_):
|
||||
# force_term = (torch.sin(input_.extract(['x']) * torch.pi) *
|
||||
# torch.sin(input_.extract(['y']) * torch.pi))
|
||||
# delta_u = laplacian(output_.extract(['u']), input_)
|
||||
# return delta_u - force_term
|
||||
|
||||
|
||||
# my_laplace = Equation(laplace_equation)
|
||||
# in_ = LabelTensor(torch.tensor([[0., 1.]]), ['x', 'y'])
|
||||
# out_ = LabelTensor(torch.tensor([[0.]]), ['u'])
|
||||
# in2_ = LabelTensor(torch.rand(60, 2), ['x', 'y'])
|
||||
# out2_ = LabelTensor(torch.rand(60, 1), ['u'])
|
||||
|
||||
|
||||
# class InversePoisson(SpatialProblem, InverseProblem):
|
||||
# '''
|
||||
# Problem definition for the Poisson equation.
|
||||
# '''
|
||||
# output_variables = ['u']
|
||||
# x_min = -2
|
||||
# x_max = 2
|
||||
# y_min = -2
|
||||
# y_max = 2
|
||||
# data_input = LabelTensor(torch.rand(10, 2), ['x', 'y'])
|
||||
# data_output = LabelTensor(torch.rand(10, 1), ['u'])
|
||||
# spatial_domain = CartesianDomain({'x': [x_min, x_max], 'y': [y_min, y_max]})
|
||||
# # define the ranges for the parameters
|
||||
# unknown_parameter_domain = CartesianDomain({'mu1': [-1, 1], 'mu2': [-1, 1]})
|
||||
|
||||
# def laplace_equation(input_, output_, params_):
|
||||
# '''
|
||||
# Laplace equation with a force term.
|
||||
# '''
|
||||
# force_term = torch.exp(
|
||||
# - 2*(input_.extract(['x']) - params_['mu1'])**2
|
||||
# - 2*(input_.extract(['y']) - params_['mu2'])**2)
|
||||
# delta_u = laplacian(output_, input_, components=['u'], d=['x', 'y'])
|
||||
|
||||
# return delta_u - force_term
|
||||
|
||||
# # define the conditions for the loss (boundary conditions, equation, data)
|
||||
# conditions = {
|
||||
# 'gamma1': Condition(location=CartesianDomain({'x': [x_min, x_max],
|
||||
# 'y': y_max}),
|
||||
# equation=FixedValue(0.0, components=['u'])),
|
||||
# 'gamma2': Condition(location=CartesianDomain(
|
||||
# {'x': [x_min, x_max], 'y': y_min}),
|
||||
# equation=FixedValue(0.0, components=['u'])),
|
||||
# 'gamma3': Condition(location=CartesianDomain(
|
||||
# {'x': x_max, 'y': [y_min, y_max]}),
|
||||
# equation=FixedValue(0.0, components=['u'])),
|
||||
# 'gamma4': Condition(location=CartesianDomain(
|
||||
# {'x': x_min, 'y': [y_min, y_max]
|
||||
# }),
|
||||
# equation=FixedValue(0.0, components=['u'])),
|
||||
# 'D': Condition(location=CartesianDomain(
|
||||
# {'x': [x_min, x_max], 'y': [y_min, y_max]
|
||||
# }),
|
||||
# equation=Equation(laplace_equation)),
|
||||
# 'data': Condition(
|
||||
# input_points=data_input.extract(['x', 'y']),
|
||||
# output_points=data_output)
|
||||
# }
|
||||
|
||||
|
||||
# class Poisson(SpatialProblem):
|
||||
# output_variables = ['u']
|
||||
# spatial_domain = CartesianDomain({'x': [0, 1], 'y': [0, 1]})
|
||||
|
||||
# conditions = {
|
||||
# 'gamma1': Condition(
|
||||
# location=CartesianDomain({'x': [0, 1], 'y': 1}),
|
||||
# equation=FixedValue(0.0)),
|
||||
# 'gamma2': Condition(
|
||||
# location=CartesianDomain({'x': [0, 1], 'y': 0}),
|
||||
# equation=FixedValue(0.0)),
|
||||
# 'gamma3': Condition(
|
||||
# location=CartesianDomain({'x': 1, 'y': [0, 1]}),
|
||||
# equation=FixedValue(0.0)),
|
||||
# 'gamma4': Condition(
|
||||
# location=CartesianDomain({'x': 0, 'y': [0, 1]}),
|
||||
# equation=FixedValue(0.0)),
|
||||
# 'D': Condition(
|
||||
# input_points=LabelTensor(torch.rand(size=(100, 2)), ['x', 'y']),
|
||||
# equation=my_laplace),
|
||||
# 'data': Condition(
|
||||
# input_points=in_,
|
||||
# output_points=out_),
|
||||
# 'data2': Condition(
|
||||
# input_points=in2_,
|
||||
# output_points=out2_)
|
||||
# }
|
||||
|
||||
# def poisson_sol(self, pts):
|
||||
# return -(torch.sin(pts.extract(['x']) * torch.pi) *
|
||||
# torch.sin(pts.extract(['y']) * torch.pi)) / (2 * torch.pi**2)
|
||||
|
||||
# truth_solution = poisson_sol
|
||||
|
||||
|
||||
# class myFeature(torch.nn.Module):
|
||||
# """
|
||||
# Feature: sin(x)
|
||||
# """
|
||||
|
||||
# def __init__(self):
|
||||
# super(myFeature, self).__init__()
|
||||
|
||||
# def forward(self, x):
|
||||
# t = (torch.sin(x.extract(['x']) * torch.pi) *
|
||||
# torch.sin(x.extract(['y']) * torch.pi))
|
||||
# return LabelTensor(t, ['sin(x)sin(y)'])
|
||||
|
||||
|
||||
# # make the problem
|
||||
# poisson_problem = Poisson()
|
||||
# model = FeedForward(len(poisson_problem.input_variables),
|
||||
# len(poisson_problem.output_variables))
|
||||
# model_extra_feats = FeedForward(
|
||||
# len(poisson_problem.input_variables) + 1,
|
||||
# len(poisson_problem.output_variables))
|
||||
# extra_feats = [myFeature()]
|
||||
|
||||
|
||||
# def test_constructor():
|
||||
# GPINN(problem=poisson_problem, model=model, extra_features=None)
|
||||
|
||||
|
||||
# def test_constructor_extra_feats():
|
||||
# model_extra_feats = FeedForward(
|
||||
# len(poisson_problem.input_variables) + 1,
|
||||
# len(poisson_problem.output_variables))
|
||||
# GPINN(problem=poisson_problem,
|
||||
# model=model_extra_feats,
|
||||
# extra_features=extra_feats)
|
||||
|
||||
|
||||
# def test_train_cpu():
|
||||
# poisson_problem = Poisson()
|
||||
# boundaries = ['gamma1', 'gamma2', 'gamma3', 'gamma4']
|
||||
# n = 10
|
||||
# poisson_problem.discretise_domain(n, 'grid', locations=boundaries)
|
||||
# pinn = GPINN(problem = poisson_problem,
|
||||
# model=model, extra_features=None, loss=LpLoss())
|
||||
# trainer = Trainer(solver=pinn, max_epochs=1,
|
||||
# accelerator='cpu', batch_size=20)
|
||||
# trainer.train()
|
||||
|
||||
# def test_log():
|
||||
# poisson_problem.discretise_domain(100)
|
||||
# solver = GPINN(problem = poisson_problem, model=model,
|
||||
# extra_features=None, loss=LpLoss())
|
||||
# trainer = Trainer(solver, max_epochs=2, accelerator='cpu')
|
||||
# trainer.train()
|
||||
# # assert the logged metrics are correct
|
||||
# logged_metrics = sorted(list(trainer.logged_metrics.keys()))
|
||||
# total_metrics = sorted(
|
||||
# list([key + '_loss' for key in poisson_problem.conditions.keys()])
|
||||
# + ['mean_loss'])
|
||||
# assert logged_metrics == total_metrics
|
||||
|
||||
# def test_train_restore():
|
||||
# tmpdir = "tests/tmp_restore"
|
||||
# poisson_problem = Poisson()
|
||||
# boundaries = ['gamma1', 'gamma2', 'gamma3', 'gamma4']
|
||||
# n = 10
|
||||
# poisson_problem.discretise_domain(n, 'grid', locations=boundaries)
|
||||
# pinn = GPINN(problem=poisson_problem,
|
||||
# model=model,
|
||||
# extra_features=None,
|
||||
# loss=LpLoss())
|
||||
# trainer = Trainer(solver=pinn,
|
||||
# max_epochs=5,
|
||||
# accelerator='cpu',
|
||||
# default_root_dir=tmpdir)
|
||||
# trainer.train()
|
||||
# ntrainer = Trainer(solver=pinn, max_epochs=15, accelerator='cpu')
|
||||
# t = ntrainer.train(
|
||||
# ckpt_path=f'{tmpdir}/lightning_logs/version_0/'
|
||||
# 'checkpoints/epoch=4-step=10.ckpt')
|
||||
# import shutil
|
||||
# shutil.rmtree(tmpdir)
|
||||
|
||||
|
||||
# def test_train_load():
|
||||
# tmpdir = "tests/tmp_load"
|
||||
# poisson_problem = Poisson()
|
||||
# boundaries = ['gamma1', 'gamma2', 'gamma3', 'gamma4']
|
||||
# n = 10
|
||||
# poisson_problem.discretise_domain(n, 'grid', locations=boundaries)
|
||||
# pinn = GPINN(problem=poisson_problem,
|
||||
# model=model,
|
||||
# extra_features=None,
|
||||
# loss=LpLoss())
|
||||
# trainer = Trainer(solver=pinn,
|
||||
# max_epochs=15,
|
||||
# accelerator='cpu',
|
||||
# default_root_dir=tmpdir)
|
||||
# trainer.train()
|
||||
# new_pinn = GPINN.load_from_checkpoint(
|
||||
# f'{tmpdir}/lightning_logs/version_0/checkpoints/epoch=14-step=30.ckpt',
|
||||
# problem = poisson_problem, model=model)
|
||||
# test_pts = CartesianDomain({'x': [0, 1], 'y': [0, 1]}).sample(10)
|
||||
# assert new_pinn.forward(test_pts).extract(['u']).shape == (10, 1)
|
||||
# assert new_pinn.forward(test_pts).extract(
|
||||
# ['u']).shape == pinn.forward(test_pts).extract(['u']).shape
|
||||
# torch.testing.assert_close(
|
||||
# new_pinn.forward(test_pts).extract(['u']),
|
||||
# pinn.forward(test_pts).extract(['u']))
|
||||
# import shutil
|
||||
# shutil.rmtree(tmpdir)
|
||||
|
||||
# def test_train_inverse_problem_cpu():
|
||||
# poisson_problem = InversePoisson()
|
||||
# boundaries = ['gamma1', 'gamma2', 'gamma3', 'gamma4', 'D']
|
||||
# n = 100
|
||||
# poisson_problem.discretise_domain(n, 'random', locations=boundaries)
|
||||
# pinn = GPINN(problem = poisson_problem,
|
||||
# model=model, extra_features=None, loss=LpLoss())
|
||||
# trainer = Trainer(solver=pinn, max_epochs=1,
|
||||
# accelerator='cpu', batch_size=20)
|
||||
# trainer.train()
|
||||
|
||||
|
||||
# # # TODO does not currently work
|
||||
# # def test_train_inverse_problem_restore():
|
||||
# # tmpdir = "tests/tmp_restore_inv"
|
||||
# # poisson_problem = InversePoisson()
|
||||
# # boundaries = ['gamma1', 'gamma2', 'gamma3', 'gamma4', 'D']
|
||||
# # n = 100
|
||||
# # poisson_problem.discretise_domain(n, 'random', locations=boundaries)
|
||||
# # pinn = GPINN(problem=poisson_problem,
|
||||
# # model=model,
|
||||
# # extra_features=None,
|
||||
# # loss=LpLoss())
|
||||
# # trainer = Trainer(solver=pinn,
|
||||
# # max_epochs=5,
|
||||
# # accelerator='cpu',
|
||||
# # default_root_dir=tmpdir)
|
||||
# # trainer.train()
|
||||
# # ntrainer = Trainer(solver=pinn, max_epochs=5, accelerator='cpu')
|
||||
# # t = ntrainer.train(
|
||||
# # ckpt_path=f'{tmpdir}/lightning_logs/version_0/checkpoints/epoch=4-step=10.ckpt')
|
||||
# # import shutil
|
||||
# # shutil.rmtree(tmpdir)
|
||||
|
||||
|
||||
# def test_train_inverse_problem_load():
|
||||
# tmpdir = "tests/tmp_load_inv"
|
||||
# poisson_problem = InversePoisson()
|
||||
# boundaries = ['gamma1', 'gamma2', 'gamma3', 'gamma4', 'D']
|
||||
# n = 100
|
||||
# poisson_problem.discretise_domain(n, 'random', locations=boundaries)
|
||||
# pinn = GPINN(problem=poisson_problem,
|
||||
# model=model,
|
||||
# extra_features=None,
|
||||
# loss=LpLoss())
|
||||
# trainer = Trainer(solver=pinn,
|
||||
# max_epochs=15,
|
||||
# accelerator='cpu',
|
||||
# default_root_dir=tmpdir)
|
||||
# trainer.train()
|
||||
# new_pinn = GPINN.load_from_checkpoint(
|
||||
# f'{tmpdir}/lightning_logs/version_0/checkpoints/epoch=14-step=30.ckpt',
|
||||
# problem = poisson_problem, model=model)
|
||||
# test_pts = CartesianDomain({'x': [0, 1], 'y': [0, 1]}).sample(10)
|
||||
# assert new_pinn.forward(test_pts).extract(['u']).shape == (10, 1)
|
||||
# assert new_pinn.forward(test_pts).extract(
|
||||
# ['u']).shape == pinn.forward(test_pts).extract(['u']).shape
|
||||
# torch.testing.assert_close(
|
||||
# new_pinn.forward(test_pts).extract(['u']),
|
||||
# pinn.forward(test_pts).extract(['u']))
|
||||
# import shutil
|
||||
# shutil.rmtree(tmpdir)
|
||||
|
||||
# # # TODO fix asap. Basically sampling few variables
|
||||
# # # works only if both variables are in a range.
|
||||
# # # if one is fixed and the other not, this will
|
||||
# # # not work. This test also needs to be fixed and
|
||||
# # # insert in test problem not in test pinn.
|
||||
# # def test_train_cpu_sampling_few_vars():
|
||||
# # poisson_problem = Poisson()
|
||||
# # boundaries = ['gamma1', 'gamma2', 'gamma3']
|
||||
# # n = 10
|
||||
# # poisson_problem.discretise_domain(n, 'grid', locations=boundaries)
|
||||
# # poisson_problem.discretise_domain(n, 'random', locations=['gamma4'], variables=['x'])
|
||||
# # poisson_problem.discretise_domain(n, 'random', locations=['gamma4'], variables=['y'])
|
||||
# # pinn = GPINN(problem = poisson_problem, model=model, extra_features=None, loss=LpLoss())
# # trainer = Trainer(solver=pinn, kwargs={'max_epochs' : 5, 'accelerator':'cpu'})
# # trainer.train()


# def test_train_extra_feats_cpu():
#     poisson_problem = Poisson()
#     boundaries = ['gamma1', 'gamma2', 'gamma3', 'gamma4']
#     n = 10
#     poisson_problem.discretise_domain(n, 'grid', locations=boundaries)
#     pinn = GPINN(problem=poisson_problem,
#                  model=model_extra_feats,
#                  extra_features=extra_feats)
#     trainer = Trainer(solver=pinn, max_epochs=5, accelerator='cpu')
#     trainer.train()


# # TODO, fix GitHub actions to run also on GPU
# # def test_train_gpu():
# #     poisson_problem = Poisson()
# #     boundaries = ['gamma1', 'gamma2', 'gamma3', 'gamma4']
# #     n = 10
# #     poisson_problem.discretise_domain(n, 'grid', locations=boundaries)
# #     pinn = GPINN(problem=poisson_problem, model=model, extra_features=None, loss=LpLoss())
# #     trainer = Trainer(solver=pinn, kwargs={'max_epochs' : 5, 'accelerator':'gpu'})
# #     trainer.train()

# # def test_train_gpu():  # TODO fix ASAP
# #     poisson_problem = Poisson()
# #     boundaries = ['gamma1', 'gamma2', 'gamma3', 'gamma4']
# #     n = 10
# #     poisson_problem.discretise_domain(n, 'grid', locations=boundaries)
# #     poisson_problem.conditions.pop('data')  # the input/output pts are allocated on cpu
# #     pinn = GPINN(problem=poisson_problem, model=model, extra_features=None, loss=LpLoss())
# #     trainer = Trainer(solver=pinn, kwargs={'max_epochs' : 5, 'accelerator':'gpu'})
# #     trainer.train()

# # def test_train_2():
# #     boundaries = ['gamma1', 'gamma2', 'gamma3', 'gamma4']
# #     n = 10
# #     expected_keys = [[], list(range(0, 50, 3))]
# #     param = [0, 3]
# #     for i, truth_key in zip(param, expected_keys):
# #         pinn = GPINN(problem, model)
# #         pinn.discretise_domain(n, 'grid', locations=boundaries)
# #         pinn.discretise_domain(n, 'grid', locations=['D'])
# #         pinn.train(50, save_loss=i)
# #         assert list(pinn.history_loss.keys()) == truth_key


# # def test_train_extra_feats():
# #     pinn = GPINN(problem, model_extra_feat, [myFeature()])
# #     boundaries = ['gamma1', 'gamma2', 'gamma3', 'gamma4']
# #     n = 10
# #     pinn.discretise_domain(n, 'grid', locations=boundaries)
# #     pinn.discretise_domain(n, 'grid', locations=['D'])
# #     pinn.train(5)


# # def test_train_2_extra_feats():
# #     boundaries = ['gamma1', 'gamma2', 'gamma3', 'gamma4']
# #     n = 10
# #     expected_keys = [[], list(range(0, 50, 3))]
# #     param = [0, 3]
# #     for i, truth_key in zip(param, expected_keys):
# #         pinn = GPINN(problem, model_extra_feat, [myFeature()])
# #         pinn.discretise_domain(n, 'grid', locations=boundaries)
# #         pinn.discretise_domain(n, 'grid', locations=['D'])
# #         pinn.train(50, save_loss=i)
# #         assert list(pinn.history_loss.keys()) == truth_key


# # def test_train_with_optimizer_kwargs():
# #     boundaries = ['gamma1', 'gamma2', 'gamma3', 'gamma4']
# #     n = 10
# #     expected_keys = [[], list(range(0, 50, 3))]
# #     param = [0, 3]
# #     for i, truth_key in zip(param, expected_keys):
# #         pinn = GPINN(problem, model, optimizer_kwargs={'lr' : 0.3})
# #         pinn.discretise_domain(n, 'grid', locations=boundaries)
# #         pinn.discretise_domain(n, 'grid', locations=['D'])
# #         pinn.train(50, save_loss=i)
# #         assert list(pinn.history_loss.keys()) == truth_key


# # def test_train_with_lr_scheduler():
# #     boundaries = ['gamma1', 'gamma2', 'gamma3', 'gamma4']
# #     n = 10
# #     expected_keys = [[], list(range(0, 50, 3))]
# #     param = [0, 3]
# #     for i, truth_key in zip(param, expected_keys):
# #         pinn = GPINN(
# #             problem,
# #             model,
# #             lr_scheduler_type=torch.optim.lr_scheduler.CyclicLR,
# #             lr_scheduler_kwargs={'base_lr' : 0.1, 'max_lr' : 0.3, 'cycle_momentum': False}
# #         )
# #         pinn.discretise_domain(n, 'grid', locations=boundaries)
# #         pinn.discretise_domain(n, 'grid', locations=['D'])
# #         pinn.train(50, save_loss=i)
# #         assert list(pinn.history_loss.keys()) == truth_key


# # # def test_train_batch():
# # #     pinn = GPINN(problem, model, batch_size=6)
# # #     boundaries = ['gamma1', 'gamma2', 'gamma3', 'gamma4']
# # #     n = 10
# # #     pinn.discretise_domain(n, 'grid', locations=boundaries)
# # #     pinn.discretise_domain(n, 'grid', locations=['D'])
# # #     pinn.train(5)


# # # def test_train_batch_2():
# # #     boundaries = ['gamma1', 'gamma2', 'gamma3', 'gamma4']
# # #     n = 10
# # #     expected_keys = [[], list(range(0, 50, 3))]
# # #     param = [0, 3]
# # #     for i, truth_key in zip(param, expected_keys):
# # #         pinn = GPINN(problem, model, batch_size=6)
# # #         pinn.discretise_domain(n, 'grid', locations=boundaries)
# # #         pinn.discretise_domain(n, 'grid', locations=['D'])
# # #         pinn.train(50, save_loss=i)
# # #         assert list(pinn.history_loss.keys()) == truth_key


# # if torch.cuda.is_available():

# #     # def test_gpu_train():
# #     #     pinn = GPINN(problem, model, batch_size=20, device='cuda')
# #     #     boundaries = ['gamma1', 'gamma2', 'gamma3', 'gamma4']
# #     #     n = 100
# #     #     pinn.discretise_domain(n, 'grid', locations=boundaries)
# #     #     pinn.discretise_domain(n, 'grid', locations=['D'])
# #     #     pinn.train(5)

# #     def test_gpu_train_nobatch():
# #         pinn = GPINN(problem, model, batch_size=None, device='cuda')
# #         boundaries = ['gamma1', 'gamma2', 'gamma3', 'gamma4']
# #         n = 100
# #         pinn.discretise_domain(n, 'grid', locations=boundaries)
# #         pinn.discretise_domain(n, 'grid', locations=['D'])
# #         pinn.train(5)
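The disabled GPINN tests above follow the same flow that the rewritten test files in this commit exercise: build a problem, discretise its domain, wrap a model in a solver, and drive it with a Trainer. A minimal sketch of that flow, assuming the PINA 0.2 API used throughout this diff (the solver is imported as GradientPINN in the new file below; the epoch count and sample size are illustrative):

    from pina.model import FeedForward
    from pina.solvers import GradientPINN
    from pina.trainer import Trainer
    from pina.problem.zoo import Poisson2DSquareProblem as Poisson

    # build the problem and sample collocation points
    problem = Poisson()
    problem.discretise_domain(50)

    # a plain feed-forward network sized to the problem variables
    model = FeedForward(len(problem.input_variables),
                        len(problem.output_variables))

    # the solver couples model and problem; the Trainer runs the loop
    solver = GradientPINN(problem=problem, model=model)
    trainer = Trainer(solver=solver, max_epochs=5, accelerator='cpu')
    trainer.train()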
155 tests/test_solvers/test_gradient_pinn.py Normal file
@@ -0,0 +1,155 @@
import pytest
import torch

from pina import LabelTensor, Condition
from pina.problem import TimeDependentProblem
from pina.solvers import GradientPINN
from pina.model import FeedForward
from pina.trainer import Trainer
from pina.problem.zoo import (
    Poisson2DSquareProblem as Poisson,
    InversePoisson2DSquareProblem as InversePoisson
)
from pina.condition import (
    InputOutputPointsCondition,
    InputPointsEquationCondition,
    DomainEquationCondition
)
from torch._dynamo.eval_frame import OptimizedModule


class DummyTimeProblem(TimeDependentProblem):
    """
    A mock time-dependent problem for testing purposes.
    """
    output_variables = ['u']
    temporal_domain = None
    conditions = {}


# define problems and model
problem = Poisson()
problem.discretise_domain(50)
inverse_problem = InversePoisson()
inverse_problem.discretise_domain(50)
model = FeedForward(
    len(problem.input_variables),
    len(problem.output_variables)
)

# add input-output condition to test supervised learning
input_pts = torch.rand(50, len(problem.input_variables))
input_pts = LabelTensor(input_pts, problem.input_variables)
output_pts = torch.rand(50, len(problem.output_variables))
output_pts = LabelTensor(output_pts, problem.output_variables)
problem.conditions['data'] = Condition(
    input_points=input_pts,
    output_points=output_pts
)


@pytest.mark.parametrize("problem", [problem, inverse_problem])
def test_constructor(problem):
    with pytest.raises(ValueError):
        GradientPINN(model=model, problem=DummyTimeProblem())
    solver = GradientPINN(model=model, problem=problem)

    assert solver.accepted_conditions_types == (
        InputOutputPointsCondition,
        InputPointsEquationCondition,
        DomainEquationCondition
    )


@pytest.mark.parametrize("problem", [problem, inverse_problem])
@pytest.mark.parametrize("batch_size", [None, 1, 5, 20])
@pytest.mark.parametrize("compile", [True, False])
def test_solver_train(problem, batch_size, compile):
    solver = GradientPINN(model=model, problem=problem)
    trainer = Trainer(solver=solver,
                      max_epochs=2,
                      accelerator='cpu',
                      batch_size=batch_size,
                      train_size=1.,
                      val_size=0.,
                      test_size=0.,
                      compile=compile)
    trainer.train()
    if trainer.compile:
        assert isinstance(solver.model, OptimizedModule)


@pytest.mark.parametrize("problem", [problem, inverse_problem])
@pytest.mark.parametrize("batch_size", [None, 1, 5, 20])
@pytest.mark.parametrize("compile", [True, False])
def test_solver_validation(problem, batch_size, compile):
    solver = GradientPINN(model=model, problem=problem)
    trainer = Trainer(solver=solver,
                      max_epochs=2,
                      accelerator='cpu',
                      batch_size=batch_size,
                      train_size=0.9,
                      val_size=0.1,
                      test_size=0.,
                      compile=compile)
    trainer.train()
    if trainer.compile:
        assert isinstance(solver.model, OptimizedModule)


@pytest.mark.parametrize("problem", [problem, inverse_problem])
@pytest.mark.parametrize("batch_size", [None, 1, 5, 20])
@pytest.mark.parametrize("compile", [True, False])
def test_solver_test(problem, batch_size, compile):
    solver = GradientPINN(model=model, problem=problem)
    trainer = Trainer(solver=solver,
                      max_epochs=2,
                      accelerator='cpu',
                      batch_size=batch_size,
                      train_size=0.7,
                      val_size=0.2,
                      test_size=0.1,
                      compile=compile)
    trainer.test()
    if trainer.compile:
        assert isinstance(solver.model, OptimizedModule)


@pytest.mark.parametrize("problem", [problem, inverse_problem])
def test_train_load_restore(problem):
    dir = "tests/test_solvers/tmp"
    solver = GradientPINN(model=model, problem=problem)
    trainer = Trainer(solver=solver,
                      max_epochs=5,
                      accelerator='cpu',
                      batch_size=None,
                      train_size=0.7,
                      val_size=0.2,
                      test_size=0.1,
                      default_root_dir=dir)
    trainer.train()

    # restore
    new_trainer = Trainer(solver=solver, max_epochs=5, accelerator='cpu')
    new_trainer.train(
        ckpt_path=f'{dir}/lightning_logs/version_0/checkpoints/' +
        'epoch=4-step=5.ckpt')

    # loading
    new_solver = GradientPINN.load_from_checkpoint(
        f'{dir}/lightning_logs/version_0/checkpoints/epoch=4-step=5.ckpt',
        problem=problem, model=model)

    test_pts = LabelTensor(torch.rand(20, 2), problem.input_variables)
    assert new_solver.forward(test_pts).shape == (20, 1)
    assert new_solver.forward(test_pts).shape == (
        solver.forward(test_pts).shape
    )
    torch.testing.assert_close(
        new_solver.forward(test_pts),
        solver.forward(test_pts))

    # rm directories
    import shutil
    shutil.rmtree('tests/test_solvers/tmp')
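The load/restore test above is the template that every solver test file in this commit repeats: train with a default_root_dir, resume the same Trainer from the Lightning checkpoint, then rebuild the solver via load_from_checkpoint and compare forward passes. A condensed sketch of the round-trip being asserted (assuming the Lightning checkpoint layout shown in this diff; epoch=4-step=5 corresponds to max_epochs=5 with one optimisation step per epoch, since batch_size=None trains full-batch):

    ckpt = 'tests/test_solvers/tmp/lightning_logs/version_0/checkpoints/epoch=4-step=5.ckpt'

    # resuming continues optimisation from the saved state
    Trainer(solver=solver, max_epochs=5, accelerator='cpu').train(ckpt_path=ckpt)

    # loading rebuilds an independent solver with identical weights,
    # so both solvers must agree pointwise on fresh inputs
    restored = GradientPINN.load_from_checkpoint(ckpt, problem=problem, model=model)
    torch.testing.assert_close(restored.forward(test_pts), solver.forward(test_pts))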
@@ -1,185 +1,134 @@
import pytest
import torch
from pina.problem import SpatialProblem, InverseProblem
from pina.operators import laplacian
from pina.domain import CartesianDomain
from pina import Condition, LabelTensor
from pina.solvers import PINN
from pina.trainer import Trainer

from pina import LabelTensor, Condition
from pina.model import FeedForward
from pina.equation import Equation
from pina.equation.equation_factory import FixedValue
from pina.loss import LpLoss
from pina.problem.zoo import Poisson2DSquareProblem

# class InversePoisson(SpatialProblem, InverseProblem):
#     '''
#     Problem definition for the Poisson equation.
#     '''
#     output_variables = ['u']
#     x_min = -2
#     x_max = 2
#     y_min = -2
#     y_max = 2
#     data_input = LabelTensor(torch.rand(10, 2), ['x', 'y'])
#     data_output = LabelTensor(torch.rand(10, 1), ['u'])
#     spatial_domain = CartesianDomain({'x': [x_min, x_max], 'y': [y_min, y_max]})
#     # define the ranges for the parameters
#     unknown_parameter_domain = CartesianDomain({'mu1': [-1, 1], 'mu2': [-1, 1]})

#     def laplace_equation(input_, output_, params_):
#         '''
#         Laplace equation with a force term.
#         '''
#         force_term = torch.exp(
#             - 2*(input_.extract(['x']) - params_['mu1'])**2
#             - 2*(input_.extract(['y']) - params_['mu2'])**2)
#         delta_u = laplacian(output_, input_, components=['u'], d=['x', 'y'])

#         return delta_u - force_term

#     # define the conditions for the loss (boundary conditions, equation, data)
#     conditions = {
#         'gamma1': Condition(domain=CartesianDomain({'x': [x_min, x_max],
#                                                     'y': y_max}),
#                             equation=FixedValue(0.0, components=['u'])),
#         'gamma2': Condition(domain=CartesianDomain({'x': [x_min, x_max],
#                                                     'y': y_min}),
#                             equation=FixedValue(0.0, components=['u'])),
#         'gamma3': Condition(domain=CartesianDomain({'x': x_max,
#                                                     'y': [y_min, y_max]}),
#                             equation=FixedValue(0.0, components=['u'])),
#         'gamma4': Condition(domain=CartesianDomain({'x': x_min,
#                                                     'y': [y_min, y_max]}),
#                             equation=FixedValue(0.0, components=['u'])),
#         'D': Condition(domain=CartesianDomain({'x': [x_min, x_max],
#                                                'y': [y_min, y_max]}),
#                        equation=Equation(laplace_equation)),
#         'data': Condition(input_points=data_input.extract(['x', 'y']),
#                           output_points=data_output)
#     }

from pina.trainer import Trainer
from pina.solvers import PINN
from pina.condition import (
    InputOutputPointsCondition,
    InputPointsEquationCondition,
    DomainEquationCondition
)
from pina.problem.zoo import (
    Poisson2DSquareProblem as Poisson,
    InversePoisson2DSquareProblem as InversePoisson
)
from torch._dynamo.eval_frame import OptimizedModule


# # make the problem
# poisson_problem = Poisson2DSquareProblem()
# model = FeedForward(len(poisson_problem.input_variables),
#                     len(poisson_problem.output_variables))
# model_extra_feats = FeedForward(
#     len(poisson_problem.input_variables) + 1,
#     len(poisson_problem.output_variables))

# define problems and model
problem = Poisson()
problem.discretise_domain(50)
inverse_problem = InversePoisson()
inverse_problem.discretise_domain(50)
model = FeedForward(
    len(problem.input_variables),
    len(problem.output_variables)
)

# add input-output condition to test supervised learning
input_pts = torch.rand(50, len(problem.input_variables))
input_pts = LabelTensor(input_pts, problem.input_variables)
output_pts = torch.rand(50, len(problem.output_variables))
output_pts = LabelTensor(output_pts, problem.output_variables)
problem.conditions['data'] = Condition(
    input_points=input_pts,
    output_points=output_pts
)


@pytest.mark.parametrize("problem", [problem, inverse_problem])
def test_constructor(problem):
    solver = PINN(problem=problem, model=model)

    assert solver.accepted_conditions_types == (
        InputOutputPointsCondition,
        InputPointsEquationCondition,
        DomainEquationCondition
    )


@pytest.mark.parametrize("problem", [problem, inverse_problem])
@pytest.mark.parametrize("batch_size", [None, 1, 5, 20])
@pytest.mark.parametrize("compile", [True, False])
def test_solver_train(problem, batch_size, compile):
    solver = PINN(model=model, problem=problem)
    trainer = Trainer(solver=solver,
                      max_epochs=2,
                      accelerator='cpu',
                      batch_size=batch_size,
                      train_size=1.,
                      val_size=0.,
                      test_size=0.,
                      compile=compile)
    trainer.train()


# def test_constructor():
#     PINN(problem=poisson_problem, model=model, extra_features=None)

@pytest.mark.parametrize("problem", [problem, inverse_problem])
@pytest.mark.parametrize("batch_size", [None, 1, 5, 20])
@pytest.mark.parametrize("compile", [True, False])
def test_solver_validation(problem, batch_size, compile):
    solver = PINN(model=model, problem=problem)
    trainer = Trainer(solver=solver,
                      max_epochs=2,
                      accelerator='cpu',
                      batch_size=batch_size,
                      train_size=0.9,
                      val_size=0.1,
                      test_size=0.,
                      compile=compile)
    trainer.train()
    if trainer.compile:
        assert isinstance(solver.model, OptimizedModule)


@pytest.mark.parametrize("problem", [problem, inverse_problem])
@pytest.mark.parametrize("batch_size", [None, 1, 5, 20])
@pytest.mark.parametrize("compile", [True, False])
def test_solver_test(problem, batch_size, compile):
    solver = PINN(model=model, problem=problem)
    trainer = Trainer(solver=solver,
                      max_epochs=2,
                      accelerator='cpu',
                      batch_size=batch_size,
                      train_size=0.7,
                      val_size=0.2,
                      test_size=0.1,
                      compile=compile)
    trainer.test()


# def test_constructor_extra_feats():
#     model_extra_feats = FeedForward(
#         len(poisson_problem.input_variables) + 1,
#         len(poisson_problem.output_variables))
#     PINN(problem=poisson_problem,
#          model=model_extra_feats)

@pytest.mark.parametrize("problem", [problem, inverse_problem])
def test_train_load_restore(problem):
    dir = "tests/test_solvers/tmp"
    solver = PINN(model=model, problem=problem)
    trainer = Trainer(solver=solver,
                      max_epochs=5,
                      accelerator='cpu',
                      batch_size=None,
                      train_size=0.7,
                      val_size=0.2,
                      test_size=0.1,
                      default_root_dir=dir)
    trainer.train()

    # restore
    new_trainer = Trainer(solver=solver, max_epochs=5, accelerator='cpu')
    new_trainer.train(
        ckpt_path=f'{dir}/lightning_logs/version_0/checkpoints/' +
        'epoch=4-step=5.ckpt')

# def test_train_cpu():
#     poisson_problem = Poisson2DSquareProblem()
#     boundaries = ['gamma1', 'gamma2', 'gamma3', 'gamma4']
#     n = 10
#     poisson_problem.discretise_domain(n, 'grid', locations=boundaries)
#     pinn = PINN(problem=poisson_problem, model=model,
#                 extra_features=None, loss=LpLoss())
#     trainer = Trainer(solver=pinn, max_epochs=1,
#                       accelerator='cpu', batch_size=20, val_size=0., train_size=1., test_size=0.)

    # loading
    new_solver = PINN.load_from_checkpoint(
        f'{dir}/lightning_logs/version_0/checkpoints/epoch=4-step=5.ckpt',
        problem=problem, model=model)

# def test_train_load():
#     tmpdir = "tests/tmp_load"
#     poisson_problem = Poisson2DSquareProblem()
#     boundaries = ['gamma1', 'gamma2', 'gamma3', 'gamma4']
#     n = 10
#     poisson_problem.discretise_domain(n, 'grid', locations=boundaries)
#     pinn = PINN(problem=poisson_problem,
#                 model=model,
#                 extra_features=None,
#                 loss=LpLoss())
#     trainer = Trainer(solver=pinn,
#                       max_epochs=15,
#                       accelerator='cpu',
#                       default_root_dir=tmpdir)
#     trainer.train()
#     new_pinn = PINN.load_from_checkpoint(
#         f'{tmpdir}/lightning_logs/version_0/checkpoints/epoch=14-step=15.ckpt',
#         problem=poisson_problem, model=model)
#     test_pts = CartesianDomain({'x': [0, 1], 'y': [0, 1]}).sample(10)
#     assert new_pinn.forward(test_pts).extract(['u']).shape == (10, 1)
#     assert new_pinn.forward(test_pts).extract(
#         ['u']).shape == pinn.forward(test_pts).extract(['u']).shape
#     torch.testing.assert_close(
#         new_pinn.forward(test_pts).extract(['u']),
#         pinn.forward(test_pts).extract(['u']))
#     import shutil
#     shutil.rmtree(tmpdir)

    test_pts = LabelTensor(torch.rand(20, 2), problem.input_variables)
    assert new_solver.forward(test_pts).shape == (20, 1)
    assert new_solver.forward(test_pts).shape == solver.forward(test_pts).shape
    torch.testing.assert_close(
        new_solver.forward(test_pts),
        solver.forward(test_pts))

# def test_train_restore():
#     tmpdir = "tests/tmp_restore"
#     poisson_problem = Poisson2DSquareProblem()
#     boundaries = ['gamma1', 'gamma2', 'gamma3', 'gamma4']
#     n = 10
#     poisson_problem.discretise_domain(n, 'grid', locations=boundaries)
#     pinn = PINN(problem=poisson_problem,
#                 model=model,
#                 extra_features=None,
#                 loss=LpLoss())
#     trainer = Trainer(solver=pinn,
#                       max_epochs=5,
#                       accelerator='cpu',
#                       default_root_dir=tmpdir)
#     trainer.train()
#     ntrainer = Trainer(solver=pinn, max_epochs=15, accelerator='cpu')
#     t = ntrainer.train(
#         ckpt_path=f'{tmpdir}/lightning_logs/version_0/'
#                   'checkpoints/epoch=4-step=5.ckpt')
#     import shutil
#     shutil.rmtree(tmpdir)

# def test_train_inverse_problem_cpu():
#     poisson_problem = InversePoisson()
#     boundaries = ['gamma1', 'gamma2', 'gamma3', 'gamma4', 'D']
#     n = 100
#     poisson_problem.discretise_domain(n, 'random', locations=boundaries,
#                                       variables=['x', 'y'])
#     pinn = PINN(problem=poisson_problem, model=model,
#                 extra_features=None, loss=LpLoss())
#     trainer = Trainer(solver=pinn, max_epochs=1,
#                       accelerator='cpu', batch_size=20)
#     trainer.train()

# def test_train_inverse_problem_load():
#     tmpdir = "tests/tmp_load_inv"
#     poisson_problem = InversePoisson()
#     boundaries = ['gamma1', 'gamma2', 'gamma3', 'gamma4', 'D']
#     n = 100
#     poisson_problem.discretise_domain(n, 'random', locations=boundaries)
#     pinn = PINN(problem=poisson_problem,
#                 model=model,
#                 extra_features=None,
#                 loss=LpLoss())
#     trainer = Trainer(solver=pinn,
#                       max_epochs=15,
#                       accelerator='cpu',
#                       default_root_dir=tmpdir)
#     trainer.train()
#     new_pinn = PINN.load_from_checkpoint(
#         f'{tmpdir}/lightning_logs/version_0/checkpoints/epoch=14-step=15.ckpt',
#         problem=poisson_problem, model=model)
#     test_pts = CartesianDomain({'x': [0, 1], 'y': [0, 1]}).sample(10)
#     assert new_pinn.forward(test_pts).extract(['u']).shape == (10, 1)
#     assert new_pinn.forward(test_pts).extract(
#         ['u']).shape == pinn.forward(test_pts).extract(['u']).shape
#     torch.testing.assert_close(
#         new_pinn.forward(test_pts).extract(['u']),
#         pinn.forward(test_pts).extract(['u']))
#     import shutil
#     shutil.rmtree(tmpdir)

    # rm directories
    import shutil
    shutil.rmtree('tests/test_solvers/tmp')
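Across these PINN tests the Trainer receives explicit split fractions (train_size, val_size, test_size) that always sum to one: 1.0/0.0/0.0 for pure training, 0.9/0.1/0.0 when validating, 0.7/0.2/0.1 when testing. A minimal sketch of how a split configuration maps onto the two entry points used above (values illustrative, assuming the Trainer API shown in this diff):

    # validation runs inside trainer.train() on the held-out val split
    trainer = Trainer(solver=solver, max_epochs=2, accelerator='cpu',
                      batch_size=None, train_size=0.9, val_size=0.1, test_size=0.)
    trainer.train()

    # testing needs a non-empty test split and goes through trainer.test()
    trainer = Trainer(solver=solver, max_epochs=2, accelerator='cpu',
                      batch_size=None, train_size=0.7, val_size=0.2, test_size=0.1)
    trainer.test()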
@@ -1,449 +1,157 @@
import torch
import pytest

from pina.problem import SpatialProblem, InverseProblem
from pina.operators import laplacian
from pina.domain import CartesianDomain
from pina import Condition, LabelTensor
from pina.solvers import RBAPINN as PINN
from pina.trainer import Trainer
from pina import LabelTensor, Condition
from pina.model import FeedForward
from pina.equation import Equation
from pina.equation.equation_factory import FixedValue
from pina.loss import LpLoss
from pina.trainer import Trainer
from pina.solvers import RBAPINN
from pina.condition import (
    InputOutputPointsCondition,
    InputPointsEquationCondition,
    DomainEquationCondition
)
from pina.problem.zoo import (
    Poisson2DSquareProblem as Poisson,
    InversePoisson2DSquareProblem as InversePoisson
)
from torch._dynamo.eval_frame import OptimizedModule

# define problems and model
problem = Poisson()
problem.discretise_domain(50)
inverse_problem = InversePoisson()
inverse_problem.discretise_domain(50)
model = FeedForward(
    len(problem.input_variables),
    len(problem.output_variables)
)

# add input-output condition to test supervised learning
input_pts = torch.rand(50, len(problem.input_variables))
input_pts = LabelTensor(input_pts, problem.input_variables)
output_pts = torch.rand(50, len(problem.output_variables))
output_pts = LabelTensor(output_pts, problem.output_variables)
problem.conditions['data'] = Condition(
    input_points=input_pts,
    output_points=output_pts
)


# def laplace_equation(input_, output_):
#     force_term = (torch.sin(input_.extract(['x']) * torch.pi) *
#                   torch.sin(input_.extract(['y']) * torch.pi))
#     delta_u = laplacian(output_.extract(['u']), input_)
#     return delta_u - force_term

@pytest.mark.parametrize("problem", [problem, inverse_problem])
@pytest.mark.parametrize("eta", [1, 0.001])
@pytest.mark.parametrize("gamma", [0.5, 0.9])
def test_constructor(problem, eta, gamma):
    with pytest.raises(AssertionError):
        solver = RBAPINN(model=model, problem=problem, gamma=1.5)
    solver = RBAPINN(model=model, problem=problem, eta=eta, gamma=gamma)

    assert solver.accepted_conditions_types == (
        InputOutputPointsCondition,
        InputPointsEquationCondition,
        DomainEquationCondition
    )


# my_laplace = Equation(laplace_equation)
# in_ = LabelTensor(torch.tensor([[0., 1.]]), ['x', 'y'])
# out_ = LabelTensor(torch.tensor([[0.]]), ['u'])
# in2_ = LabelTensor(torch.rand(60, 2), ['x', 'y'])
# out2_ = LabelTensor(torch.rand(60, 1), ['u'])

@pytest.mark.parametrize("problem", [problem, inverse_problem])
def test_wrong_batch(problem):
    with pytest.raises(NotImplementedError):
        solver = RBAPINN(model=model, problem=problem)
        trainer = Trainer(solver=solver,
                          max_epochs=2,
                          accelerator='cpu',
                          batch_size=10,
                          train_size=1.,
                          val_size=0.,
                          test_size=0.)
        trainer.train()


# class InversePoisson(SpatialProblem, InverseProblem):
#     '''
#     Problem definition for the Poisson equation.
#     '''
#     output_variables = ['u']
#     x_min = -2
#     x_max = 2
#     y_min = -2
#     y_max = 2
#     data_input = LabelTensor(torch.rand(10, 2), ['x', 'y'])
#     data_output = LabelTensor(torch.rand(10, 1), ['u'])
#     spatial_domain = CartesianDomain({'x': [x_min, x_max], 'y': [y_min, y_max]})
#     # define the ranges for the parameters
#     unknown_parameter_domain = CartesianDomain({'mu1': [-1, 1], 'mu2': [-1, 1]})

#     def laplace_equation(input_, output_, params_):
#         '''
#         Laplace equation with a force term.
#         '''
#         force_term = torch.exp(
#             - 2*(input_.extract(['x']) - params_['mu1'])**2
#             - 2*(input_.extract(['y']) - params_['mu2'])**2)
#         delta_u = laplacian(output_, input_, components=['u'], d=['x', 'y'])

#         return delta_u - force_term

#     # define the conditions for the loss (boundary conditions, equation, data)
#     conditions = {
#         'gamma1': Condition(location=CartesianDomain({'x': [x_min, x_max],
#                                                       'y': y_max}),
#                             equation=FixedValue(0.0, components=['u'])),
#         'gamma2': Condition(location=CartesianDomain({'x': [x_min, x_max],
#                                                       'y': y_min}),
#                             equation=FixedValue(0.0, components=['u'])),
#         'gamma3': Condition(location=CartesianDomain({'x': x_max,
#                                                       'y': [y_min, y_max]}),
#                             equation=FixedValue(0.0, components=['u'])),
#         'gamma4': Condition(location=CartesianDomain({'x': x_min,
#                                                       'y': [y_min, y_max]}),
#                             equation=FixedValue(0.0, components=['u'])),
#         'D': Condition(location=CartesianDomain({'x': [x_min, x_max],
#                                                  'y': [y_min, y_max]}),
#                        equation=Equation(laplace_equation)),
#         'data': Condition(input_points=data_input.extract(['x', 'y']),
#                           output_points=data_output)
#     }

@pytest.mark.parametrize("problem", [problem, inverse_problem])
@pytest.mark.parametrize("compile", [True, False])
def test_solver_train(problem, compile):
    solver = RBAPINN(model=model, problem=problem)
    trainer = Trainer(solver=solver,
                      max_epochs=2,
                      accelerator='cpu',
                      batch_size=None,
                      train_size=1.,
                      val_size=0.,
                      test_size=0.,
                      compile=compile)
    trainer.train()
    if trainer.compile:
        assert isinstance(solver.model, OptimizedModule)


# class Poisson(SpatialProblem):
#     output_variables = ['u']
#     spatial_domain = CartesianDomain({'x': [0, 1], 'y': [0, 1]})

#     conditions = {
#         'gamma1': Condition(
#             location=CartesianDomain({'x': [0, 1], 'y': 1}),
#             equation=FixedValue(0.0)),
#         'gamma2': Condition(
#             location=CartesianDomain({'x': [0, 1], 'y': 0}),
#             equation=FixedValue(0.0)),
#         'gamma3': Condition(
#             location=CartesianDomain({'x': 1, 'y': [0, 1]}),
#             equation=FixedValue(0.0)),
#         'gamma4': Condition(
#             location=CartesianDomain({'x': 0, 'y': [0, 1]}),
#             equation=FixedValue(0.0)),
#         'D': Condition(
#             input_points=LabelTensor(torch.rand(size=(100, 2)), ['x', 'y']),
#             equation=my_laplace),
#         'data': Condition(
#             input_points=in_,
#             output_points=out_),
#         'data2': Condition(
#             input_points=in2_,
#             output_points=out2_)
#     }

#     def poisson_sol(self, pts):
#         return -(torch.sin(pts.extract(['x']) * torch.pi) *
#                  torch.sin(pts.extract(['y']) * torch.pi)) / (2 * torch.pi**2)

#     truth_solution = poisson_sol

@pytest.mark.parametrize("problem", [problem, inverse_problem])
@pytest.mark.parametrize("compile", [True, False])
def test_solver_validation(problem, compile):
    solver = RBAPINN(model=model, problem=problem)
    trainer = Trainer(solver=solver,
                      max_epochs=2,
                      accelerator='cpu',
                      batch_size=None,
                      train_size=0.9,
                      val_size=0.1,
                      test_size=0.,
                      compile=compile)
    trainer.train()
    if trainer.compile:
        assert isinstance(solver.model, OptimizedModule)


# class myFeature(torch.nn.Module):
#     """
#     Feature: sin(x)
#     """

#     def __init__(self):
#         super(myFeature, self).__init__()

#     def forward(self, x):
#         t = (torch.sin(x.extract(['x']) * torch.pi) *
#              torch.sin(x.extract(['y']) * torch.pi))
#         return LabelTensor(t, ['sin(x)sin(y)'])

@pytest.mark.parametrize("problem", [problem, inverse_problem])
@pytest.mark.parametrize("compile", [True, False])
def test_solver_test(problem, compile):
    solver = RBAPINN(model=model, problem=problem)
    trainer = Trainer(solver=solver,
                      max_epochs=2,
                      accelerator='cpu',
                      batch_size=None,
                      train_size=0.7,
                      val_size=0.2,
                      test_size=0.1,
                      compile=compile)
    trainer.test()
    if trainer.compile:
        assert isinstance(solver.model, OptimizedModule)


# # make the problem
# poisson_problem = Poisson()
# model = FeedForward(len(poisson_problem.input_variables),
#                     len(poisson_problem.output_variables))
# model_extra_feats = FeedForward(
#     len(poisson_problem.input_variables) + 1,
#     len(poisson_problem.output_variables))
# extra_feats = [myFeature()]

@pytest.mark.parametrize("problem", [problem, inverse_problem])
def test_train_load_restore(problem):
    dir = "tests/test_solvers/tmp"
    solver = RBAPINN(model=model, problem=problem)
    trainer = Trainer(solver=solver,
                      max_epochs=5,
                      accelerator='cpu',
                      batch_size=None,
                      train_size=0.7,
                      val_size=0.2,
                      test_size=0.1,
                      default_root_dir=dir)
    trainer.train()

    # restore
    new_trainer = Trainer(solver=solver, max_epochs=5, accelerator='cpu')
    new_trainer.train(
        ckpt_path=f'{dir}/lightning_logs/version_0/checkpoints/' +
        'epoch=4-step=5.ckpt')

# def test_constructor():
#     PINN(problem=poisson_problem, model=model, extra_features=None)
#     with pytest.raises(ValueError):
#         PINN(problem=poisson_problem, model=model, eta='x')
#         PINN(problem=poisson_problem, model=model, gamma='x')

    # loading
    new_solver = RBAPINN.load_from_checkpoint(
        f'{dir}/lightning_logs/version_0/checkpoints/epoch=4-step=5.ckpt',
        problem=problem, model=model)

    test_pts = LabelTensor(torch.rand(20, 2), problem.input_variables)
    assert new_solver.forward(test_pts).shape == (20, 1)
    assert new_solver.forward(test_pts).shape == (
        solver.forward(test_pts).shape
    )
    torch.testing.assert_close(
        new_solver.forward(test_pts),
        solver.forward(test_pts))

# def test_constructor_extra_feats():
#     model_extra_feats = FeedForward(
#         len(poisson_problem.input_variables) + 1,
#         len(poisson_problem.output_variables))
#     PINN(problem=poisson_problem,
#          model=model_extra_feats,
#          extra_features=extra_feats)


# def test_train_cpu():
#     poisson_problem = Poisson()
#     boundaries = ['gamma1', 'gamma2', 'gamma3', 'gamma4']
#     n = 10
#     poisson_problem.discretise_domain(n, 'grid', locations=boundaries)
#     pinn = PINN(problem=poisson_problem, model=model,
#                 extra_features=None, loss=LpLoss())
#     trainer = Trainer(solver=pinn, max_epochs=1,
#                       accelerator='cpu', batch_size=20)
#     trainer.train()

# def test_log():
#     poisson_problem.discretise_domain(100)
#     solver = PINN(problem=poisson_problem, model=model,
#                   extra_features=None, loss=LpLoss())
#     trainer = Trainer(solver, max_epochs=2, accelerator='cpu')
#     trainer.train()
#     # assert the logged metrics are correct
#     logged_metrics = sorted(list(trainer.logged_metrics.keys()))
#     total_metrics = sorted(
#         list([key + '_loss' for key in poisson_problem.conditions.keys()])
#         + ['mean_loss'])
#     assert logged_metrics == total_metrics

# def test_train_restore():
#     tmpdir = "tests/tmp_restore"
#     poisson_problem = Poisson()
#     boundaries = ['gamma1', 'gamma2', 'gamma3', 'gamma4']
#     n = 10
#     poisson_problem.discretise_domain(n, 'grid', locations=boundaries)
#     pinn = PINN(problem=poisson_problem,
#                 model=model,
#                 extra_features=None,
#                 loss=LpLoss())
#     trainer = Trainer(solver=pinn,
#                       max_epochs=5,
#                       accelerator='cpu',
#                       default_root_dir=tmpdir)
#     trainer.train()
#     ntrainer = Trainer(solver=pinn, max_epochs=15, accelerator='cpu')
#     t = ntrainer.train(
#         ckpt_path=f'{tmpdir}/lightning_logs/version_0/'
#                   'checkpoints/epoch=4-step=10.ckpt')
#     import shutil
#     shutil.rmtree(tmpdir)


# def test_train_load():
#     tmpdir = "tests/tmp_load"
#     poisson_problem = Poisson()
#     boundaries = ['gamma1', 'gamma2', 'gamma3', 'gamma4']
#     n = 10
#     poisson_problem.discretise_domain(n, 'grid', locations=boundaries)
#     pinn = PINN(problem=poisson_problem,
#                 model=model,
#                 extra_features=None,
#                 loss=LpLoss())
#     trainer = Trainer(solver=pinn,
#                       max_epochs=15,
#                       accelerator='cpu',
#                       default_root_dir=tmpdir)
#     trainer.train()
#     new_pinn = PINN.load_from_checkpoint(
#         f'{tmpdir}/lightning_logs/version_0/checkpoints/epoch=14-step=30.ckpt',
#         problem=poisson_problem, model=model)
#     test_pts = CartesianDomain({'x': [0, 1], 'y': [0, 1]}).sample(10)
#     assert new_pinn.forward(test_pts).extract(['u']).shape == (10, 1)
#     assert new_pinn.forward(test_pts).extract(
#         ['u']).shape == pinn.forward(test_pts).extract(['u']).shape
#     torch.testing.assert_close(
#         new_pinn.forward(test_pts).extract(['u']),
#         pinn.forward(test_pts).extract(['u']))
#     import shutil
#     shutil.rmtree(tmpdir)

# def test_train_inverse_problem_cpu():
#     poisson_problem = InversePoisson()
#     boundaries = ['gamma1', 'gamma2', 'gamma3', 'gamma4', 'D']
#     n = 100
#     poisson_problem.discretise_domain(n, 'random', locations=boundaries)
#     pinn = PINN(problem=poisson_problem, model=model,
#                 extra_features=None, loss=LpLoss())
#     trainer = Trainer(solver=pinn, max_epochs=1,
#                       accelerator='cpu', batch_size=20)
#     trainer.train()


# # # TODO does not currently work
# # def test_train_inverse_problem_restore():
# #     tmpdir = "tests/tmp_restore_inv"
# #     poisson_problem = InversePoisson()
# #     boundaries = ['gamma1', 'gamma2', 'gamma3', 'gamma4', 'D']
# #     n = 100
# #     poisson_problem.discretise_domain(n, 'random', locations=boundaries)
# #     pinn = PINN(problem=poisson_problem,
# #                 model=model,
# #                 extra_features=None,
# #                 loss=LpLoss())
# #     trainer = Trainer(solver=pinn,
# #                       max_epochs=5,
# #                       accelerator='cpu',
# #                       default_root_dir=tmpdir)
# #     trainer.train()
# #     ntrainer = Trainer(solver=pinn, max_epochs=5, accelerator='cpu')
# #     t = ntrainer.train(
# #         ckpt_path=f'{tmpdir}/lightning_logs/version_0/checkpoints/epoch=4-step=10.ckpt')
# #     import shutil
# #     shutil.rmtree(tmpdir)


# def test_train_inverse_problem_load():
#     tmpdir = "tests/tmp_load_inv"
#     poisson_problem = InversePoisson()
#     boundaries = ['gamma1', 'gamma2', 'gamma3', 'gamma4', 'D']
#     n = 100
#     poisson_problem.discretise_domain(n, 'random', locations=boundaries)
#     pinn = PINN(problem=poisson_problem,
#                 model=model,
#                 extra_features=None,
#                 loss=LpLoss())
#     trainer = Trainer(solver=pinn,
#                       max_epochs=15,
#                       accelerator='cpu',
#                       default_root_dir=tmpdir)
#     trainer.train()
#     new_pinn = PINN.load_from_checkpoint(
#         f'{tmpdir}/lightning_logs/version_0/checkpoints/epoch=14-step=30.ckpt',
#         problem=poisson_problem, model=model)
#     test_pts = CartesianDomain({'x': [0, 1], 'y': [0, 1]}).sample(10)
#     assert new_pinn.forward(test_pts).extract(['u']).shape == (10, 1)
#     assert new_pinn.forward(test_pts).extract(
#         ['u']).shape == pinn.forward(test_pts).extract(['u']).shape
#     torch.testing.assert_close(
#         new_pinn.forward(test_pts).extract(['u']),
#         pinn.forward(test_pts).extract(['u']))
#     import shutil
#     shutil.rmtree(tmpdir)

# # # TODO fix asap. Basically, sampling a few variables
# # # works only if both variables are in a range:
# # # if one is fixed and the other is not, this will
# # # not work. This test also needs to be fixed and
# # # inserted in the problem tests, not in the PINN tests.
# # def test_train_cpu_sampling_few_vars():
# #     poisson_problem = Poisson()
# #     boundaries = ['gamma1', 'gamma2', 'gamma3']
# #     n = 10
# #     poisson_problem.discretise_domain(n, 'grid', locations=boundaries)
# #     poisson_problem.discretise_domain(n, 'random', locations=['gamma4'], variables=['x'])
# #     poisson_problem.discretise_domain(n, 'random', locations=['gamma4'], variables=['y'])
# #     pinn = PINN(problem=poisson_problem, model=model, extra_features=None, loss=LpLoss())
# #     trainer = Trainer(solver=pinn, kwargs={'max_epochs' : 5, 'accelerator':'cpu'})
# #     trainer.train()


# def test_train_extra_feats_cpu():
#     poisson_problem = Poisson()
#     boundaries = ['gamma1', 'gamma2', 'gamma3', 'gamma4']
#     n = 10
#     poisson_problem.discretise_domain(n, 'grid', locations=boundaries)
#     pinn = PINN(problem=poisson_problem,
#                 model=model_extra_feats,
#                 extra_features=extra_feats)
#     trainer = Trainer(solver=pinn, max_epochs=5, accelerator='cpu')
#     trainer.train()


# # TODO, fix GitHub actions to run also on GPU
# # def test_train_gpu():
# #     poisson_problem = Poisson()
# #     boundaries = ['gamma1', 'gamma2', 'gamma3', 'gamma4']
# #     n = 10
# #     poisson_problem.discretise_domain(n, 'grid', locations=boundaries)
# #     pinn = PINN(problem=poisson_problem, model=model, extra_features=None, loss=LpLoss())
# #     trainer = Trainer(solver=pinn, kwargs={'max_epochs' : 5, 'accelerator':'gpu'})
# #     trainer.train()

# # def test_train_gpu():  # TODO fix ASAP
# #     poisson_problem = Poisson()
# #     boundaries = ['gamma1', 'gamma2', 'gamma3', 'gamma4']
# #     n = 10
# #     poisson_problem.discretise_domain(n, 'grid', locations=boundaries)
# #     poisson_problem.conditions.pop('data')  # the input/output pts are allocated on cpu
# #     pinn = PINN(problem=poisson_problem, model=model, extra_features=None, loss=LpLoss())
# #     trainer = Trainer(solver=pinn, kwargs={'max_epochs' : 5, 'accelerator':'gpu'})
# #     trainer.train()

# # def test_train_2():
# #     boundaries = ['gamma1', 'gamma2', 'gamma3', 'gamma4']
# #     n = 10
# #     expected_keys = [[], list(range(0, 50, 3))]
# #     param = [0, 3]
# #     for i, truth_key in zip(param, expected_keys):
# #         pinn = PINN(problem, model)
# #         pinn.discretise_domain(n, 'grid', locations=boundaries)
# #         pinn.discretise_domain(n, 'grid', locations=['D'])
# #         pinn.train(50, save_loss=i)
# #         assert list(pinn.history_loss.keys()) == truth_key


# # def test_train_extra_feats():
# #     pinn = PINN(problem, model_extra_feat, [myFeature()])
# #     boundaries = ['gamma1', 'gamma2', 'gamma3', 'gamma4']
# #     n = 10
# #     pinn.discretise_domain(n, 'grid', locations=boundaries)
# #     pinn.discretise_domain(n, 'grid', locations=['D'])
# #     pinn.train(5)


# # def test_train_2_extra_feats():
# #     boundaries = ['gamma1', 'gamma2', 'gamma3', 'gamma4']
# #     n = 10
# #     expected_keys = [[], list(range(0, 50, 3))]
# #     param = [0, 3]
# #     for i, truth_key in zip(param, expected_keys):
# #         pinn = PINN(problem, model_extra_feat, [myFeature()])
# #         pinn.discretise_domain(n, 'grid', locations=boundaries)
# #         pinn.discretise_domain(n, 'grid', locations=['D'])
# #         pinn.train(50, save_loss=i)
# #         assert list(pinn.history_loss.keys()) == truth_key


# # def test_train_with_optimizer_kwargs():
# #     boundaries = ['gamma1', 'gamma2', 'gamma3', 'gamma4']
# #     n = 10
# #     expected_keys = [[], list(range(0, 50, 3))]
# #     param = [0, 3]
# #     for i, truth_key in zip(param, expected_keys):
# #         pinn = PINN(problem, model, optimizer_kwargs={'lr' : 0.3})
# #         pinn.discretise_domain(n, 'grid', locations=boundaries)
# #         pinn.discretise_domain(n, 'grid', locations=['D'])
# #         pinn.train(50, save_loss=i)
# #         assert list(pinn.history_loss.keys()) == truth_key


# # def test_train_with_lr_scheduler():
# #     boundaries = ['gamma1', 'gamma2', 'gamma3', 'gamma4']
# #     n = 10
# #     expected_keys = [[], list(range(0, 50, 3))]
# #     param = [0, 3]
# #     for i, truth_key in zip(param, expected_keys):
# #         pinn = PINN(
# #             problem,
# #             model,
# #             lr_scheduler_type=torch.optim.lr_scheduler.CyclicLR,
# #             lr_scheduler_kwargs={'base_lr' : 0.1, 'max_lr' : 0.3, 'cycle_momentum': False}
# #         )
# #         pinn.discretise_domain(n, 'grid', locations=boundaries)
# #         pinn.discretise_domain(n, 'grid', locations=['D'])
# #         pinn.train(50, save_loss=i)
# #         assert list(pinn.history_loss.keys()) == truth_key


# # # def test_train_batch():
# # #     pinn = PINN(problem, model, batch_size=6)
# # #     boundaries = ['gamma1', 'gamma2', 'gamma3', 'gamma4']
# # #     n = 10
# # #     pinn.discretise_domain(n, 'grid', locations=boundaries)
# # #     pinn.discretise_domain(n, 'grid', locations=['D'])
# # #     pinn.train(5)


# # # def test_train_batch_2():
# # #     boundaries = ['gamma1', 'gamma2', 'gamma3', 'gamma4']
# # #     n = 10
# # #     expected_keys = [[], list(range(0, 50, 3))]
# # #     param = [0, 3]
# # #     for i, truth_key in zip(param, expected_keys):
# # #         pinn = PINN(problem, model, batch_size=6)
# # #         pinn.discretise_domain(n, 'grid', locations=boundaries)
# # #         pinn.discretise_domain(n, 'grid', locations=['D'])
# # #         pinn.train(50, save_loss=i)
# # #         assert list(pinn.history_loss.keys()) == truth_key


# # if torch.cuda.is_available():

# #     # def test_gpu_train():
# #     #     pinn = PINN(problem, model, batch_size=20, device='cuda')
# #     #     boundaries = ['gamma1', 'gamma2', 'gamma3', 'gamma4']
# #     #     n = 100
# #     #     pinn.discretise_domain(n, 'grid', locations=boundaries)
# #     #     pinn.discretise_domain(n, 'grid', locations=['D'])
# #     #     pinn.train(5)

# #     def test_gpu_train_nobatch():
# #         pinn = PINN(problem, model, batch_size=None, device='cuda')
# #         boundaries = ['gamma1', 'gamma2', 'gamma3', 'gamma4']
# #         n = 100
# #         pinn.discretise_domain(n, 'grid', locations=boundaries)
# #         pinn.discretise_domain(n, 'grid', locations=['D'])
# #         pinn.train(5)

    # rm directories
    import shutil
    shutil.rmtree('tests/test_solvers/tmp')
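Unlike the plain PINN file above, the RBAPINN tests never parametrise over batch sizes: test_wrong_batch asserts that any batch_size other than None raises NotImplementedError, presumably because the solver keeps per-point weights that are only well defined on the full, unbatched set of collocation points. A minimal sketch of the constraint, condensed from the tests (assuming the behaviour they encode):

    solver = RBAPINN(model=model, problem=problem)

    # full-batch training is the supported configuration
    Trainer(solver=solver, max_epochs=2, accelerator='cpu',
            batch_size=None, train_size=1., val_size=0., test_size=0.).train()

    # mini-batching is rejected by the solver
    with pytest.raises(NotImplementedError):
        Trainer(solver=solver, max_epochs=2, accelerator='cpu',
                batch_size=10, train_size=1., val_size=0., test_size=0.).train()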
@@ -1,105 +1,187 @@
import torch
import pytest

from pina.problem import AbstractProblem
from pina import Condition, LabelTensor
from pina.condition import InputOutputPointsCondition
from pina.solvers import ReducedOrderModelSolver
from pina.trainer import Trainer
from pina.model import FeedForward
from pina.loss import LpLoss
from pina.problem.zoo import Poisson2DSquareProblem
from torch._dynamo.eval_frame import OptimizedModule


# class NeuralOperatorProblem(AbstractProblem):
#     input_variables = ['u_0', 'u_1']
#     output_variables = [f'u_{i}' for i in range(100)]
#     conditions = {'data': Condition(input_points=LabelTensor(torch.rand(10, 2),
#                                                              input_variables),
#                                     output_points=LabelTensor(torch.rand(10, 100),
#                                                               output_variables))}

class LabelTensorProblem(AbstractProblem):
    input_variables = ['u_0', 'u_1']
    output_variables = ['u']
    conditions = {
        'data': Condition(
            input_points=LabelTensor(torch.randn(20, 2), ['u_0', 'u_1']),
            output_points=LabelTensor(torch.randn(20, 1), ['u'])),
    }


# # make the problem + extra feats
# class AE(torch.nn.Module):
#     def __init__(self, input_dimensions, rank):
#         super().__init__()
#         self.encode = FeedForward(input_dimensions, rank, layers=[input_dimensions//4])
#         self.decode = FeedForward(rank, input_dimensions, layers=[input_dimensions//4])

# class AE_missing_encode(torch.nn.Module):
#     def __init__(self, input_dimensions, rank):
#         super().__init__()
#         self.encode = FeedForward(input_dimensions, rank, layers=[input_dimensions//4])

# class AE_missing_decode(torch.nn.Module):
#     def __init__(self, input_dimensions, rank):
#         super().__init__()
#         self.decode = FeedForward(rank, input_dimensions, layers=[input_dimensions//4])

# rank = 10
# problem = NeuralOperatorProblem()
# interpolation_net = FeedForward(len(problem.input_variables),
#                                 rank)
# reduction_net = AE(len(problem.output_variables), rank)

# def test_constructor():
#     ReducedOrderModelSolver(problem=problem, reduction_network=reduction_net,
#                             interpolation_network=interpolation_net)
#     with pytest.raises(SyntaxError):
#         ReducedOrderModelSolver(problem=problem,
#                                 reduction_network=AE_missing_encode(
#                                     len(problem.output_variables), rank),
#                                 interpolation_network=interpolation_net)
#         ReducedOrderModelSolver(problem=problem,
#                                 reduction_network=AE_missing_decode(
#                                     len(problem.output_variables), rank),
#                                 interpolation_network=interpolation_net)

class TensorProblem(AbstractProblem):
    input_variables = ['u_0', 'u_1']
    output_variables = ['u']
    conditions = {
        'data': Condition(
            input_points=torch.randn(20, 2),
            output_points=torch.randn(20, 1))
    }


# def test_train_cpu():
#     solver = ReducedOrderModelSolver(problem=problem, reduction_network=reduction_net,
#                                      interpolation_network=interpolation_net, loss=LpLoss())
#     trainer = Trainer(solver=solver, max_epochs=3, accelerator='cpu', batch_size=20)
#     trainer.train()

class AE(torch.nn.Module):
    def __init__(self, input_dimensions, rank):
        super().__init__()
        self.encode = FeedForward(
            input_dimensions, rank, layers=[input_dimensions//4])
        self.decode = FeedForward(
            rank, input_dimensions, layers=[input_dimensions//4])


# def test_train_restore():
#     tmpdir = "tests/tmp_restore"
#     solver = ReducedOrderModelSolver(problem=problem,
#                                      reduction_network=reduction_net,
#                                      interpolation_network=interpolation_net,
#                                      loss=LpLoss())
#     trainer = Trainer(solver=solver,
#                       max_epochs=5,
#                       accelerator='cpu',
#                       default_root_dir=tmpdir)
#     trainer.train()
#     ntrainer = Trainer(solver=solver, max_epochs=15, accelerator='cpu')
#     t = ntrainer.train(
#         ckpt_path=f'{tmpdir}/lightning_logs/version_0/checkpoints/epoch=4-step=5.ckpt')
#     import shutil
#     shutil.rmtree(tmpdir)

class AE_missing_encode(torch.nn.Module):
    def __init__(self, input_dimensions, rank):
        super().__init__()
        self.encode = FeedForward(
            input_dimensions, rank, layers=[input_dimensions//4])


# def test_train_load():
#     tmpdir = "tests/tmp_load"
#     solver = ReducedOrderModelSolver(problem=problem,
#                                      reduction_network=reduction_net,
#                                      interpolation_network=interpolation_net,
#                                      loss=LpLoss())
#     trainer = Trainer(solver=solver,
#                       max_epochs=15,
#                       accelerator='cpu',
#                       default_root_dir=tmpdir)
#     trainer.train()
#     new_solver = ReducedOrderModelSolver.load_from_checkpoint(
#         f'{tmpdir}/lightning_logs/version_0/checkpoints/epoch=14-step=15.ckpt',
#         problem=problem, reduction_network=reduction_net,
#         interpolation_network=interpolation_net)
#     test_pts = LabelTensor(torch.rand(20, 2), problem.input_variables)
#     assert new_solver.forward(test_pts).shape == (20, 100)
#     assert new_solver.forward(test_pts).shape == solver.forward(test_pts).shape
#     torch.testing.assert_close(
#         new_solver.forward(test_pts),
#         solver.forward(test_pts))
#     import shutil
#     shutil.rmtree(tmpdir)

class AE_missing_decode(torch.nn.Module):
    def __init__(self, input_dimensions, rank):
        super().__init__()
        self.decode = FeedForward(
            rank, input_dimensions, layers=[input_dimensions//4])


rank = 10
model = AE(2, 1)
interpolation_net = FeedForward(2, rank)
reduction_net = AE(1, rank)


def test_constructor():
    problem = TensorProblem()
    ReducedOrderModelSolver(problem=problem,
                            interpolation_network=interpolation_net,
                            reduction_network=reduction_net)
    ReducedOrderModelSolver(problem=LabelTensorProblem(),
                            reduction_network=reduction_net,
                            interpolation_network=interpolation_net)
    assert ReducedOrderModelSolver.accepted_conditions_types == InputOutputPointsCondition
    with pytest.raises(SyntaxError):
        ReducedOrderModelSolver(problem=problem,
                                reduction_network=AE_missing_encode(
                                    len(problem.output_variables), rank),
                                interpolation_network=interpolation_net)
        ReducedOrderModelSolver(problem=problem,
                                reduction_network=AE_missing_decode(
                                    len(problem.output_variables), rank),
                                interpolation_network=interpolation_net)
    with pytest.raises(ValueError):
        ReducedOrderModelSolver(problem=Poisson2DSquareProblem(),
                                reduction_network=reduction_net,
                                interpolation_network=interpolation_net)


@pytest.mark.parametrize("batch_size", [None, 1, 5, 20])
@pytest.mark.parametrize("use_lt", [True, False])
@pytest.mark.parametrize("compile", [True, False])
def test_solver_train(use_lt, batch_size, compile):
    problem = LabelTensorProblem() if use_lt else TensorProblem()
    solver = ReducedOrderModelSolver(problem=problem,
                                     reduction_network=reduction_net,
                                     interpolation_network=interpolation_net,
                                     use_lt=use_lt)
    trainer = Trainer(solver=solver,
                      max_epochs=2,
                      accelerator='cpu',
                      batch_size=batch_size,
                      train_size=1.,
                      test_size=0.,
                      val_size=0.,
                      compile=compile)
    trainer.train()
    if trainer.compile:
        for v in solver.model.values():
            assert isinstance(v, OptimizedModule)


@pytest.mark.parametrize("use_lt", [True, False])
@pytest.mark.parametrize("compile", [True, False])
def test_solver_validation(use_lt, compile):
    problem = LabelTensorProblem() if use_lt else TensorProblem()
    solver = ReducedOrderModelSolver(problem=problem,
                                     reduction_network=reduction_net,
                                     interpolation_network=interpolation_net,
                                     use_lt=use_lt)
    trainer = Trainer(solver=solver,
                      max_epochs=2,
                      accelerator='cpu',
                      batch_size=None,
                      train_size=0.9,
                      val_size=0.1,
                      test_size=0.,
                      compile=compile)
    trainer.train()
    if trainer.compile:
        for v in solver.model.values():
            assert isinstance(v, OptimizedModule)


@pytest.mark.parametrize("use_lt", [True, False])
@pytest.mark.parametrize("compile", [True, False])
def test_solver_test(use_lt, compile):
    problem = LabelTensorProblem() if use_lt else TensorProblem()
    solver = ReducedOrderModelSolver(problem=problem,
                                     reduction_network=reduction_net,
                                     interpolation_network=interpolation_net,
                                     use_lt=use_lt)
    trainer = Trainer(solver=solver,
                      max_epochs=2,
                      accelerator='cpu',
                      batch_size=None,
                      train_size=0.8,
                      val_size=0.1,
                      test_size=0.1,
                      compile=compile)
    trainer.train()
    if trainer.compile:
        for v in solver.model.values():
            assert isinstance(v, OptimizedModule)


def test_train_load_restore():
    dir = "tests/test_solvers/tmp/"
    problem = LabelTensorProblem()
    solver = ReducedOrderModelSolver(problem=problem,
                                     reduction_network=reduction_net,
                                     interpolation_network=interpolation_net)
    trainer = Trainer(solver=solver,
                      max_epochs=5,
                      accelerator='cpu',
                      batch_size=None,
                      train_size=0.9,
                      test_size=0.1,
                      val_size=0.,
                      default_root_dir=dir)
    trainer.train()
    # restore
    ntrainer = Trainer(solver=solver,
                       max_epochs=5,
                       accelerator='cpu')
    ntrainer.train(
        ckpt_path=f'{dir}/lightning_logs/version_0/checkpoints/epoch=4-step=5.ckpt')
    # loading
    new_solver = ReducedOrderModelSolver.load_from_checkpoint(
        f'{dir}/lightning_logs/version_0/checkpoints/epoch=4-step=5.ckpt',
        problem=problem,
        reduction_network=reduction_net,
        interpolation_network=interpolation_net)
    test_pts = LabelTensor(torch.rand(20, 2), problem.input_variables)
    assert new_solver.forward(test_pts).shape == (20, 1)
    assert new_solver.forward(test_pts).shape == solver.forward(test_pts).shape
    torch.testing.assert_close(
        new_solver.forward(test_pts),
        solver.forward(test_pts))
    # rm directories
    import shutil
    shutil.rmtree('tests/test_solvers/tmp')
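The constructor test above encodes the interface contract of ReducedOrderModelSolver: the reduction network must expose both an encode and a decode sub-module (a SyntaxError is raised when either is missing), and the problem may only carry input/output-point conditions (a ValueError is raised for an equation-based problem such as Poisson2DSquareProblem). A minimal custom autoencoder satisfying that contract, sketched from the AE class in this file (MyAutoencoder is a hypothetical name and the layer width is illustrative):

    class MyAutoencoder(torch.nn.Module):
        # hypothetical example class mirroring the AE used in the tests
        def __init__(self, full_dim, reduced_dim):
            super().__init__()
            # encode: full state -> reduced coordinates
            self.encode = FeedForward(full_dim, reduced_dim, layers=[16])
            # decode: reduced coordinates -> full state
            self.decode = FeedForward(reduced_dim, full_dim, layers=[16])

    solver = ReducedOrderModelSolver(problem=LabelTensorProblem(),
                                     reduction_network=MyAutoencoder(1, 10),
                                     interpolation_network=FeedForward(2, 10))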
@@ -1,449 +0,0 @@
import torch
import pytest

from pina.problem import SpatialProblem, InverseProblem
from pina.operators import laplacian
from pina.domain import CartesianDomain
from pina import Condition, LabelTensor
from pina.solvers import SAPINN as PINN
from pina.trainer import Trainer
from pina.model import FeedForward
from pina.equation import Equation
from pina.equation.equation_factory import FixedValue
from pina.loss import LpLoss


# def laplace_equation(input_, output_):
#     force_term = (torch.sin(input_.extract(['x']) * torch.pi) *
#                   torch.sin(input_.extract(['y']) * torch.pi))
#     delta_u = laplacian(output_.extract(['u']), input_)
#     return delta_u - force_term


# my_laplace = Equation(laplace_equation)
# in_ = LabelTensor(torch.tensor([[0., 1.]]), ['x', 'y'])
# out_ = LabelTensor(torch.tensor([[0.]]), ['u'])
# in2_ = LabelTensor(torch.rand(60, 2), ['x', 'y'])
# out2_ = LabelTensor(torch.rand(60, 1), ['u'])


# class InversePoisson(SpatialProblem, InverseProblem):
#     '''
#     Problem definition for the Poisson equation.
#     '''
#     output_variables = ['u']
#     x_min = -2
#     x_max = 2
#     y_min = -2
#     y_max = 2
#     data_input = LabelTensor(torch.rand(10, 2), ['x', 'y'])
#     data_output = LabelTensor(torch.rand(10, 1), ['u'])
#     spatial_domain = CartesianDomain({'x': [x_min, x_max],
#                                       'y': [y_min, y_max]})
#     # define the ranges for the parameters
#     unknown_parameter_domain = CartesianDomain({'mu1': [-1, 1],
#                                                 'mu2': [-1, 1]})

#     def laplace_equation(input_, output_, params_):
#         '''
#         Laplace equation with a force term.
#         '''
#         force_term = torch.exp(
#             - 2*(input_.extract(['x']) - params_['mu1'])**2
#             - 2*(input_.extract(['y']) - params_['mu2'])**2)
#         delta_u = laplacian(output_, input_, components=['u'], d=['x', 'y'])

#         return delta_u - force_term

#     # define the conditions for the loss (boundary conditions, equation, data)
#     conditions = {
#         'gamma1': Condition(location=CartesianDomain({'x': [x_min, x_max],
#                                                       'y': y_max}),
#                             equation=FixedValue(0.0, components=['u'])),
#         'gamma2': Condition(location=CartesianDomain(
#             {'x': [x_min, x_max], 'y': y_min}),
#             equation=FixedValue(0.0, components=['u'])),
#         'gamma3': Condition(location=CartesianDomain(
#             {'x': x_max, 'y': [y_min, y_max]}),
#             equation=FixedValue(0.0, components=['u'])),
#         'gamma4': Condition(location=CartesianDomain(
#             {'x': x_min, 'y': [y_min, y_max]}),
#             equation=FixedValue(0.0, components=['u'])),
#         'D': Condition(location=CartesianDomain(
#             {'x': [x_min, x_max], 'y': [y_min, y_max]}),
#             equation=Equation(laplace_equation)),
#         'data': Condition(input_points=data_input.extract(['x', 'y']),
#                           output_points=data_output)
#     }


# class Poisson(SpatialProblem):
#     output_variables = ['u']
#     spatial_domain = CartesianDomain({'x': [0, 1], 'y': [0, 1]})

#     conditions = {
#         'gamma1': Condition(
#             location=CartesianDomain({'x': [0, 1], 'y': 1}),
#             equation=FixedValue(0.0)),
#         'gamma2': Condition(
#             location=CartesianDomain({'x': [0, 1], 'y': 0}),
#             equation=FixedValue(0.0)),
#         'gamma3': Condition(
#             location=CartesianDomain({'x': 1, 'y': [0, 1]}),
#             equation=FixedValue(0.0)),
#         'gamma4': Condition(
#             location=CartesianDomain({'x': 0, 'y': [0, 1]}),
#             equation=FixedValue(0.0)),
#         'D': Condition(
#             input_points=LabelTensor(torch.rand(size=(100, 2)), ['x', 'y']),
#             equation=my_laplace),
#         'data': Condition(
#             input_points=in_,
#             output_points=out_),
#         'data2': Condition(
#             input_points=in2_,
#             output_points=out2_)
#     }

#     def poisson_sol(self, pts):
#         return -(torch.sin(pts.extract(['x']) * torch.pi) *
#                  torch.sin(pts.extract(['y']) * torch.pi)) / (2 * torch.pi**2)

#     truth_solution = poisson_sol


# class myFeature(torch.nn.Module):
#     """
#     Feature: sin(x)
#     """

#     def __init__(self):
#         super(myFeature, self).__init__()

#     def forward(self, x):
#         t = (torch.sin(x.extract(['x']) * torch.pi) *
#              torch.sin(x.extract(['y']) * torch.pi))
#         return LabelTensor(t, ['sin(x)sin(y)'])


# # make the problem
# poisson_problem = Poisson()
# model = FeedForward(len(poisson_problem.input_variables),
#                     len(poisson_problem.output_variables))
# model_extra_feats = FeedForward(
#     len(poisson_problem.input_variables) + 1,
#     len(poisson_problem.output_variables))
# extra_feats = [myFeature()]


# def test_constructor():
#     PINN(problem=poisson_problem, model=model, extra_features=None)
#     with pytest.raises(ValueError):
#         PINN(problem=poisson_problem, model=model, extra_features=None,
#              weights_function=1)


# def test_constructor_extra_feats():
#     model_extra_feats = FeedForward(
#         len(poisson_problem.input_variables) + 1,
#         len(poisson_problem.output_variables))
#     PINN(problem=poisson_problem,
#          model=model_extra_feats,
#          extra_features=extra_feats)


# def test_train_cpu():
#     poisson_problem = Poisson()
#     boundaries = ['gamma1', 'gamma2', 'gamma3', 'gamma4']
#     n = 10
#     poisson_problem.discretise_domain(n, 'grid', locations=boundaries)
#     pinn = PINN(problem=poisson_problem, model=model,
#                 extra_features=None, loss=LpLoss())
#     trainer = Trainer(solver=pinn, max_epochs=1,
#                       accelerator='cpu', batch_size=20)
#     trainer.train()

# def test_log():
#     poisson_problem.discretise_domain(100)
#     solver = PINN(problem=poisson_problem, model=model,
#                   extra_features=None, loss=LpLoss())
#     trainer = Trainer(solver, max_epochs=2, accelerator='cpu')
#     trainer.train()
#     # assert the logged metrics are correct
#     logged_metrics = sorted(list(trainer.logged_metrics.keys()))
#     total_metrics = sorted(
#         list([key + '_loss' for key in poisson_problem.conditions.keys()])
#         + ['mean_loss'])
#     assert logged_metrics == total_metrics

# def test_train_restore():
#     tmpdir = "tests/tmp_restore"
#     poisson_problem = Poisson()
#     boundaries = ['gamma1', 'gamma2', 'gamma3', 'gamma4']
#     n = 10
#     poisson_problem.discretise_domain(n, 'grid', locations=boundaries)
#     pinn = PINN(problem=poisson_problem,
#                 model=model,
#                 extra_features=None,
#                 loss=LpLoss())
#     trainer = Trainer(solver=pinn,
#                       max_epochs=5,
#                       accelerator='cpu',
#                       default_root_dir=tmpdir)
#     trainer.train()
#     ntrainer = Trainer(solver=pinn, max_epochs=15, accelerator='cpu')
#     t = ntrainer.train(
#         ckpt_path=f'{tmpdir}/lightning_logs/version_0/'
#         'checkpoints/epoch=4-step=10.ckpt')
#     import shutil
#     shutil.rmtree(tmpdir)


# def test_train_load():
#     tmpdir = "tests/tmp_load"
#     poisson_problem = Poisson()
#     boundaries = ['gamma1', 'gamma2', 'gamma3', 'gamma4']
#     n = 10
#     poisson_problem.discretise_domain(n, 'grid', locations=boundaries)
#     pinn = PINN(problem=poisson_problem,
#                 model=model,
#                 extra_features=None,
#                 loss=LpLoss())
#     trainer = Trainer(solver=pinn,
#                       max_epochs=15,
#                       accelerator='cpu',
#                       default_root_dir=tmpdir)
#     trainer.train()
#     new_pinn = PINN.load_from_checkpoint(
#         f'{tmpdir}/lightning_logs/version_0/checkpoints/epoch=14-step=30.ckpt',
#         problem=poisson_problem, model=model)
#     test_pts = CartesianDomain({'x': [0, 1], 'y': [0, 1]}).sample(10)
#     assert new_pinn.forward(test_pts).extract(['u']).shape == (10, 1)
#     assert new_pinn.forward(test_pts).extract(
#         ['u']).shape == pinn.forward(test_pts).extract(['u']).shape
#     torch.testing.assert_close(
#         new_pinn.forward(test_pts).extract(['u']),
#         pinn.forward(test_pts).extract(['u']))
#     import shutil
#     shutil.rmtree(tmpdir)

# def test_train_inverse_problem_cpu():
#     poisson_problem = InversePoisson()
#     boundaries = ['gamma1', 'gamma2', 'gamma3', 'gamma4', 'D']
#     n = 100
#     poisson_problem.discretise_domain(n, 'random', locations=boundaries)
#     pinn = PINN(problem=poisson_problem, model=model,
#                 extra_features=None, loss=LpLoss())
#     trainer = Trainer(solver=pinn, max_epochs=1,
#                       accelerator='cpu', batch_size=20)
#     trainer.train()


# # TODO does not currently work
# # def test_train_inverse_problem_restore():
# #     tmpdir = "tests/tmp_restore_inv"
# #     poisson_problem = InversePoisson()
# #     boundaries = ['gamma1', 'gamma2', 'gamma3', 'gamma4', 'D']
# #     n = 100
# #     poisson_problem.discretise_domain(n, 'random', locations=boundaries)
# #     pinn = PINN(problem=poisson_problem,
# #                 model=model,
# #                 extra_features=None,
# #                 loss=LpLoss())
# #     trainer = Trainer(solver=pinn,
# #                       max_epochs=5,
# #                       accelerator='cpu',
# #                       default_root_dir=tmpdir)
# #     trainer.train()
# #     ntrainer = Trainer(solver=pinn, max_epochs=5, accelerator='cpu')
# #     t = ntrainer.train(
# #         ckpt_path=f'{tmpdir}/lightning_logs/version_0/checkpoints/epoch=4-step=10.ckpt')
# #     import shutil
# #     shutil.rmtree(tmpdir)


# def test_train_inverse_problem_load():
#     tmpdir = "tests/tmp_load_inv"
#     poisson_problem = InversePoisson()
#     boundaries = ['gamma1', 'gamma2', 'gamma3', 'gamma4', 'D']
#     n = 100
#     poisson_problem.discretise_domain(n, 'random', locations=boundaries)
#     pinn = PINN(problem=poisson_problem,
#                 model=model,
#                 extra_features=None,
#                 loss=LpLoss())
#     trainer = Trainer(solver=pinn,
#                       max_epochs=15,
#                       accelerator='cpu',
#                       default_root_dir=tmpdir)
#     trainer.train()
#     new_pinn = PINN.load_from_checkpoint(
#         f'{tmpdir}/lightning_logs/version_0/checkpoints/epoch=14-step=30.ckpt',
#         problem=poisson_problem, model=model)
#     test_pts = CartesianDomain({'x': [0, 1], 'y': [0, 1]}).sample(10)
#     assert new_pinn.forward(test_pts).extract(['u']).shape == (10, 1)
#     assert new_pinn.forward(test_pts).extract(
#         ['u']).shape == pinn.forward(test_pts).extract(['u']).shape
#     torch.testing.assert_close(
#         new_pinn.forward(test_pts).extract(['u']),
#         pinn.forward(test_pts).extract(['u']))
#     import shutil
#     shutil.rmtree(tmpdir)

# # TODO fix asap. Basically sampling few variables
# # works only if both variables are in a range.
# # if one is fixed and the other not, this will
# # not work. This test also needs to be fixed and
# # insert in test problem not in test pinn.
# # def test_train_cpu_sampling_few_vars():
# #     poisson_problem = Poisson()
# #     boundaries = ['gamma1', 'gamma2', 'gamma3']
# #     n = 10
# #     poisson_problem.discretise_domain(n, 'grid', locations=boundaries)
# #     poisson_problem.discretise_domain(n, 'random', locations=['gamma4'], variables=['x'])
# #     poisson_problem.discretise_domain(n, 'random', locations=['gamma4'], variables=['y'])
# #     pinn = PINN(problem=poisson_problem, model=model, extra_features=None, loss=LpLoss())
# #     trainer = Trainer(solver=pinn, kwargs={'max_epochs': 5, 'accelerator': 'cpu'})
# #     trainer.train()


# def test_train_extra_feats_cpu():
#     poisson_problem = Poisson()
#     boundaries = ['gamma1', 'gamma2', 'gamma3', 'gamma4']
#     n = 10
#     poisson_problem.discretise_domain(n, 'grid', locations=boundaries)
#     pinn = PINN(problem=poisson_problem,
#                 model=model_extra_feats,
#                 extra_features=extra_feats)
#     trainer = Trainer(solver=pinn, max_epochs=5, accelerator='cpu')
#     trainer.train()


# # TODO, fix GitHub actions to run also on GPU
# # def test_train_gpu():
# #     poisson_problem = Poisson()
# #     boundaries = ['gamma1', 'gamma2', 'gamma3', 'gamma4']
# #     n = 10
# #     poisson_problem.discretise_domain(n, 'grid', locations=boundaries)
# #     pinn = PINN(problem=poisson_problem, model=model, extra_features=None, loss=LpLoss())
# #     trainer = Trainer(solver=pinn, kwargs={'max_epochs': 5, 'accelerator': 'gpu'})
# #     trainer.train()

# # def test_train_gpu(): #TODO fix ASAP
# #     poisson_problem = Poisson()
# #     boundaries = ['gamma1', 'gamma2', 'gamma3', 'gamma4']
# #     n = 10
# #     poisson_problem.discretise_domain(n, 'grid', locations=boundaries)
# #     poisson_problem.conditions.pop('data')  # The input/output pts are allocated on cpu
# #     pinn = PINN(problem=poisson_problem, model=model, extra_features=None, loss=LpLoss())
# #     trainer = Trainer(solver=pinn, kwargs={'max_epochs': 5, 'accelerator': 'gpu'})
# #     trainer.train()

# # def test_train_2():
# #     boundaries = ['gamma1', 'gamma2', 'gamma3', 'gamma4']
# #     n = 10
# #     expected_keys = [[], list(range(0, 50, 3))]
# #     param = [0, 3]
# #     for i, truth_key in zip(param, expected_keys):
# #         pinn = PINN(problem, model)
# #         pinn.discretise_domain(n, 'grid', locations=boundaries)
# #         pinn.discretise_domain(n, 'grid', locations=['D'])
# #         pinn.train(50, save_loss=i)
# #         assert list(pinn.history_loss.keys()) == truth_key


# # def test_train_extra_feats():
# #     pinn = PINN(problem, model_extra_feat, [myFeature()])
# #     boundaries = ['gamma1', 'gamma2', 'gamma3', 'gamma4']
# #     n = 10
# #     pinn.discretise_domain(n, 'grid', locations=boundaries)
# #     pinn.discretise_domain(n, 'grid', locations=['D'])
# #     pinn.train(5)


# # def test_train_2_extra_feats():
# #     boundaries = ['gamma1', 'gamma2', 'gamma3', 'gamma4']
# #     n = 10
# #     expected_keys = [[], list(range(0, 50, 3))]
# #     param = [0, 3]
# #     for i, truth_key in zip(param, expected_keys):
# #         pinn = PINN(problem, model_extra_feat, [myFeature()])
# #         pinn.discretise_domain(n, 'grid', locations=boundaries)
# #         pinn.discretise_domain(n, 'grid', locations=['D'])
# #         pinn.train(50, save_loss=i)
# #         assert list(pinn.history_loss.keys()) == truth_key


# # def test_train_with_optimizer_kwargs():
# #     boundaries = ['gamma1', 'gamma2', 'gamma3', 'gamma4']
# #     n = 10
# #     expected_keys = [[], list(range(0, 50, 3))]
# #     param = [0, 3]
# #     for i, truth_key in zip(param, expected_keys):
# #         pinn = PINN(problem, model, optimizer_kwargs={'lr': 0.3})
# #         pinn.discretise_domain(n, 'grid', locations=boundaries)
# #         pinn.discretise_domain(n, 'grid', locations=['D'])
# #         pinn.train(50, save_loss=i)
# #         assert list(pinn.history_loss.keys()) == truth_key


# # def test_train_with_lr_scheduler():
# #     boundaries = ['gamma1', 'gamma2', 'gamma3', 'gamma4']
# #     n = 10
# #     expected_keys = [[], list(range(0, 50, 3))]
# #     param = [0, 3]
# #     for i, truth_key in zip(param, expected_keys):
# #         pinn = PINN(
# #             problem,
# #             model,
# #             lr_scheduler_type=torch.optim.lr_scheduler.CyclicLR,
# #             lr_scheduler_kwargs={'base_lr': 0.1, 'max_lr': 0.3, 'cycle_momentum': False}
# #         )
# #         pinn.discretise_domain(n, 'grid', locations=boundaries)
# #         pinn.discretise_domain(n, 'grid', locations=['D'])
# #         pinn.train(50, save_loss=i)
# #         assert list(pinn.history_loss.keys()) == truth_key


# # # def test_train_batch():
# # #     pinn = PINN(problem, model, batch_size=6)
# # #     boundaries = ['gamma1', 'gamma2', 'gamma3', 'gamma4']
# # #     n = 10
# # #     pinn.discretise_domain(n, 'grid', locations=boundaries)
# # #     pinn.discretise_domain(n, 'grid', locations=['D'])
# # #     pinn.train(5)


# # # def test_train_batch_2():
# # #     boundaries = ['gamma1', 'gamma2', 'gamma3', 'gamma4']
# # #     n = 10
# # #     expected_keys = [[], list(range(0, 50, 3))]
# # #     param = [0, 3]
# # #     for i, truth_key in zip(param, expected_keys):
# # #         pinn = PINN(problem, model, batch_size=6)
# # #         pinn.discretise_domain(n, 'grid', locations=boundaries)
# # #         pinn.discretise_domain(n, 'grid', locations=['D'])
# # #         pinn.train(50, save_loss=i)
# # #         assert list(pinn.history_loss.keys()) == truth_key


# # if torch.cuda.is_available():

# #     # def test_gpu_train():
# #     #     pinn = PINN(problem, model, batch_size=20, device='cuda')
# #     #     boundaries = ['gamma1', 'gamma2', 'gamma3', 'gamma4']
# #     #     n = 100
# #     #     pinn.discretise_domain(n, 'grid', locations=boundaries)
# #     #     pinn.discretise_domain(n, 'grid', locations=['D'])
# #     #     pinn.train(5)

# #     def test_gpu_train_nobatch():
# #         pinn = PINN(problem, model, batch_size=None, device='cuda')
# #         boundaries = ['gamma1', 'gamma2', 'gamma3', 'gamma4']
# #         n = 100
# #         pinn.discretise_domain(n, 'grid', locations=boundaries)
# #         pinn.discretise_domain(n, 'grid', locations=['D'])
# #         pinn.train(5)
159  tests/test_solvers/test_self_adaptive_pinn.py  Normal file
@@ -0,0 +1,159 @@
import torch
import pytest

from pina import LabelTensor, Condition
from pina.solvers import SelfAdaptivePINN as SAPINN
from pina.trainer import Trainer
from pina.model import FeedForward
from pina.problem.zoo import (
    Poisson2DSquareProblem as Poisson,
    InversePoisson2DSquareProblem as InversePoisson
)
from pina.condition import (
    InputOutputPointsCondition,
    InputPointsEquationCondition,
    DomainEquationCondition
)
from torch._dynamo.eval_frame import OptimizedModule


# make the problem
problem = Poisson()
problem.discretise_domain(50)
inverse_problem = InversePoisson()
inverse_problem.discretise_domain(50)
model = FeedForward(
    len(problem.input_variables),
    len(problem.output_variables)
)

# add input-output condition to test supervised learning
input_pts = torch.rand(50, len(problem.input_variables))
input_pts = LabelTensor(input_pts, problem.input_variables)
output_pts = torch.rand(50, len(problem.output_variables))
output_pts = LabelTensor(output_pts, problem.output_variables)
problem.conditions['data'] = Condition(
    input_points=input_pts,
    output_points=output_pts
)


@pytest.mark.parametrize("problem", [problem, inverse_problem])
@pytest.mark.parametrize("weight_fn", [torch.nn.Sigmoid(), torch.nn.Tanh()])
def test_constructor(problem, weight_fn):
    with pytest.raises(ValueError):
        SAPINN(model=model, problem=problem, weight_function=1)
    solver = SAPINN(problem=problem, model=model, weight_function=weight_fn)

    assert solver.accepted_conditions_types == (
        InputOutputPointsCondition,
        InputPointsEquationCondition,
        DomainEquationCondition
    )


@pytest.mark.parametrize("problem", [problem, inverse_problem])
def test_wrong_batch(problem):
    with pytest.raises(NotImplementedError):
        solver = SAPINN(model=model, problem=problem)
        trainer = Trainer(solver=solver,
                          max_epochs=2,
                          accelerator='cpu',
                          batch_size=10,
                          train_size=1.,
                          val_size=0.,
                          test_size=0.)
        trainer.train()
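    # presumably the per-point self-adaptive weights cannot be split across
    # mini-batches, hence the NotImplementedError asserted above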


@pytest.mark.parametrize("problem", [problem, inverse_problem])
@pytest.mark.parametrize("compile", [True, False])
def test_solver_train(problem, compile):
    solver = SAPINN(model=model, problem=problem)
    trainer = Trainer(solver=solver,
                      max_epochs=2,
                      accelerator='cpu',
                      batch_size=None,
                      train_size=1.,
                      val_size=0.,
                      test_size=0.,
                      compile=compile)
    trainer.train()
    if trainer.compile:
        assert all(isinstance(model, (OptimizedModule, torch.nn.ModuleDict))
                   for model in solver.models)


@pytest.mark.parametrize("problem", [problem, inverse_problem])
@pytest.mark.parametrize("compile", [True, False])
def test_solver_validation(problem, compile):
    solver = SAPINN(model=model, problem=problem)
    trainer = Trainer(solver=solver,
                      max_epochs=2,
                      accelerator='cpu',
                      batch_size=None,
                      train_size=0.9,
                      val_size=0.1,
                      test_size=0.,
                      compile=compile)
    trainer.train()
    if trainer.compile:
        assert all(isinstance(model, (OptimizedModule, torch.nn.ModuleDict))
                   for model in solver.models)


@pytest.mark.parametrize("problem", [problem, inverse_problem])
@pytest.mark.parametrize("compile", [True, False])
def test_solver_test(problem, compile):
    solver = SAPINN(model=model, problem=problem)
    trainer = Trainer(solver=solver,
                      max_epochs=2,
                      accelerator='cpu',
                      batch_size=None,
                      train_size=0.7,
                      val_size=0.2,
                      test_size=0.1,
                      compile=compile)
    trainer.test()
    if trainer.compile:
        assert all(isinstance(model, (OptimizedModule, torch.nn.ModuleDict))
                   for model in solver.models)


@pytest.mark.parametrize("problem", [problem, inverse_problem])
def test_train_load_restore(problem):
    dir = "tests/test_solvers/tmp"
    solver = SAPINN(model=model, problem=problem)
    trainer = Trainer(solver=solver,
                      max_epochs=5,
                      accelerator='cpu',
                      batch_size=None,
                      train_size=0.7,
                      val_size=0.2,
                      test_size=0.1,
                      default_root_dir=dir)
    trainer.train()
    # restore
    new_trainer = Trainer(solver=solver, max_epochs=5, accelerator='cpu')
    new_trainer.train(
        ckpt_path=f'{dir}/lightning_logs/version_0/checkpoints/' +
        'epoch=4-step=5.ckpt')

    # loading
    new_solver = SAPINN.load_from_checkpoint(
        f'{dir}/lightning_logs/version_0/checkpoints/epoch=4-step=5.ckpt',
        problem=problem, model=model)

    test_pts = LabelTensor(torch.rand(20, 2), problem.input_variables)
    assert new_solver.forward(test_pts).shape == (20, 1)
    assert new_solver.forward(test_pts).shape == solver.forward(test_pts).shape
    torch.testing.assert_close(
        new_solver.forward(test_pts),
        solver.forward(test_pts))

    # rm directories
    import shutil
    shutil.rmtree('tests/test_solvers/tmp')
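The `compile=True` cases above only produce an `OptimizedModule` where
`torch.compile` actually runs, and this PR disables compilation on Windows.
A minimal guard one could use in such assertions — a sketch assuming the
standard `sys.platform` check, not the Trainer's internal logic
(`assert_compiled` is a hypothetical helper, not part of PINA):

import sys

import torch
from torch._dynamo.eval_frame import OptimizedModule

def assert_compiled(module):
    # on Windows compilation is skipped, so the wrapper type is not expected
    if sys.platform != 'win32':
        assert isinstance(module, (OptimizedModule, torch.nn.ModuleDict))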
@@ -1,143 +1,133 @@
import torch
import pytest
from pina.problem import AbstractProblem, SpatialProblem
from pina import Condition, LabelTensor
from pina.condition import InputOutputPointsCondition
from pina.solvers import SupervisedSolver
from pina.model import FeedForward
from pina.equation import Equation
from pina.equation.equation_factory import FixedValue
from pina.operators import laplacian
from pina.domain import CartesianDomain
from pina.trainer import Trainer

# in_ = LabelTensor(torch.tensor([[0., 1.]]), ['u_0', 'u_1'])
# out_ = LabelTensor(torch.tensor([[0.]]), ['u'])
from torch._dynamo.eval_frame import OptimizedModule


# class NeuralOperatorProblem(AbstractProblem):
#     input_variables = ['u_0', 'u_1']
#     output_variables = ['u']

#     conditions = {
#         'data': Condition(input_points=in_, output_points=out_),
#     }
class LabelTensorProblem(AbstractProblem):
    input_variables = ['u_0', 'u_1']
    output_variables = ['u']
    conditions = {
        'data': Condition(
            input_points=LabelTensor(torch.randn(20, 2), ['u_0', 'u_1']),
            output_points=LabelTensor(torch.randn(20, 1), ['u'])),
    }


# class myFeature(torch.nn.Module):
#     """
#     Feature: sin(x)
#     """

#     def __init__(self):
#         super(myFeature, self).__init__()

#     def forward(self, x):
#         t = (torch.sin(x.extract(['u_0']) * torch.pi) *
#              torch.sin(x.extract(['u_1']) * torch.pi))
#         return LabelTensor(t, ['sin(x)sin(y)'])
class TensorProblem(AbstractProblem):
    input_variables = ['u_0', 'u_1']
    output_variables = ['u']
    conditions = {
        'data': Condition(
            input_points=torch.randn(20, 2),
            output_points=torch.randn(20, 1))
    }


# problem = NeuralOperatorProblem()
# extra_feats = [myFeature()]
# model = FeedForward(len(problem.input_variables), len(problem.output_variables))
# model_extra_feats = FeedForward(
#     len(problem.input_variables) + 1, len(problem.output_variables))
model = FeedForward(2, 1)


# def test_constructor():
#     SupervisedSolver(problem=problem, model=model)
def test_constructor():
    SupervisedSolver(problem=TensorProblem(), model=model)
    SupervisedSolver(problem=LabelTensorProblem(), model=model)
    assert SupervisedSolver.accepted_conditions_types == (
        InputOutputPointsCondition
    )


# test_constructor()
@pytest.mark.parametrize("batch_size", [None, 1, 5, 20])
@pytest.mark.parametrize("use_lt", [True, False])
@pytest.mark.parametrize("compile", [True, False])
def test_solver_train(use_lt, batch_size, compile):
    problem = LabelTensorProblem() if use_lt else TensorProblem()
    solver = SupervisedSolver(problem=problem, model=model, use_lt=use_lt)
    trainer = Trainer(solver=solver,
                      max_epochs=2,
                      accelerator='cpu',
                      batch_size=batch_size,
                      train_size=1.,
                      test_size=0.,
                      val_size=0.,
                      compile=compile)

    trainer.train()
    if trainer.compile:
        assert isinstance(solver.model, OptimizedModule)


# def laplace_equation(input_, output_):
#     force_term = (torch.sin(input_.extract(['x']) * torch.pi) *
#                   torch.sin(input_.extract(['y']) * torch.pi))
#     delta_u = laplacian(output_.extract(['u']), input_)
#     return delta_u - force_term
@pytest.mark.parametrize("use_lt", [True, False])
@pytest.mark.parametrize("compile", [True, False])
def test_solver_validation(use_lt, compile):
    problem = LabelTensorProblem() if use_lt else TensorProblem()
    solver = SupervisedSolver(problem=problem, model=model, use_lt=use_lt)
    trainer = Trainer(solver=solver,
                      max_epochs=2,
                      accelerator='cpu',
                      batch_size=None,
                      train_size=0.9,
                      val_size=0.1,
                      test_size=0.,
                      compile=compile)
    trainer.train()
    if trainer.compile:
        assert isinstance(solver.model, OptimizedModule)


# my_laplace = Equation(laplace_equation)
@pytest.mark.parametrize("use_lt", [True, False])
@pytest.mark.parametrize("compile", [True, False])
def test_solver_test(use_lt, compile):
    problem = LabelTensorProblem() if use_lt else TensorProblem()
    solver = SupervisedSolver(problem=problem, model=model, use_lt=use_lt)
    trainer = Trainer(solver=solver,
                      max_epochs=2,
                      accelerator='cpu',
                      batch_size=None,
                      train_size=0.8,
                      val_size=0.1,
                      test_size=0.1,
                      compile=compile)
    trainer.test()
    if trainer.compile:
        assert isinstance(solver.model, OptimizedModule)


# class Poisson(SpatialProblem):
#     output_variables = ['u']
#     spatial_domain = CartesianDomain({'x': [0, 1], 'y': [0, 1]})
def test_train_load_restore():
    dir = "tests/test_solvers/tmp/"
    problem = LabelTensorProblem()
    solver = SupervisedSolver(problem=problem, model=model)
    trainer = Trainer(solver=solver,
                      max_epochs=5,
                      accelerator='cpu',
                      batch_size=None,
                      train_size=0.9,
                      test_size=0.1,
                      val_size=0.,
                      default_root_dir=dir)
    trainer.train()

#     conditions = {
#         'gamma1':
#             Condition(domain=CartesianDomain({
#                 'x': [0, 1],
#                 'y': 1
#             }),
#                 equation=FixedValue(0.0)),
#         'gamma2':
#             Condition(domain=CartesianDomain({
#                 'x': [0, 1],
#                 'y': 0
#             }),
#                 equation=FixedValue(0.0)),
#         'gamma3':
#             Condition(domain=CartesianDomain({
#                 'x': 1,
#                 'y': [0, 1]
#             }),
#                 equation=FixedValue(0.0)),
#         'gamma4':
#             Condition(domain=CartesianDomain({
#                 'x': 0,
#                 'y': [0, 1]
#             }),
#                 equation=FixedValue(0.0)),
#         'D':
#             Condition(domain=CartesianDomain({
#                 'x': [0, 1],
#                 'y': [0, 1]
#             }),
#                 equation=my_laplace),
#         'data':
#             Condition(input_points=in_, output_points=out_)
#     }
    # restore
    new_trainer = Trainer(solver=solver, max_epochs=5, accelerator='cpu')
    new_trainer.train(
        ckpt_path=f'{dir}/lightning_logs/version_0/checkpoints/' +
        'epoch=4-step=5.ckpt')

#     def poisson_sol(self, pts):
#         return -(torch.sin(pts.extract(['x']) * torch.pi) *
#                  torch.sin(pts.extract(['y']) * torch.pi)) / (2 * torch.pi ** 2)
    # loading
    new_solver = SupervisedSolver.load_from_checkpoint(
        f'{dir}/lightning_logs/version_0/checkpoints/epoch=4-step=5.ckpt',
        problem=problem, model=model)

#     truth_solution = poisson_sol
    test_pts = LabelTensor(torch.rand(20, 2), problem.input_variables)
    assert new_solver.forward(test_pts).shape == (20, 1)
    assert new_solver.forward(test_pts).shape == solver.forward(test_pts).shape
    torch.testing.assert_close(
        new_solver.forward(test_pts),
        solver.forward(test_pts))


# def test_wrong_constructor():
#     poisson_problem = Poisson()
#     with pytest.raises(ValueError):
#         SupervisedSolver(problem=poisson_problem, model=model)


# def test_train_cpu():
#     solver = SupervisedSolver(problem=problem, model=model)
#     trainer = Trainer(solver=solver,
#                       max_epochs=200,
#                       accelerator='gpu',
#                       batch_size=5,
#                       train_size=1,
#                       test_size=0.,
#                       val_size=0.)
#     trainer.train()
# test_train_cpu()


# def test_extra_features_constructor():
#     SupervisedSolver(problem=problem,
#                      model=model_extra_feats,
#                      extra_features=extra_feats)


# def test_extra_features_train_cpu():
#     solver = SupervisedSolver(problem=problem,
#                               model=model_extra_feats,
#                               extra_features=extra_feats)
#     trainer = Trainer(solver=solver,
#                       max_epochs=200,
#                       accelerator='gpu',
#                       batch_size=5)
#     trainer.train()
    # rm directories
    import shutil
    shutil.rmtree('tests/test_solvers/tmp')

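The `use_lt` flag above switches between labeled and plain tensors. A minimal
illustration of what a `LabelTensor` adds, using only the constructor and
`extract` already exercised in these tests (the variable names are arbitrary
choices for the sketch):

import torch
from pina import LabelTensor

pts = LabelTensor(torch.rand(4, 2), ['u_0', 'u_1'])
col = pts.extract(['u_0'])   # select the 'u_0' column by label
assert col.shape == (4, 1)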
42  tests/test_weighting/test_standard_weighting.py  Normal file
@@ -0,0 +1,42 @@
import pytest
import torch

from pina import Trainer
from pina.solvers import PINN
from pina.model import FeedForward
from pina.problem.zoo import Poisson2DSquareProblem
from pina.loss import ScalarWeighting

problem = Poisson2DSquareProblem()
model = FeedForward(len(problem.input_variables), len(problem.output_variables))
condition_names = problem.conditions.keys()


@pytest.mark.parametrize(
    "weights",
    [1, 1., dict(zip(condition_names, [1]*len(condition_names)))])
def test_constructor(weights):
    ScalarWeighting(weights=weights)


@pytest.mark.parametrize("weights", ['a', [1, 2, 3]])
def test_wrong_constructor(weights):
    with pytest.raises(ValueError):
        ScalarWeighting(weights=weights)


@pytest.mark.parametrize(
    "weights",
    [1, 1., dict(zip(condition_names, [1]*len(condition_names)))])
def test_aggregate(weights):
    weighting = ScalarWeighting(weights=weights)
    losses = dict(zip(condition_names,
                      [torch.randn(1) for _ in range(len(condition_names))]))
    weighting.aggregate(losses=losses)


@pytest.mark.parametrize(
    "weights",
    [1, 1., dict(zip(condition_names, [1]*len(condition_names)))])
def test_train_aggregation(weights):
    weighting = ScalarWeighting(weights=weights)
    problem.discretise_domain(50)
    solver = PINN(
        problem=problem,
        model=model,
        weighting=weighting)
    trainer = Trainer(solver=solver, max_epochs=5, accelerator='cpu')
    trainer.train()
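For reference, `test_aggregate` exercises the reduction of per-condition
losses into a single scalar. A sketch of the assumed semantics — scalar
weighting rendered as a plain weighted sum; this is an illustration, not the
ScalarWeighting implementation:

import torch

def scalar_aggregate(losses, weights):
    # a single number weights every condition equally
    if isinstance(weights, (int, float)):
        weights = {name: weights for name in losses}
    # weighted sum of the per-condition losses
    return sum(weights[name] * loss for name, loss in losses.items())

losses = {'gamma1': torch.tensor(0.5), 'D': torch.tensor(1.0)}
assert scalar_aggregate(losses, 2.0) == 3.0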