* solvers -> solver
* adaptive_functions -> adaptive_function
* callbacks -> callback
* operators -> operator
* pinns -> physics_informed_solver
* layers -> block
Dario Coscia
2025-02-19 11:35:43 +01:00
committed by Nicola Demo
parent 810d215ca0
commit df673cad4e
90 changed files with 155 additions and 151 deletions

pina/solver/__init__.py Normal file

@@ -0,0 +1,21 @@
__all__ = [
"SolverInterface",
"SingleSolverInterface",
"MultiSolverInterface",
"PINNInterface",
"PINN",
"GradientPINN",
"CausalPINN",
"CompetitivePINN",
"SelfAdaptivePINN",
"RBAPINN",
"SupervisedSolver",
"ReducedOrderModelSolver",
"GAROM",
]
from .solver import SolverInterface, SingleSolverInterface, MultiSolverInterface
from .physic_informed_solver import *
from .supervised import SupervisedSolver
from .rom import ReducedOrderModelSolver
from .garom import GAROM
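With this rename, downstream imports only change in the package path (``pina.solvers`` becomes ``pina.solver``). A minimal sketch of the updated imports, using class names taken from the ``__all__`` above:
# before this commit:  from pina.solvers import PINN, GAROM
from pina.solver import PINN, GAROM, SupervisedSolver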

pina/solver/garom.py Normal file

@@ -0,0 +1,307 @@
""" Module for GAROM """
import torch
from .solver import MultiSolverInterface
from ..utils import check_consistency
from ..loss.loss_interface import LossInterface
from ..condition import InputOutputPointsCondition
from ..utils import check_consistency
from ..loss import LossInterface, PowerLoss
from torch.nn.modules.loss import _Loss
class GAROM(MultiSolverInterface):
"""
GAROM solver class. This class implements the Generative Adversarial
Reduced Order Model solver, using user-specified ``models`` to solve
a specific order reduction ``problem``.
.. seealso::
**Original reference**: Coscia, D., Demo, N., & Rozza, G. (2023).
*Generative Adversarial Reduced Order Modelling*.
DOI: `arXiv preprint arXiv:2305.15881.
<https://doi.org/10.48550/arXiv.2305.15881>`_.
"""
accepted_conditions_types = InputOutputPointsCondition
def __init__(
self,
problem,
generator,
discriminator,
loss=None,
optimizer_generator=None,
optimizer_discriminator=None,
scheduler_generator=None,
scheduler_discriminator=None,
gamma=0.3,
lambda_k=0.001,
regularizer=False,
):
"""
:param AbstractProblem problem: The formulation of the problem.
:param torch.nn.Module generator: The neural network model to use
for the generator.
:param torch.nn.Module discriminator: The neural network model to use
for the discriminator.
:param torch.nn.Module loss: The loss function used as minimizer,
default ``None``. If ``loss`` is ``None`` the default
``PowerLoss(p=1)`` is used, as in the original paper.
:param Optimizer optimizer_generator: The neural network optimizer to
use for the generator network; default is `torch.optim.Adam`.
:param Optimizer optimizer_discriminator: The neural network optimizer
to use for the discriminator network; default is `torch.optim.Adam`.
:param Scheduler scheduler_generator: Learning rate scheduler
for the generator.
:param Scheduler scheduler_discriminator: Learning rate scheduler
for the discriminator.
:param gamma: Ratio of expected loss for generator and discriminator, defaults to 0.3.
:type gamma: float
:param lambda_k: Learning rate for control theory optimization, defaults to 0.001.
:type lambda_k: float
:param regularizer: Regularization term in the GAROM loss, defaults to False.
:type regularizer: bool
.. warning::
The algorithm works only for data-driven models. Hence, in the ``problem``
definition, the conditions must only contain ``input_points`` (e.g. coefficient
parameters, time parameters) and ``output_points``.
"""
# set loss
if loss is None:
loss = PowerLoss(p=1)
super().__init__(
models=[generator, discriminator],
problem=problem,
optimizers=[optimizer_generator, optimizer_discriminator],
schedulers=[
scheduler_generator,
scheduler_discriminator,
],
use_lt=False
)
# check consistency
check_consistency(loss, (LossInterface, _Loss, torch.nn.Module),
subclass=False)
self._loss = loss
# set automatic optimization for GANs
self.automatic_optimization = False
# check consistency
check_consistency(gamma, float)
check_consistency(lambda_k, float)
check_consistency(regularizer, bool)
# began hyperparameters
self.k = 0
self.gamma = gamma
self.lambda_k = lambda_k
self.regularizer = float(regularizer)
def forward(self, x, mc_steps=20, variance=False):
"""
Forward step for the GAROM solver.
:param x: The input tensor.
:type x: torch.Tensor
:param mc_steps: Number of Monte Carlo samples used to approximate the
expected value; defaults to 20.
:type mc_steps: int
:param variance: If ``True``, also return the sample variance of the solution; defaults to ``False``.
:type variance: bool
:return: The expected value of the generator distribution. If ``variance=True`` also the
sample variance is returned.
:rtype: torch.Tensor | tuple(torch.Tensor, torch.Tensor)
"""
# sampling
field_sample = [self.sample(x) for _ in range(mc_steps)]
field_sample = torch.stack(field_sample)
# extract mean
mean = field_sample.mean(dim=0)
if variance:
var = field_sample.var(dim=0)
return mean, var
return mean
def sample(self, x):
"""Draw a single sample from the generator distribution for the input ``x``."""
return self.generator(x)
def _train_generator(self, parameters, snapshots):
"""
Private method to train the generator network.
"""
optimizer = self.optimizer_generator
optimizer.zero_grad()
generated_snapshots = self.sample(parameters)
# generator loss
r_loss = self._loss(snapshots, generated_snapshots)
d_fake = self.discriminator(
[generated_snapshots, parameters]
)
g_loss = (
self._loss(d_fake, generated_snapshots) + self.regularizer * r_loss
)
# backward step
g_loss.backward()
optimizer.step()
return r_loss, g_loss
def on_train_batch_end(self, outputs, batch, batch_idx):
"""
This method is called at the end of each training batch, and overrides
the PyTorch Lightning implementation for logging the checkpoints.
:param torch.Tensor outputs: The output from the model for the
current batch.
:param tuple batch: The current batch of data.
:param int batch_idx: The index of the current batch.
:return: Whatever is returned by the parent
method ``on_train_batch_end``.
:rtype: Any
"""
# increase by one the counter of optimization to save loggers
(
self.trainer.fit_loop.epoch_loop.manual_optimization
.optim_step_progress.total.completed
) += 1
return super().on_train_batch_end(outputs, batch, batch_idx)
def _train_discriminator(self, parameters, snapshots):
"""
Private method to train the discriminator network.
"""
optimizer = self.optimizer_discriminator
optimizer.zero_grad()
# Generate a batch of images
generated_snapshots = self.sample(parameters)
# Discriminator pass
d_real = self.discriminator([snapshots, parameters])
d_fake = self.discriminator(
[generated_snapshots, parameters]
)
# evaluate loss
d_loss_real = self._loss(d_real, snapshots)
d_loss_fake = self._loss(d_fake, generated_snapshots.detach())
d_loss = d_loss_real - self.k * d_loss_fake
# backward step
d_loss.backward()
optimizer.step()
return d_loss_real, d_loss_fake, d_loss
def _update_weights(self, d_loss_real, d_loss_fake):
"""
Private method to update the balancing term ``k`` weighting the
discriminator loss (BEGAN-style control update).
"""
diff = torch.mean(self.gamma * d_loss_real - d_loss_fake)
# Update weight term for fake samples
self.k += self.lambda_k * diff.item()
self.k = min(max(self.k, 0), 1) # Constraint to interval [0, 1]
return diff
def optimization_cycle(self, batch):
"""GAROM solver training step.
:param batch: The batch element in the dataloader.
:type batch: tuple
:return: The sum of the loss functions.
:rtype: LabelTensor
"""
condition_loss = {}
for condition_name, points in batch:
parameters, snapshots = points['input_points'], points['output_points']
d_loss_real, d_loss_fake, d_loss = self._train_discriminator(
parameters, snapshots
)
r_loss, g_loss = self._train_generator(parameters, snapshots)
diff = self._update_weights(d_loss_real, d_loss_fake)
condition_loss[condition_name] = r_loss
# some extra logging
self.store_log(
"d_loss",
float(d_loss),
self.get_batch_size(batch)
)
self.store_log(
"g_loss",
float(g_loss),
self.get_batch_size(batch)
)
self.store_log(
"stability_metric",
float(d_loss_real + torch.abs(diff)),
self.get_batch_size(batch)
)
return condition_loss
def validation_step(self, batch):
condition_loss = {}
for condition_name, points in batch:
parameters, snapshots = points['input_points'], points['output_points']
snapshots_gen = self.generator(parameters)
condition_loss[condition_name] = self._loss(snapshots, snapshots_gen)
loss = self.weighting.aggregate(condition_loss)
self.store_log('val_loss', loss, self.get_batch_size(batch))
return loss
def test_step(self, batch):
condition_loss = {}
for condition_name, points in batch:
parameters, snapshots = points['input_points'], points['output_points']
snapshots_gen = self.generator(parameters)
condition_loss[condition_name] = self._loss(snapshots, snapshots_gen)
loss = self.weighting.aggregate(condition_loss)
self.store_log('test_loss', loss, self.get_batch_size(batch))
return loss
@property
def generator(self):
return self.models[0]
@property
def discriminator(self):
return self.models[1]
@property
def optimizer_generator(self):
return self.optimizers[0].instance
@property
def optimizer_discriminator(self):
return self.optimizers[1].instance
@property
def scheduler_generator(self):
return self.schedulers[0].instance
@property
def scheduler_discriminator(self):
return self.schedulers[1].instance
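As a quick illustration of the BEGAN-style balancing implemented in ``_update_weights`` and logged as ``stability_metric``, here is a self-contained plain-torch sketch; the loss values below are made up for the example and are not part of the commit.
import torch

gamma, lambda_k, k = 0.3, 0.001, 0.0
d_loss_real = torch.tensor(0.8)  # hypothetical discriminator loss on real snapshots
d_loss_fake = torch.tensor(0.5)  # hypothetical discriminator loss on generated snapshots

# same update rule as GAROM._update_weights
diff = torch.mean(gamma * d_loss_real - d_loss_fake)
k = min(max(k + lambda_k * diff.item(), 0), 1)  # constrain k to [0, 1]
stability_metric = float(d_loss_real + torch.abs(diff))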


@@ -0,0 +1,17 @@
__all__ = [
"PINNInterface",
"PINN",
"GradientPINN",
"CausalPINN",
"CompetitivePINN",
"SelfAdaptivePINN",
"RBAPINN",
]
from .pinn_interface import PINNInterface
from .pinn import PINN
from .rba_pinn import RBAPINN
from .causal_pinn import CausalPINN
from .gradient_pinn import GradientPINN
from .competitive_pinn import CompetitivePINN
from .self_adaptive_pinn import SelfAdaptivePINN


@@ -0,0 +1,207 @@
""" Module for Causal PINN. """
import torch
from pina.problem import TimeDependentProblem
from .pinn import PINN
from pina.utils import check_consistency
class CausalPINN(PINN):
r"""
Causal Physics Informed Neural Network (CausalPINN) solver class.
This class implements the Causal Physics Informed Neural
Network solver, using a user-specified ``model`` to solve a specific
``problem``. It can be used for solving both forward and inverse problems.
The Causal Physics Informed Network aims to find
the solution :math:`\mathbf{u}:\Omega\rightarrow\mathbb{R}^m`
of the differential problem:
.. math::
\begin{cases}
\mathcal{A}[\mathbf{u}](\mathbf{x})=0\quad,\mathbf{x}\in\Omega\\
\mathcal{B}[\mathbf{u}](\mathbf{x})=0\quad,
\mathbf{x}\in\partial\Omega
\end{cases}
minimizing the loss function
.. math::
\mathcal{L}_{\rm{problem}} = \frac{1}{N_t}\sum_{i=1}^{N_t}
\omega_{i}\mathcal{L}_r(t_i),
where:
.. math::
\mathcal{L}_r(t) = \frac{1}{N}\sum_{i=1}^N
\mathcal{L}(\mathcal{A}[\mathbf{u}](\mathbf{x}_i, t)) +
\frac{1}{N}\sum_{i=1}^N
\mathcal{L}(\mathcal{B}[\mathbf{u}](\mathbf{x}_i, t))
and,
.. math::
\omega_i = \exp\left(\epsilon \sum_{k=1}^{i-1}\mathcal{L}_r(t_k)\right).
:math:`\epsilon` is a hyperparameter, by default set to :math:`100`, while
:math:`\mathcal{L}` is a specific loss function,
default Mean Square Error:
.. math::
\mathcal{L}(v) = \| v \|^2_2.
.. seealso::
**Original reference**: Wang, Sifan, Shyam Sankaran, and Paris
Perdikaris. "Respecting causality for training physics-informed
neural networks." Computer Methods in Applied Mechanics
and Engineering 421 (2024): 116813.
DOI `10.1016 <https://doi.org/10.1016/j.cma.2024.116813>`_.
.. note::
This class can only work for problems inheriting
from at least
:class:`~pina.problem.timedep_problem.TimeDependentProblem` class.
"""
def __init__(self,
problem,
model,
optimizer=None,
scheduler=None,
weighting=None,
loss=None,
eps=100):
"""
:param torch.nn.Module model: The neural network model to use.
:param AbstractProblem problem: The formulation of the problem.
:param torch.optim.Optimizer optimizer: The neural network optimizer to
use; default `None`.
:param torch.optim.LRScheduler scheduler: Learning rate scheduler;
default `None`.
:param WeightingInterface weighting: The weighting schema to use;
default `None`.
:param torch.nn.Module loss: The loss function to be minimized;
default `None`.
:param float eps: The exponential decay parameter; default `100`.
"""
super().__init__(model=model,
problem=problem,
optimizer=optimizer,
scheduler=scheduler,
weighting=weighting,
loss=loss)
# checking consistency
check_consistency(eps, (int, float))
self._eps = eps
if not isinstance(self.problem, TimeDependentProblem):
raise ValueError(
"Casual PINN works only for problems"
"inheriting from TimeDependentProblem."
)
def loss_phys(self, samples, equation):
"""
Computes the physics loss for the Causal PINN solver based on given
samples and equation.
:param LabelTensor samples: The samples to evaluate the physics loss.
:param EquationInterface equation: The governing equation
representing the physics.
:return: The physics loss calculated based on given
samples and equation.
:rtype: LabelTensor
"""
# split sequentially ordered time tensors into chunks
chunks, labels = self._split_tensor_into_chunks(samples)
# compute residuals - these correspond to the ordered loss function
# values for each time step. Apply `flatten` to ensure we obtain
# a tensor of shape #chunks after concatenating the residuals
time_loss = []
for chunk in chunks:
chunk.labels = labels
# classical PINN loss
residual = self.compute_residual(samples=chunk, equation=equation)
loss_val = self.loss(
torch.zeros_like(residual, requires_grad=True), residual
)
time_loss.append(loss_val)
# concatenate residuals
time_loss = torch.stack(time_loss)
# compute weights without storing the gradient
with torch.no_grad():
weights = self._compute_weights(time_loss)
return (weights * time_loss).mean()
@property
def eps(self):
"""
The exponential decay parameter.
"""
return self._eps
@eps.setter
def eps(self, value):
"""
Setter method for the eps parameter.
:param float value: The exponential decay parameter.
"""
check_consistency(value, float)
self._eps = value
def _sort_label_tensor(self, tensor):
"""
Sorts the label tensor based on time variables.
:param LabelTensor tensor: The label tensor to be sorted.
:return: The sorted label tensor based on time variables.
:rtype: LabelTensor
"""
# labels input tensors
labels = tensor.labels
# extract time tensor
time_tensor = tensor.extract(self.problem.temporal_domain.variables)
# sort the time tensors (this is very bad for GPU)
_, idx = torch.sort(time_tensor.tensor.flatten())
tensor = tensor[idx]
tensor.labels = labels
return tensor
def _split_tensor_into_chunks(self, tensor):
"""
Splits the label tensor into chunks based on time.
:param LabelTensor tensor: The label tensor to be split.
:return: Tuple containing the chunks and the original labels.
:rtype: Tuple[List[LabelTensor], List]
"""
# extract labels
labels = tensor.labels
# sort input tensor based on time
tensor = self._sort_label_tensor(tensor)
# extract time tensor
time_tensor = tensor.extract(self.problem.temporal_domain.variables)
# count unique tensors in time
_, idx_split = time_tensor.unique(return_counts=True)
# split the tensor based on time
chunks = torch.split(tensor, tuple(idx_split))
return chunks, labels
def _compute_weights(self, loss):
"""
Computes the weights for the physics loss based on the cumulative loss.
:param LabelTensor loss: The physics loss values.
:return: The computed weights for the physics loss.
:rtype: LabelTensor
"""
# compute cumulative loss and multiply by epsilon
cumulative_loss = self._eps * torch.cumsum(loss, dim=0)
# return the exponential of the negative weighted cumulative sum
return torch.exp(-cumulative_loss)
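The causal weighting in ``_compute_weights`` is just a cumulative sum of the per-time-step losses followed by a negative exponential. A standalone sketch with hypothetical ordered residual losses (the tensor values are invented for illustration):
import torch

eps = 100
time_loss = torch.tensor([0.02, 0.05, 0.10, 0.40])  # hypothetical ordered residual losses L_r(t_i)
with torch.no_grad():
    weights = torch.exp(-eps * torch.cumsum(time_loss, dim=0))
causal_loss = (weights * time_loss).mean()  # mirrors the return value of loss_phys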


@@ -0,0 +1,336 @@
""" Module for Competitive PINN. """
import torch
import copy
from pina.problem import InverseProblem
from .pinn_interface import PINNInterface
from ..solver import MultiSolverInterface
class CompetitivePINN(PINNInterface, MultiSolverInterface):
r"""
Competitive Physics Informed Neural Network (CompetitivePINN) solver class.
This class implements the Competitive Physics Informed Neural
Network solver, using a user-specified ``model`` to solve a specific
``problem``. It can be used for solving both forward and inverse problems.
The Competitive Physics Informed Network aims to find
the solution :math:`\mathbf{u}:\Omega\rightarrow\mathbb{R}^m`
of the differential problem:
.. math::
\begin{cases}
\mathcal{A}[\mathbf{u}](\mathbf{x})=0\quad,\mathbf{x}\in\Omega\\
\mathcal{B}[\mathbf{u}](\mathbf{x})=0\quad,
\mathbf{x}\in\partial\Omega
\end{cases}
with a minimization (over the ``model`` parameters) and maximization (over
the ``discriminator`` parameters) of the loss function
.. math::
\mathcal{L}_{\rm{problem}} = \frac{1}{N}\sum_{i=1}^N
\mathcal{L}(D(\mathbf{x}_i)\mathcal{A}[\mathbf{u}](\mathbf{x}_i))+
\frac{1}{N}\sum_{i=1}^N
\mathcal{L}(D(\mathbf{x}_i)\mathcal{B}[\mathbf{u}](\mathbf{x}_i))
where :math:`D` is the discriminator network, which tries to find the points
where the network performs worst, and :math:`\mathcal{L}` is a specific loss
function, default Mean Square Error:
.. math::
\mathcal{L}(v) = \| v \|^2_2.
.. seealso::
**Original reference**: Zeng, Qi, et al.
"Competitive physics informed networks." International Conference on
Learning Representations, ICLR 2022
`OpenReview Preprint <https://openreview.net/forum?id=z9SIj-IM7tn>`_.
.. warning::
This solver does not currently support the possibility to pass
``extra_feature``.
"""
def __init__(self,
problem,
model,
discriminator=None,
optimizer_model=None,
optimizer_discriminator=None,
scheduler_model=None,
scheduler_discriminator=None,
weighting=None,
loss=None):
"""
:param AbstractProblem problem: The formulation of the problem.
:param torch.nn.Module model: The neural network model to use
for the model.
:param torch.nn.Module discriminator: The neural network model to use
for the discriminator. If ``None``, the discriminator network will
have the same architecture as the model network.
:param torch.optim.Optimizer optimizer_model: The neural network
optimizer to use for the model network; default `None`.
:param torch.optim.Optimizer optimizer_discriminator: The neural network
optimizer to use for the discriminator network; default `None`.
:param torch.optim.LRScheduler scheduler_model: Learning rate scheduler
for the model; default `None`.
:param torch.optim.LRScheduler scheduler_discriminator: Learning rate
scheduler for the discriminator; default `None`.
:param WeightingInterface weighting: The weighting schema to use;
default `None`.
:param torch.nn.Module loss: The loss function to be minimized;
default `None`.
"""
if discriminator is None:
discriminator = copy.deepcopy(model)
super().__init__(models=[model, discriminator],
problem=problem,
optimizers=[optimizer_model, optimizer_discriminator],
schedulers=[scheduler_model, scheduler_discriminator],
weighting=weighting,
loss=loss)
# Set automatic optimization to False
self.automatic_optimization = False
def forward(self, x):
r"""
Forward pass implementation for the PINN solver. It returns the function
evaluation :math:`\mathbf{u}(\mathbf{x})` at the control points
:math:`\mathbf{x}`.
:param LabelTensor x: Input tensor for the PINN solver. It expects
a tensor :math:`N \times D`, where :math:`N` is the number of points
in the mesh and :math:`D` is the dimension of the problem.
:return: PINN solution evaluated at the control points.
:rtype: LabelTensor
"""
return self.neural_net(x)
def training_step(self, batch):
"""
Solver training step, overridden to perform manual optimization.
:param batch: The batch element in the dataloader.
:type batch: tuple
:return: The sum of the loss functions.
:rtype: LabelTensor
"""
self.optimizer_model.instance.zero_grad()
self.optimizer_discriminator.instance.zero_grad()
loss = super().training_step(batch)
self.optimizer_model.instance.step()
self.optimizer_discriminator.instance.step()
return loss
def loss_phys(self, samples, equation):
"""
Computes the physics loss for the Competitive PINN solver based on given
samples and equation.
:param LabelTensor samples: The samples to evaluate the physics loss.
:param EquationInterface equation: The governing equation
representing the physics.
:return: The physics loss calculated based on given
samples and equation.
:rtype: LabelTensor
"""
# Train the model for one step
with torch.no_grad():
discriminator_bets = self.discriminator(samples)
loss_val = self._train_model(samples, equation, discriminator_bets)
# Detach samples from the existing computational graph and
# create a new one by setting requires_grad to True.
# In alternative set `retain_graph=True`.
samples = samples.detach()
samples.requires_grad_()
# Train the discriminator for one step
discriminator_bets = self.discriminator(samples)
self._train_discriminator(samples, equation, discriminator_bets)
return loss_val
def loss_data(self, input_pts, output_pts):
"""
The data loss for the CompetitivePINN solver. It computes the loss
between the network output against the true solution.
:param LabelTensor input_pts: The input to the neural networks.
:param LabelTensor output_pts: The true solution to compare against the
network solution.
:return: The computed data loss.
:rtype: torch.Tensor
"""
loss_val = super().loss_data(input_pts, output_pts)
# prepare for optimizer step called in training step
loss_val.backward()
return loss_val
def configure_optimizers(self):
"""
Optimizer configuration for the Competitive PINN solver.
:return: The optimizers and the schedulers
:rtype: tuple(list, list)
"""
# If the problem is an InverseProblem, add the unknown parameters
# to the parameters to be optimized
self.optimizer_model.hook(self.neural_net.parameters())
self.optimizer_discriminator.hook(self.discriminator.parameters())
if isinstance(self.problem, InverseProblem):
self.optimizer_model.instance.add_param_group(
{
"params": [
self._params[var]
for var in self.problem.unknown_variables
]
}
)
self.scheduler_model.hook(self.optimizer_model)
self.scheduler_discriminator.hook(self.optimizer_discriminator)
return (
[self.optimizer_model.instance,
self.optimizer_discriminator.instance],
[self.scheduler_model.instance,
self.scheduler_discriminator.instance]
)
def on_train_batch_end(self, outputs, batch, batch_idx):
"""
This method is called at the end of each training batch, and overrides
the PyTorch Lightning implementation for logging the checkpoints.
:param torch.Tensor outputs: The output from the model for the
current batch.
:param tuple batch: The current batch of data.
:param int batch_idx: The index of the current batch.
:return: Whatever is returned by the parent
method ``on_train_batch_end``.
:rtype: Any
"""
# increase by one the counter of optimization to save loggers
(
self.trainer.fit_loop.epoch_loop.manual_optimization
.optim_step_progress.total.completed
) += 1
return super().on_train_batch_end(outputs, batch, batch_idx)
def _train_discriminator(self, samples, equation, discriminator_bets):
"""
Trains the discriminator network of the Competitive PINN.
:param LabelTensor samples: Input samples to evaluate the physics loss.
:param EquationInterface equation: The governing equation representing
the physics.
:param Tensor discriminator_bets: Predictions made by the discriminator
network.
"""
# Compute residual. Detach since discriminator weights are fixed
residual = self.compute_residual(
samples=samples, equation=equation
).detach()
# Compute competitive residual, then maximise the loss
competitive_residual = residual * discriminator_bets
loss_val = -self.loss(
torch.zeros_like(competitive_residual, requires_grad=True),
competitive_residual,
)
# prepare for optimizer step called in training step
self.manual_backward(loss_val)
return
def _train_model(self, samples, equation, discriminator_bets):
"""
Trains the model network of the Competitive PINN.
:param LabelTensor samples: Input samples to evaluate the physics loss.
:param EquationInterface equation: The governing equation representing
the physics.
:param Tensor discriminator_bets: Predictions made by the discriminator
network.
:return: The residual loss, computed without the discriminator weighting.
:rtype: torch.Tensor
"""
# Compute residual
residual = self.compute_residual(samples=samples, equation=equation)
with torch.no_grad():
loss_residual = self.loss(torch.zeros_like(residual), residual)
# Compute competitive residual. Detach discriminator_bets
# to optimize only the generator model
competitive_residual = residual * discriminator_bets.detach()
loss_val = self.loss(
torch.zeros_like(competitive_residual, requires_grad=True),
competitive_residual,
)
# prepare for optimizer step called in training step
self.manual_backward(loss_val)
return loss_residual
@property
def neural_net(self):
"""
Returns the neural network model.
:return: The neural network model.
:rtype: torch.nn.Module
"""
return self.models[0]
@property
def discriminator(self):
"""
Returns the discriminator model (if applicable).
:return: The discriminator model.
:rtype: torch.nn.Module
"""
return self.models[1]
@property
def optimizer_model(self):
"""
Returns the optimizer associated with the neural network model.
:return: The optimizer for the neural network model.
:rtype: torch.optim.Optimizer
"""
return self.optimizers[0]
@property
def optimizer_discriminator(self):
"""
Returns the optimizer associated with the discriminator (if applicable).
:return: The optimizer for the discriminator.
:rtype: torch.optim.Optimizer
"""
return self.optimizers[1]
@property
def scheduler_model(self):
"""
Returns the scheduler associated with the neural network model.
:return: The scheduler for the neural network model.
:rtype: torch.optim.lr_scheduler._LRScheduler
"""
return self.schedulers[0]
@property
def scheduler_discriminator(self):
"""
Returns the scheduler associated with the discriminator (if applicable).
:return: The scheduler for the discriminator.
:rtype: torch.optim.lr_scheduler._LRScheduler
"""
return self.schedulers[1]
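Stripped of the Lightning plumbing, the competitive objective alternates a descent step on the residual weighted by the (frozen) discriminator and an ascent step on the same quantity with the residual frozen. A minimal plain-torch sketch; the tensors below are placeholders, not the solver's actual inputs.
import torch

mse = torch.nn.MSELoss()
residual = torch.randn(10, 1, requires_grad=True)  # hypothetical PDE residual
discriminator_bets = torch.rand(10, 1, requires_grad=True)  # hypothetical discriminator output

# model step: minimise the weighted residual while the discriminator is frozen
model_loss = mse(torch.zeros(10, 1), residual * discriminator_bets.detach())

# discriminator step: maximise the same quantity while the residual is frozen
discriminator_loss = -mse(torch.zeros(10, 1), residual.detach() * discriminator_bets)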


@@ -0,0 +1,124 @@
""" Module for Gradient PINN. """
import torch
from .pinn import PINN
from pina.operator import grad
from pina.problem import SpatialProblem
class GradientPINN(PINN):
r"""
Gradient Physics Informed Neural Network (GradientPINN) solver class.
This class implements the Gradient Physics Informed Neural
Network solver, using a user-specified ``model`` to solve a specific
``problem``. It can be used for solving both forward and inverse problems.
The Gradient Physics Informed Network aims to find
the solution :math:`\mathbf{u}:\Omega\rightarrow\mathbb{R}^m`
of the differential problem:
.. math::
\begin{cases}
\mathcal{A}[\mathbf{u}](\mathbf{x})=0\quad,\mathbf{x}\in\Omega\\
\mathcal{B}[\mathbf{u}](\mathbf{x})=0\quad,
\mathbf{x}\in\partial\Omega
\end{cases}
minimizing the loss function
.. math::
\mathcal{L}_{\rm{problem}} =& \frac{1}{N}\sum_{i=1}^N
\mathcal{L}(\mathcal{A}[\mathbf{u}](\mathbf{x}_i)) +
\frac{1}{N}\sum_{i=1}^N
\mathcal{L}(\mathcal{B}[\mathbf{u}](\mathbf{x}_i)) + \\
&\frac{1}{N}\sum_{i=1}^N
\nabla_{\mathbf{x}}\mathcal{L}(\mathcal{A}[\mathbf{u}](\mathbf{x}_i)) +
\frac{1}{N}\sum_{i=1}^N
\nabla_{\mathbf{x}}\mathcal{L}(\mathcal{B}[\mathbf{u}](\mathbf{x}_i))
where :math:`\mathcal{L}` is a specific loss function,
default Mean Square Error:
.. math::
\mathcal{L}(v) = \| v \|^2_2.
.. seealso::
**Original reference**: Yu, Jeremy, et al. "Gradient-enhanced
physics-informed neural networks for forward and inverse
PDE problems." Computer Methods in Applied Mechanics
and Engineering 393 (2022): 114823.
DOI: `10.1016 <https://doi.org/10.1016/j.cma.2022.114823>`_.
.. note::
This class can only work for problems inheriting
from at least :class:`~pina.problem.spatial_problem.SpatialProblem`
class.
"""
def __init__(self,
problem,
model,
optimizer=None,
scheduler=None,
weighting=None,
loss=None):
"""
:param torch.nn.Module model: The neural network model to use.
:param AbstractProblem problem: The formulation of the problem. It must
inherit from at least
:class:`~pina.problem.spatial_problem.SpatialProblem` to compute
the gradient of the loss.
:param torch.optim.Optimizer optimizer: The neural network optimizer to
use; default `None`.
:param torch.optim.LRScheduler scheduler: Learning rate scheduler;
default `None`.
:param WeightingInterface weighting: The weighting schema to use;
default `None`.
:param torch.nn.Module loss: The loss function to be minimized;
default `None`.
"""
super().__init__(model=model,
problem=problem,
optimizer=optimizer,
scheduler=scheduler,
weighting=weighting,
loss=loss)
if not isinstance(self.problem, SpatialProblem):
raise ValueError(
"Gradient PINN computes the gradient of the "
"PINN loss with respect to the spatial "
"coordinates, thus the PINA problem must be "
"a SpatialProblem."
)
def loss_phys(self, samples, equation):
"""
Computes the physics loss for the GPINN solver based on given
samples and equation.
:param LabelTensor samples: The samples to evaluate the physics loss.
:param EquationInterface equation: The governing equation
representing the physics.
:return: The physics loss calculated based on given
samples and equation.
:rtype: LabelTensor
"""
# classical PINN loss
residual = self.compute_residual(samples=samples, equation=equation)
loss_value = self.loss(
torch.zeros_like(residual, requires_grad=True), residual
)
# gradient PINN loss
loss_value = loss_value.reshape(-1, 1)
loss_value.labels = ["__loss"]
loss_grad = grad(loss_value, samples, d=self.problem.spatial_variables)
g_loss_phys = self.loss(
torch.zeros_like(loss_grad, requires_grad=True), loss_grad
)
return loss_value + g_loss_phys
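Outside of PINA's ``LabelTensor`` and ``grad`` helpers, the gradient-enhanced term is an extra autograd pass of the residual loss with respect to the spatial coordinates. A hedged plain-torch sketch; the coordinates, "network output" and residual below are placeholders for illustration only.
import torch

x = torch.linspace(0, 1, 20, requires_grad=True).reshape(-1, 1)  # hypothetical spatial coordinates
u = torch.sin(torch.pi * x)  # placeholder for the network output u(x)
residual = torch.autograd.grad(u.sum(), x, create_graph=True)[0]  # placeholder PDE residual
loss_value = (residual ** 2).mean()  # classical PINN loss

# gradient-enhanced term: penalise the gradient of the loss w.r.t. the coordinates
loss_grad = torch.autograd.grad(loss_value, x, create_graph=True)[0]
total_loss = loss_value + (loss_grad ** 2).mean()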


@@ -0,0 +1,118 @@
""" Module for Physics Informed Neural Network. """
import torch
from .pinn_interface import PINNInterface
from ..solver import SingleSolverInterface
from ...problem import InverseProblem
class PINN(PINNInterface, SingleSolverInterface):
r"""
Physics Informed Neural Network (PINN) solver class.
This class implements the Physics Informed Neural
Network solver, using a user-specified ``model`` to solve a specific
``problem``. It can be used for solving both forward and inverse problems.
The Physics Informed Network aims to find
the solution :math:`\mathbf{u}:\Omega\rightarrow\mathbb{R}^m`
of the differential problem:
.. math::
\begin{cases}
\mathcal{A}[\mathbf{u}](\mathbf{x})=0\quad,\mathbf{x}\in\Omega\\
\mathcal{B}[\mathbf{u}](\mathbf{x})=0\quad,
\mathbf{x}\in\partial\Omega
\end{cases}
minimizing the loss function
.. math::
\mathcal{L}_{\rm{problem}} = \frac{1}{N}\sum_{i=1}^N
\mathcal{L}(\mathcal{A}[\mathbf{u}](\mathbf{x}_i)) +
\frac{1}{N}\sum_{i=1}^N
\mathcal{L}(\mathcal{B}[\mathbf{u}](\mathbf{x}_i))
where :math:`\mathcal{L}` is a specific loss function,
default Mean Square Error:
.. math::
\mathcal{L}(v) = \| v \|^2_2.
.. seealso::
**Original reference**: Karniadakis, G. E., Kevrekidis, I. G., Lu, L.,
Perdikaris, P., Wang, S., & Yang, L. (2021).
Physics-informed machine learning. Nature Reviews Physics, 3, 422-440.
DOI: `10.1038 <https://doi.org/10.1038/s42254-021-00314-5>`_.
"""
def __init__(self,
problem,
model,
optimizer=None,
scheduler=None,
weighting=None,
loss=None):
"""
:param torch.nn.Module model: The neural network model to use.
:param AbstractProblem problem: The formulation of the problem.
:param torch.optim.Optimizer optimizer: The neural network optimizer to
use; default `None`.
:param torch.optim.LRScheduler scheduler: Learning rate scheduler;
default `None`.
:param WeightingInterface weighting: The weighting schema to use;
default `None`.
:param torch.nn.Module loss: The loss function to be minimized;
default `None`.
"""
super().__init__(model=model,
problem=problem,
optimizer=optimizer,
scheduler=scheduler,
weighting=weighting,
loss=loss)
def loss_phys(self, samples, equation):
"""
Computes the physics loss for the PINN solver based on given
samples and equation.
:param LabelTensor samples: The samples to evaluate the physics loss.
:param EquationInterface equation: The governing equation
representing the physics.
:return: The physics loss calculated based on given
samples and equation.
:rtype: LabelTensor
"""
residual = self.compute_residual(samples=samples, equation=equation)
loss_value = self.loss(
torch.zeros_like(residual, requires_grad=True), residual
)
return loss_value
def configure_optimizers(self):
"""
Optimizer configuration for the PINN solver.
:return: The optimizers and the schedulers
:rtype: tuple(list, list)
"""
# If the problem is an InverseProblem, add the unknown parameters
# to the parameters to be optimized.
self.optimizer.hook(self.model.parameters())
if isinstance(self.problem, InverseProblem):
self.optimizer.instance.add_param_group(
{
"params": [
self._params[var]
for var in self.problem.unknown_variables
]
}
)
self.scheduler.hook(self.optimizer)
return (
[self.optimizer.instance],
[self.scheduler.instance]
)
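A hedged end-to-end usage sketch for the renamed module: the problem class, the discretisation call and the network sizes below are assumptions made for illustration and are not taken from this commit; the exact signatures may differ between PINA versions.
import torch
from pina import Trainer
from pina.solver import PINN

problem = Poisson2DSquareProblem()  # hypothetical problem class, assumed defined elsewhere
problem.discretise_domain(100)      # assumed sampling call; exact signature depends on the PINA version
model = torch.nn.Sequential(torch.nn.Linear(2, 64), torch.nn.Tanh(), torch.nn.Linear(64, 1))
solver = PINN(problem=problem, model=model)
trainer = Trainer(solver=solver, max_epochs=1000)
trainer.train()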


@@ -0,0 +1,191 @@
""" Module for Physics Informed Neural Network Interface."""
from abc import ABCMeta, abstractmethod
import torch
from torch.nn.modules.loss import _Loss
from ..solver import SolverInterface
from ...utils import check_consistency
from ...loss.loss_interface import LossInterface
from ...problem import InverseProblem
from ...condition import (
InputOutputPointsCondition,
InputPointsEquationCondition,
DomainEquationCondition
)
class PINNInterface(SolverInterface, metaclass=ABCMeta):
"""
Base PINN solver class. This class implements the Solver Interface
for Physics Informed Neural Network solvers.
This class can be used to define PINNs with multiple ``optimizers``
and/or ``models``.
By default it accepts an :class:`~pina.problem.abstract_problem.AbstractProblem`,
so solvers inheriting from this class can choose which type of problem
they are designed to solve.
"""
accepted_conditions_types = (
InputOutputPointsCondition,
InputPointsEquationCondition,
DomainEquationCondition
)
def __init__(self,
problem,
loss=None,
**kwargs):
"""
:param AbstractProblem problem: A problem definition instance.
:param torch.nn.Module loss: The loss function to be minimized,
default `None`.
"""
if loss is None:
loss = torch.nn.MSELoss()
super().__init__(problem=problem,
use_lt=True,
**kwargs)
# check consistency
check_consistency(loss, (LossInterface, _Loss), subclass=False)
# assign variables
self._loss = loss
# inverse problem handling
if isinstance(self.problem, InverseProblem):
self._params = self.problem.unknown_parameters
self._clamp_params = self._clamp_inverse_problem_params
else:
self._params = None
self._clamp_params = lambda: None
self.__metric = None
def optimization_cycle(self, batch):
return self._run_optimization_cycle(batch, self.loss_phys)
@torch.set_grad_enabled(True)
def validation_step(self, batch):
losses = self._run_optimization_cycle(batch, self._residual_loss)
loss = self.weighting.aggregate(losses).as_subclass(torch.Tensor)
self.store_log('val_loss', loss, self.get_batch_size(batch))
return loss
@torch.set_grad_enabled(True)
def test_step(self, batch):
losses = self._run_optimization_cycle(batch, self._residual_loss)
loss = self.weighting.aggregate(losses).as_subclass(torch.Tensor)
self.store_log('test_loss', loss, self.get_batch_size(batch))
return loss
def loss_data(self, input_pts, output_pts):
"""
The data loss for the PINN solver. It computes the loss between
the network output and the true solution. This function
should not be overridden unless intentionally.
:param LabelTensor input_pts: The input to the neural networks.
:param LabelTensor output_pts: The true solution to compare against the
network solution.
:return: The computed data loss.
:rtype: torch.Tensor
"""
return self._loss(self.forward(input_pts), output_pts)
@abstractmethod
def loss_phys(self, samples, equation):
"""
Computes the physics loss for the physics informed solver based on given
samples and equation. This method must be overridden by all inheriting
classes; it is the core method for defining a new physics informed solver.
:param LabelTensor samples: The samples to evaluate the physics loss.
:param EquationInterface equation: The governing equation
representing the physics.
:return: The physics loss calculated based on given
samples and equation.
:rtype: LabelTensor
"""
pass
def compute_residual(self, samples, equation):
"""
Compute the residual for Physics Informed learning. This function
returns the :obj:`~pina.equation.equation.Equation` specified in the
:obj:`~pina.condition.Condition` evaluated at the ``samples`` points.
:param LabelTensor samples: The samples to evaluate the physics loss.
:param EquationInterface equation: The governing equation
representing the physics.
:return: The residual of the neural network solution.
:rtype: LabelTensor
"""
try:
residual = equation.residual(samples, self.forward(samples))
except TypeError:
# this occurs when the function has three inputs (inverse problem)
residual = equation.residual(
samples,
self.forward(samples),
self._params
)
return residual
def _residual_loss(self, samples, equation):
residuals = self.compute_residual(samples, equation)
return self.loss(residuals, torch.zeros_like(residuals))
def _run_optimization_cycle(self, batch, loss_residuals):
condition_loss = {}
for condition_name, points in batch:
self.__metric = condition_name
# if equations are passed
if 'output_points' not in points:
input_pts = points['input_points']
condition = self.problem.conditions[condition_name]
loss = loss_residuals(
input_pts.requires_grad_(),
condition.equation
)
# if data are passed
else:
input_pts = points['input_points']
output_pts = points['output_points']
loss = self.loss_data(
input_pts=input_pts.requires_grad_(),
output_pts=output_pts
)
# append loss
condition_loss[condition_name] = loss
# clamp unknown parameters in InverseProblem (if needed)
self._clamp_params()
return condition_loss
def _clamp_inverse_problem_params(self):
"""
Clamps the parameters of the inverse problem
solver to the specified ranges.
"""
for v in self._params:
self._params[v].data.clamp_(
self.problem.unknown_parameter_domain.range_[v][0],
self.problem.unknown_parameter_domain.range_[v][1],
)
@property
def loss(self):
"""
Loss used for training.
"""
return self._loss
@property
def current_condition_name(self):
"""
The current condition name.
"""
return self.__metric
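Since ``loss_phys`` is the single abstract hook, defining a new physics informed solver mostly amounts to overriding it. A minimal sketch of a custom subclass, reusing the concrete ``PINN`` class above for the optimizer plumbing; the class name is hypothetical.
import torch
from pina.solver import PINN

class MyCustomPINN(PINN):
    """A hypothetical solver that only overrides the abstract hook."""
    def loss_phys(self, samples, equation):
        # same behaviour as the standard PINN, shown as a template to customise
        residual = self.compute_residual(samples=samples, equation=equation)
        return self.loss(torch.zeros_like(residual, requires_grad=True), residual)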


@@ -0,0 +1,172 @@
""" Module for Residual-Based Attention PINN. """
from copy import deepcopy
import torch
from .pinn import PINN
from ...utils import check_consistency
class RBAPINN(PINN):
r"""
Residual-based Attention PINN (RBAPINN) solver class.
This class implements the Residual-based Attention Physics Informed Neural
Network solver, using a user-specified ``model`` to solve a specific
``problem``. It can be used for solving both forward and inverse problems.
The Residual-based Attention Physics Informed Neural Network aims to find
the solution :math:`\mathbf{u}:\Omega\rightarrow\mathbb{R}^m`
of the differential problem:
.. math::
\begin{cases}
\mathcal{A}[\mathbf{u}](\mathbf{x})=0\quad,\mathbf{x}\in\Omega\\
\mathcal{B}[\mathbf{u}](\mathbf{x})=0\quad,
\mathbf{x}\in\partial\Omega
\end{cases}
minimizing the loss function
.. math::
\mathcal{L}_{\rm{problem}} = \frac{1}{N} \sum_{i=1}^{N_\Omega}
\lambda_{\Omega}^{i} \mathcal{L} \left( \mathcal{A}
[\mathbf{u}](\mathbf{x}) \right) + \frac{1}{N}
\sum_{i=1}^{N_{\partial\Omega}}
\lambda_{\partial\Omega}^{i} \mathcal{L}
\left( \mathcal{B}[\mathbf{u}](\mathbf{x})
\right),
denoting the weights as
:math:`\lambda_{\Omega}^1, \dots, \lambda_{\Omega}^{N_\Omega}` and
:math:`\lambda_{\partial \Omega}^1, \dots,
\lambda_{\partial \Omega}^{N_{\partial \Omega}}`
for :math:`\Omega` and :math:`\partial \Omega`, respectively.
Residual-based Attention Physics Informed Neural Network computes
the weights by updating them at every epoch as follows
.. math::
\lambda_i^{k+1} \leftarrow \gamma\lambda_i^{k} +
\eta\frac{\lvert r_i\rvert}{\max_j \lvert r_j\rvert},
where :math:`r_i` denotes the residual at point :math:`i`,
:math:`\gamma` denotes the decay rate, and :math:`\eta` is
the learning rate for the weights' update.
.. seealso::
**Original reference**: Sokratis J. Anagnostopoulos, Juan D. Toscano,
Nikolaos Stergiopulos, and George E. Karniadakis.
"Residual-based attention and connection to information
bottleneck theory in PINNs".
Computer Methods in Applied Mechanics and Engineering 421 (2024): 116805
DOI: `10.1016/
j.cma.2024.116805 <https://doi.org/10.1016/j.cma.2024.116805>`_.
"""
def __init__(self,
problem,
model,
optimizer=None,
scheduler=None,
weighting=None,
loss=None,
eta=0.001,
gamma=0.999):
"""
:param torch.nn.Module model: The neural network model to use.
:param AbstractProblem problem: The formulation of the problem.
:param torch.optim.Optimizer optimizer: The neural network optimizer to
use; default `None`.
:param torch.optim.LRScheduler scheduler: Learning rate scheduler;
default `None`.
:param WeightingInterface weighting: The weighting schema to use;
default `None`.
:param torch.nn.Module loss: The loss function to be minimized;
default `None`.
:param float | int eta: The learning rate for the weights of the
residual; default 0.001.
:param float gamma: The decay parameter in the update of the weights
of the residual. Must be between 0 and 1; default 0.999.
"""
super().__init__(model=model,
problem=problem,
optimizer=optimizer,
scheduler=scheduler,
weighting=weighting,
loss=loss)
# check consistency
check_consistency(eta, (float, int))
check_consistency(gamma, float)
assert (
0 < gamma < 1
), f"Invalid range: expected 0 < gamma < 1, got {gamma=}"
self.eta = eta
self.gamma = gamma
# initialize weights
self.weights = {}
for condition_name in problem.conditions:
self.weights[condition_name] = 0
# define vectorial loss
self._vectorial_loss = deepcopy(self.loss)
self._vectorial_loss.reduction = "none"
# for now RBAPINN is implemented only for batch_size = None
def on_train_start(self):
if self.trainer.batch_size is not None:
raise NotImplementedError("RBAPINN only works with full batch "
"size, set batch_size=None inside the "
"Trainer to use the solver.")
return super().on_train_start()
def _vect_to_scalar(self, loss_value):
"""
Elaboration of the pointwise loss.
:param LabelTensor loss_value: the matrix of pointwise loss.
:return: the scalar loss.
:rtype: LabelTensor
"""
if self.loss.reduction == "mean":
ret = torch.mean(loss_value)
elif self.loss.reduction == "sum":
ret = torch.sum(loss_value)
else:
raise RuntimeError(
f"Invalid reduction, got {self.loss.reduction} "
"but expected mean or sum."
)
return ret
def loss_phys(self, samples, equation):
"""
Computes the physics loss for the residual-based attention PINN
solver based on given samples and equation.
:param LabelTensor samples: The samples to evaluate the physics loss.
:param EquationInterface equation: The governing equation
representing the physics.
:return: The physics loss calculated based on given
samples and equation.
:rtype: LabelTensor
"""
residual = self.compute_residual(samples=samples, equation=equation)
cond = self.current_condition_name
r_norm = (
self.eta * torch.abs(residual)
/ (torch.max(torch.abs(residual)) + 1e-12)
)
self.weights[cond] = (self.gamma*self.weights[cond] + r_norm).detach()
loss_value = self._vectorial_loss(
torch.zeros_like(residual, requires_grad=True), residual
)
return self._vect_to_scalar(self.weights[cond] ** 2 * loss_value)
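The residual-based attention update used in ``loss_phys`` can be isolated in a few lines of plain torch; a sketch with hypothetical residuals, invented for illustration.
import torch

eta, gamma = 0.001, 0.999
residual = torch.randn(100)           # hypothetical pointwise residuals for one condition
weights = torch.zeros_like(residual)  # lambda_i, initialised to zero as in __init__

# update rule: lambda <- gamma * lambda + eta * |r| / max|r|
r_norm = eta * residual.abs() / (residual.abs().max() + 1e-12)
weights = (gamma * weights + r_norm).detach()
loss = (weights ** 2 * residual ** 2).mean()  # weighted mean-squared residual, as in loss_phys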


@@ -0,0 +1,430 @@
""" Module for Self-Adaptive PINN. """
import torch
from copy import deepcopy
from pina.utils import check_consistency
from pina.problem import InverseProblem
from ..solver import MultiSolverInterface
from .pinn_interface import PINNInterface
class Weights(torch.nn.Module):
"""
This class implements the mask model for the
self-adaptive weights of the Self-Adaptive PINN solver.
"""
def __init__(self, func):
"""
:param torch.nn.Module func: the mask module of SAPINN.
"""
super().__init__()
check_consistency(func, torch.nn.Module)
self.sa_weights = torch.nn.Parameter(torch.Tensor())
self.func = func
def forward(self):
"""
Forward pass implementation for the mask module.
It returns the function on the weights evaluation.
:return: evaluation of self adaptive weights through the mask.
:rtype: torch.Tensor
"""
return self.func(self.sa_weights)
class SelfAdaptivePINN(PINNInterface, MultiSolverInterface):
r"""
Self Adaptive Physics Informed Neural Network (SelfAdaptivePINN)
solver class. This class implements the Self-Adaptive Physics Informed Neural
Network solver, using a user-specified ``model`` to solve a specific
``problem``. It can be used for solving both forward and inverse problems.
The Self-Adaptive Physics Informed Neural Network aims to find
the solution :math:`\mathbf{u}:\Omega\rightarrow\mathbb{R}^m`
of the differential problem:
.. math::
\begin{cases}
\mathcal{A}[\mathbf{u}](\mathbf{x})=0\quad,\mathbf{x}\in\Omega\\
\mathcal{B}[\mathbf{u}](\mathbf{x})=0\quad,
\mathbf{x}\in\partial\Omega
\end{cases}
integrating the pointwise loss evaluation through a mask :math:`m` and
self adaptive weights that permit to focus the loss function on
specific training samples.
The loss function to solve the problem is
.. math::
\mathcal{L}_{\rm{problem}} = \frac{1}{N} \sum_{i=1}^{N_\Omega} m
\left( \lambda_{\Omega}^{i} \right) \mathcal{L} \left( \mathcal{A}
[\mathbf{u}](\mathbf{x}) \right) + \frac{1}{N}
\sum_{i=1}^{N_{\partial\Omega}}
m \left( \lambda_{\partial\Omega}^{i} \right) \mathcal{L}
\left( \mathcal{B}[\mathbf{u}](\mathbf{x})
\right),
denoting the self adaptive weights as
:math:`\lambda_{\Omega}^1, \dots, \lambda_{\Omega}^{N_\Omega}` and
:math:`\lambda_{\partial \Omega}^1, \dots,
\lambda_{\partial \Omega}^{N_{\partial \Omega}}`
for :math:`\Omega` and :math:`\partial \Omega`, respectively.
Self Adaptive Physics Informed Neural Network identifies the solution
and appropriate self adaptive weights by solving the following problem
.. math::
\min_{w} \max_{\lambda_{\Omega}^k, \lambda_{\partial \Omega}^s}
\mathcal{L} ,
where :math:`w` denotes the network parameters, and
:math:`\mathcal{L}` is a specific loss
function, default Mean Square Error:
.. math::
\mathcal{L}(v) = \| v \|^2_2.
.. seealso::
**Original reference**: McClenny, Levi D., and Ulisses M. Braga-Neto.
"Self-adaptive physics-informed neural networks."
Journal of Computational Physics 474 (2023): 111722.
DOI: `10.1016/
j.jcp.2022.111722 <https://doi.org/10.1016/j.jcp.2022.111722>`_.
"""
def __init__(self,
problem,
model,
weight_function=torch.nn.Sigmoid(),
optimizer_model=None,
optimizer_weights=None,
scheduler_model=None,
scheduler_weights=None,
weighting=None,
loss=None):
"""
:param AbstractProblem problem: The formulation of the problem.
:param torch.nn.Module model: The neural network model to use for
the model.
:param torch.nn.Module weight_function: The neural network model
related to the Self-Adaptive PINN mask; default `torch.nn.Sigmoid()`
:param torch.optim.Optimizer optimizer_model: The neural network
optimizer to use for the model network; default `None`.
:param torch.optim.Optimizer optimizer_weights: The neural network
optimizer to use for mask model; default `None`.
:param torch.optim.LRScheduler scheduler_model: Learning rate scheduler
for the model; default `None`.
:param torch.optim.LRScheduler scheduler_weights: Learning rate
scheduler for the mask model; default `None`.
:param WeightingInterface weighting: The weighting schema to use;
default `None`.
:param torch.nn.Module loss: The loss function to be minimized;
default `None`.
"""
# check consistency of weight_function
check_consistency(weight_function, torch.nn.Module)
# create models for weights
weights_dict = {}
for condition_name in problem.conditions:
weights_dict[condition_name] = Weights(weight_function)
weights_dict = torch.nn.ModuleDict(weights_dict)
super().__init__(models=[model, weights_dict],
problem=problem,
optimizers=[optimizer_model, optimizer_weights],
schedulers=[scheduler_model, scheduler_weights],
weighting=weighting,
loss=loss)
# Set automatic optimization to False
self.automatic_optimization = False
self._vectorial_loss = deepcopy(self.loss)
self._vectorial_loss.reduction = "none"
def forward(self, x):
"""
Forward pass implementation for the PINN
solver. It returns the function
evaluation :math:`\mathbf{u}(\mathbf{x})` at the control points
:math:`\mathbf{x}`.
:param LabelTensor x: Input tensor for the SAPINN solver. It expects
a tensor :math:`N \\times D`, where :math:`N` is the number of points
in the mesh and :math:`D` is the dimension of the problem.
:return: PINN solution.
:rtype: LabelTensor
"""
return self.model(x)
def training_step(self, batch):
"""
Solver training step, overridden to perform manual optimization.
:param batch: The batch element in the dataloader.
:type batch: tuple
:return: The sum of the loss functions.
:rtype: LabelTensor
"""
self.optimizer_model.instance.zero_grad()
self.optimizer_weights.instance.zero_grad()
loss = super().training_step(batch)
self.optimizer_model.instance.step()
self.optimizer_weights.instance.step()
return loss
def loss_phys(self, samples, equation):
"""
Computes the physics loss for the SAPINN solver based on given
samples and equation.
:param LabelTensor samples: The samples to evaluate the physics loss.
:param EquationInterface equation: The governing equation
representing the physics.
:return: The physics loss calculated based on given
samples and equation.
:rtype: torch.Tensor
"""
# Train the weights
weighted_loss = self._loss_phys(samples, equation)
loss_value = -weighted_loss.as_subclass(torch.Tensor)
self.manual_backward(loss_value)
# Detach samples from the existing computational graph and
# create a new one by setting requires_grad to True.
# In alternative set `retain_graph=True`.
samples = samples.detach()
samples.requires_grad_()
# Train the model
weighted_loss = self._loss_phys(samples, equation)
loss_value = weighted_loss.as_subclass(torch.Tensor)
self.manual_backward(loss_value)
return loss_value
def loss_data(self, input_pts, output_pts):
"""
Computes the data loss for the SAPINN solver based on input and
output. It computes the loss between the
network output against the true solution.
:param LabelTensor input_pts: The input to the neural networks.
:param LabelTensor output_pts: The true solution to compare the
network solution.
:return: The computed data loss.
:rtype: torch.Tensor
"""
residual = self.forward(input_pts) - output_pts
loss = self._vectorial_loss(
torch.zeros_like(residual, requires_grad=True), residual
)
loss_value = self._vect_to_scalar(loss).as_subclass(torch.Tensor)
self.manual_backward(loss_value)
return loss_value
def configure_optimizers(self):
"""
Optimizer configuration for the SelfAdaptive PINN solver.
:return: The optimizers and the schedulers
:rtype: tuple(list, list)
"""
# If the problem is an InverseProblem, add the unknown parameters
# to the parameters to be optimized
self.optimizer_model.hook(self.model.parameters())
self.optimizer_weights.hook(self.weights_dict.parameters())
if isinstance(self.problem, InverseProblem):
self.optimizer_model.instance.add_param_group(
{
"params": [
self._params[var]
for var in self.problem.unknown_variables
]
}
)
self.scheduler_model.hook(self.optimizer_model)
self.scheduler_weights.hook(self.optimizer_weights)
return (
[self.optimizer_model.instance,
self.optimizer_weights.instance],
[self.scheduler_model.instance,
self.scheduler_weights.instance]
)
def on_train_batch_end(self, outputs, batch, batch_idx):
"""
This method is called at the end of each training batch, and overrides
the PyTorch Lightning implementation for logging the checkpoints.
:param torch.Tensor outputs: The output from the model for the
current batch.
:param tuple batch: The current batch of data.
:param int batch_idx: The index of the current batch.
:return: Whatever is returned by the parent
method ``on_train_batch_end``.
:rtype: Any
"""
# increase by one the counter of optimization to save loggers
(
self.trainer.fit_loop.epoch_loop.manual_optimization
.optim_step_progress.total.completed
) += 1
return super().on_train_batch_end(outputs, batch, batch_idx)
def on_train_start(self):
"""
This method is called at the start of the training for setting
the self adaptive weights as parameters of the mask model.
:return: Whatever is returned by the parent
method ``on_train_start``.
:rtype: Any
"""
if self.trainer.batch_size is not None:
raise NotImplementedError("SelfAdaptivePINN only works with full "
"batch size, set batch_size=None inside "
"the Trainer to use the solver.")
device = torch.device(
self.trainer._accelerator_connector._accelerator_flag
)
# Initialize the self adaptive weights only for training points
for condition_name, tensor in (
self.trainer.data_module.train_dataset.input_points.items()
):
self.weights_dict[condition_name].sa_weights.data = (
torch.rand((tensor.shape[0], 1), device=device)
)
return super().on_train_start()
def on_load_checkpoint(self, checkpoint):
"""
Override the Pytorch Lightning ``on_load_checkpoint`` to handle
checkpoints for Self-Adaptive Weights. This method should not be
overridden if not intentionally.
:param dict checkpoint: Pytorch Lightning checkpoint dict.
"""
# First initialize self-adaptive weights with correct shape,
# then load the values from the checkpoint.
for condition_name, _ in self.problem.input_pts.items():
shape = checkpoint['state_dict'][
f"_pina_models.1.{condition_name}.sa_weights"
].shape
self.weights_dict[condition_name].sa_weights.data = (
torch.rand(shape)
)
return super().on_load_checkpoint(checkpoint)
def _loss_phys(self, samples, equation):
"""
Computation of the physical loss for SelfAdaptive PINN solver.
:param LabelTensor samples: Input samples to evaluate the physics loss.
:param EquationInterface equation: the governing equation representing
the physics.
:return: the weighted scalar loss.
:rtype: LabelTensor
"""
residual = self.compute_residual(samples, equation)
weights = self.weights_dict[self.current_condition_name].forward()
loss_value = self._vectorial_loss(
torch.zeros_like(residual, requires_grad=True), residual
)
return self._vect_to_scalar(weights * loss_value)
def _vect_to_scalar(self, loss_value):
"""
Elaboration of the pointwise loss through the mask model and the
self-adaptive weights.
:param LabelTensor loss_value: the matrix of pointwise losses.
:return: the scalar loss.
:rtype: LabelTensor
"""
if self.loss.reduction == "mean":
ret = torch.mean(loss_value)
elif self.loss.reduction == "sum":
ret = torch.sum(loss_value)
else:
raise RuntimeError(
f"Invalid reduction, got {self.loss.reduction} "
"but expected mean or sum."
)
return ret
@property
def model(self):
"""
Returns the neural network model.
:return: The neural network model.
:rtype: torch.nn.Module
"""
return self.models[0]
@property
def weights_dict(self):
"""
Return the mask models associated with the application of
the mask to the self-adaptive weights, one for each loss that
composes the global loss of the problem.
:return: The ModuleDict for mask models.
:rtype: torch.nn.ModuleDict
"""
return self.models[1]
@property
def scheduler_model(self):
"""
Returns the scheduler associated with the neural network model.
:return: The scheduler for the neural network model.
:rtype: torch.optim.lr_scheduler._LRScheduler
"""
return self.schedulers[0]
@property
def scheduler_weights(self):
"""
Returns the scheduler associated with the mask model (if applicable).
:return: The scheduler for the mask model.
:rtype: torch.optim.lr_scheduler._LRScheduler
"""
return self.schedulers[1]
@property
def optimizer_model(self):
"""
Returns the optimizer associated with the neural network model.
:return: The optimizer for the neural network model.
:rtype: torch.optim.Optimizer
"""
return self.optimizers[0]
@property
def optimizer_weights(self):
"""
Returns the optimizer associated with the mask model (if applicable).
:return: The optimizer for the mask model.
:rtype: torch.optim.Optimizer
"""
return self.optimizers[1]
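The min-max structure of ``loss_phys`` can be summarised in plain torch: the same masked loss is maximised with respect to the self-adaptive weights and minimised with respect to the model. A sketch with hypothetical residuals, invented for illustration.
import torch

mask = torch.nn.Sigmoid()                            # the default weight_function
sa_weights = torch.nn.Parameter(torch.rand(50, 1))   # one self-adaptive weight per training point
residual = torch.randn(50, 1)                        # hypothetical pointwise residuals

weighted_loss = (mask(sa_weights) * residual ** 2).mean()
# weight step: gradient ascent, implemented by backpropagating the negated loss
(-weighted_loss).backward()
# model step: the solver re-evaluates the residual on detached samples and
# backpropagates +weighted_loss through the model parameters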

pina/solver/rom.py Normal file

@@ -0,0 +1,188 @@
""" Module for ReducedOrderModelSolver """
import torch
from pina.solver import SupervisedSolver
class ReducedOrderModelSolver(SupervisedSolver):
r"""
ReducedOrderModelSolver solver class. This class implements a
Reduced Order Model solver, using user-specified ``reduction_network`` and
``interpolation_network`` to solve a specific ``problem``.
The Reduced Order Model approach aims to find
the solution :math:`\mathbf{u}:\Omega\rightarrow\mathbb{R}^m`
of the differential problem:
.. math::
\begin{cases}
\mathcal{A}[\mathbf{u}(\mu)](\mathbf{x})=0\quad,\mathbf{x}\in\Omega\\
\mathcal{B}[\mathbf{u}(\mu)](\mathbf{x})=0\quad,
\mathbf{x}\in\partial\Omega
\end{cases}
This is done by using two neural networks. The ``reduction_network``, which
contains an encoder :math:`\mathcal{E}_{\rm{net}}`, a decoder
:math:`\mathcal{D}_{\rm{net}}`; and an ``interpolation_network``
:math:`\mathcal{I}_{\rm{net}}`. The input is assumed to be discretised in
the spatial dimensions.
The following loss function is minimized during training
.. math::
\mathcal{L}_{\rm{problem}} = \frac{1}{N}\sum_{i=1}^N
\mathcal{L}(\mathcal{E}_{\rm{net}}[\mathbf{u}(\mu_i)] -
\mathcal{I}_{\rm{net}}[\mu_i]) +
\mathcal{L}(
\mathcal{D}_{\rm{net}}[\mathcal{E}_{\rm{net}}[\mathbf{u}(\mu_i)]] -
\mathbf{u}(\mu_i))
where :math:`\mathcal{L}` is a specific loss function, default Mean Square Error:
.. math::
\mathcal{L}(v) = \| v \|^2_2.
.. seealso::
**Original reference**: Hesthaven, Jan S., and Stefano Ubbiali.
"Non-intrusive reduced order modeling of nonlinear problems
using neural networks." Journal of Computational
Physics 363 (2018): 55-78.
DOI `10.1016/j.jcp.2018.02.037
<https://doi.org/10.1016/j.jcp.2018.02.037>`_.
.. note::
The specified ``reduction_network`` must contain two methods,
namely ``encode`` for input encoding and ``decode`` for decoding the
former result. The ``interpolation_network`` ``forward`` output
represents the interpolation of the latent space obtained with
``reduction_network.encode``.
.. note::
This solver uses the end-to-end training strategy, i.e. the
``reduction_network`` and ``interpolation_network`` are trained
simultaneously. For reference on this training strategy see:
Pichi, Federico, Beatriz Moya, and Jan S. Hesthaven.
"A graph convolutional autoencoder approach to model order reduction
for parametrized PDEs." Journal of
Computational Physics 501 (2024): 112762.
DOI
`10.1016/j.jcp.2024.112762 <https://doi.org/10.1016/
j.jcp.2024.112762>`_.
.. warning::
This solver works only for data-driven models. Hence, in the ``problem``
definition the conditions must only contain ``input_points``
(e.g. coefficient parameters, time parameters), and ``output_points``.
.. warning::
This solver does not currently support the possibility to pass
``extra_feature``.
"""
def __init__(
self,
problem,
reduction_network,
interpolation_network,
loss=None,
optimizer=None,
scheduler=None,
weighting=None,
use_lt=True,
):
"""
:param AbstractProblem problem: The formulation of the problem.
:param torch.nn.Module reduction_network: The reduction network used
for reducing the input space. It must contain two methods,
namely ``encode`` for input encoding and ``decode`` for decoding the
former result.
:param torch.nn.Module interpolation_network: The interpolation network
for interpolating the control parameters to the latent space obtained by
the ``reduction_network`` encoding.
:param torch.nn.Module loss: The loss function used as minimizer,
default :class:`torch.nn.MSELoss`.
:param torch.optim.Optimizer optimizer: The neural network optimizer to
use; default is :class:`torch.optim.Adam`.
:param torch.optim.LRScheduler scheduler: Learning
rate scheduler.
:param WeightingInterface weighting: The loss weighting to use.
:param bool use_lt: Using LabelTensors as input during training.
"""
model = torch.nn.ModuleDict(
{
"reduction_network": reduction_network,
"interpolation_network": interpolation_network,
}
)
super().__init__(
model=model,
problem=problem,
loss=loss,
optimizer=optimizer,
scheduler=scheduler,
weighting=weighting,
use_lt=use_lt
)
# check that the reduction network exposes encode/decode methods
if not hasattr(self.model["reduction_network"], "encode"):
raise SyntaxError(
"reduction_network must have encode method. "
"The encode method should return a lower "
"dimensional representation of the input."
)
if not hasattr(self.model["reduction_network"], "decode"):
raise SyntaxError(
"reduction_network must have decode method. "
"The decode method should return a high "
"dimensional representation of the encoding."
)
def forward(self, x):
"""
Forward pass implementation for the solver. It computes the latent
representation by calling ``interpolation_network.forward`` on the
input, and maps this representation to the output space by calling
``reduction_network.decode``.
:param torch.Tensor x: Input tensor.
:return: Solver solution.
:rtype: torch.Tensor
"""
reduction_network = self.model["reduction_network"]
interpolation_network = self.model["interpolation_network"]
return reduction_network.decode(interpolation_network(x))
def loss_data(self, input_pts, output_pts):
"""
The data loss for the ReducedOrderModelSolver solver.
It computes the loss between the network output and the true solution.
This function should not be overridden unless intentionally.
:param LabelTensor input_pts: The input to the neural networks.
:param LabelTensor output_pts: The true solution to compare against the
network solution.
:return: The residual loss averaged over the input coordinates.
:rtype: torch.Tensor
"""
# extract networks
reduction_network = self.model["reduction_network"]
interpolation_network = self.model["interpolation_network"]
# encoded representations loss
encode_repr_inter_net = interpolation_network(input_pts)
encode_repr_reduction_network = reduction_network.encode(output_pts)
loss_encode = self.loss(
encode_repr_inter_net, encode_repr_reduction_network
)
# reconstruction loss
loss_reconstruction = self.loss(
reduction_network.decode(encode_repr_reduction_network), output_pts
)
return loss_encode + loss_reconstruction
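For illustration only (not part of the committed code): a minimal sketch of networks compatible with ``ReducedOrderModelSolver``. Only the requirement that the reduction network exposes ``encode``/``decode`` and the constructor signature ``ReducedOrderModelSolver(problem, reduction_network, interpolation_network)`` come from the file above; class names, layer sizes and the ``problem`` object are illustrative assumptions.

import torch

class ReductionNetwork(torch.nn.Module):
    # autoencoder-style reduction exposing the required encode/decode methods
    def __init__(self, field_dim=100, latent_dim=10):
        super().__init__()
        self.encoder = torch.nn.Linear(field_dim, latent_dim)
        self.decoder = torch.nn.Linear(latent_dim, field_dim)
    def encode(self, x):
        return self.encoder(x)
    def decode(self, z):
        return self.decoder(z)

class InterpolationNetwork(torch.nn.Module):
    # maps the problem parameters to the latent space of the reduction network
    def __init__(self, param_dim=2, latent_dim=10):
        super().__init__()
        self.net = torch.nn.Sequential(
            torch.nn.Linear(param_dim, 20),
            torch.nn.Tanh(),
            torch.nn.Linear(20, latent_dim),
        )
    def forward(self, mu):
        return self.net(mu)

# solver = ReducedOrderModelSolver(problem, ReductionNetwork(), InterpolationNetwork())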

435
pina/solver/solver.py Normal file
View File

@@ -0,0 +1,435 @@
""" Solver module. """
import lightning
import torch
import sys
from abc import ABCMeta, abstractmethod
from ..problem import AbstractProblem
from ..optim import Optimizer, Scheduler, TorchOptimizer, TorchScheduler
from ..loss import WeightingInterface
from ..loss.scalar_weighting import _NoWeighting
from ..utils import check_consistency, labelize_forward
from torch._dynamo.eval_frame import OptimizedModule
class SolverInterface(lightning.pytorch.LightningModule, metaclass=ABCMeta):
"""
SolverInterface base class. This class is a wrapper of LightningModule.
"""
def __init__(self,
problem,
weighting,
use_lt):
"""
:param problem: A problem definition instance.
:type problem: AbstractProblem
:param weighting: The loss weighting to use.
:type weighting: WeightingInterface
:param use_lt: Using LabelTensors as input during training.
:type use_lt: bool
"""
super().__init__()
# check consistency of the problem
check_consistency(problem, AbstractProblem)
self._check_solver_consistency(problem)
self._pina_problem = problem
# check consistency of the weighting and hook the condition names
if weighting is None:
weighting = _NoWeighting()
check_consistency(weighting, WeightingInterface)
self._pina_weighting = weighting
weighting.condition_names = list(self._pina_problem.conditions.keys())
# check consistency use_lt
check_consistency(use_lt, bool)
self._use_lt = use_lt
# if use_lt is true add extract operation in input
if use_lt is True:
self.forward = labelize_forward(
forward=self.forward,
input_variables=problem.input_variables,
output_variables=problem.output_variables,
)
# PINA private attributes (some are overridden by derived classes)
self._pina_problem = problem
self._pina_models = None
self._pina_optimizers = None
self._pina_schedulers = None
def _check_solver_consistency(self, problem):
for condition in problem.conditions.values():
check_consistency(condition, self.accepted_conditions_types)
def _optimization_cycle(self, batch):
"""
Perform a private optimization cycle by computing the loss for each
condition in the given batch. The losses are then aggregated using the
specified weighting schema.
:param batch: A batch of data, where each element is a tuple containing
a condition name and a dictionary of points.
:type batch: list of tuples (str, dict)
:return: The aggregated loss over all conditions in the batch,
cast to a subclass of `torch.Tensor`.
:rtype: torch.Tensor
losses = self.optimization_cycle(batch)
for name, value in losses.items():
self.store_log(f'{name}_loss', value.item(), self.get_batch_size(batch))
loss = self.weighting.aggregate(losses).as_subclass(torch.Tensor)
return loss
def training_step(self, batch):
"""
Solver training step.
:param batch: The batch element in the dataloader.
:type batch: tuple
:return: The aggregated loss for the batch.
:rtype: torch.Tensor
"""
loss = self._optimization_cycle(batch=batch)
self.store_log('train_loss', loss, self.get_batch_size(batch))
return loss
def validation_step(self, batch):
"""
Solver validation step.
:param batch: The batch element in the dataloader.
:type batch: tuple
"""
loss = self._optimization_cycle(batch=batch)
self.store_log('val_loss', loss, self.get_batch_size(batch))
def test_step(self, batch):
"""
Solver test step.
:param batch: The batch element in the dataloader.
:type batch: tuple
"""
loss = self._optimization_cycle(batch=batch)
self.store_log('test_loss', loss, self.get_batch_size(batch))
def store_log(self, name, value, batch_size):
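"""
Log a metric through the Lightning logger.
:param str name: Name of the logged metric.
:param value: Value to log.
:param int batch_size: Batch size used by Lightning when aggregating the metric.
"""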
self.log(name=name,
value=value,
batch_size=batch_size,
**self.trainer.logging_kwargs
)
@abstractmethod
def forward(self, *args, **kwargs):
pass
@abstractmethod
def optimization_cycle(self, batch):
"""
Perform an optimization cycle by computing the loss for each condition
in the given batch.
:param batch: A batch of data, where each element is a tuple containing
a condition name and a dictionary of points.
:type batch: list of tuples (str, dict)
:return: The computed losses for all conditions in the batch, returned as
a dict mapping each condition name to the associated scalar loss
(a subclass of `torch.Tensor`).
:rtype: dict(torch.Tensor)
"""
pass
@property
def problem(self):
"""
The problem formulation.
"""
return self._pina_problem
@property
def use_lt(self):
"""
Using LabelTensor in training.
"""
return self._use_lt
@property
def weighting(self):
"""
The weighting mechanism.
"""
return self._pina_weighting
@staticmethod
def get_batch_size(batch):
# assuming batch is a custom Batch object
batch_size = 0
for data in batch:
batch_size += len(data[1]['input_points'])
return batch_size
@staticmethod
def default_torch_optimizer():
return TorchOptimizer(torch.optim.Adam, lr=0.001)
@staticmethod
def default_torch_scheduler():
return TorchScheduler(torch.optim.lr_scheduler.ConstantLR)
def on_train_start(self):
"""
Hook that is called before training begins.
Used to compile the model if the trainer is set to compile.
"""
super().on_train_start()
if self.trainer.compile:
self._compile_model()
def on_test_start(self):
"""
Hook that is called before testing begins.
Used to compile the model if the trainer is set to compile.
"""
super().on_test_start()
if self.trainer.compile and not self._check_already_compiled():
self._compile_model()
def _check_already_compiled(self):
models = self._pina_models
if len(models) == 1 and isinstance(self._pina_models[0],
torch.nn.ModuleDict):
models = list(self._pina_models[0].values())
for model in models:
if not isinstance(model, (OptimizedModule, torch.nn.ModuleDict)):
return False
return True
@staticmethod
def _perform_compilation(model):
model_device = next(model.parameters()).device
try:
if model_device == torch.device("mps:0"):
model = torch.compile(model, backend="eager")
else:
model = torch.compile(model, backend="inductor")
except Exception as e:
print("Compilation failed, running in normal mode.:\n", e)
return model
class SingleSolverInterface(SolverInterface):
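"""
Single solver base class. This class is a wrapper of SolverInterface
for solvers that use a single model, optimizer, and scheduler.
"""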
def __init__(self,
problem,
model,
optimizer=None,
scheduler=None,
weighting=None,
use_lt=True):
"""
:param problem: A problem definition instance.
:type problem: AbstractProblem
:param model: A torch nn.Module instances.
:type model: torch.nn.Module
:param Optimizer optimizer: The neural network optimizer to use.
:param Scheduler scheduler: The neural network scheduler to use.
:param WeightingInterface weighting: The loss weighting to use.
:param bool use_lt: Using LabelTensors as input during training.
"""
if optimizer is None:
optimizer = self.default_torch_optimizer()
if scheduler is None:
scheduler = self.default_torch_scheduler()
super().__init__(problem=problem,
use_lt=use_lt,
weighting=weighting)
# check consistency of models argument and encapsulate in list
check_consistency(model, torch.nn.Module)
# check scheduler consistency and encapsulate in list
check_consistency(scheduler, Scheduler)
# check optimizer consistency and encapsulate in list
check_consistency(optimizer, Optimizer)
# initialize the model (needed by Lightning to go to different devices)
self._pina_models = torch.nn.ModuleList([model])
self._pina_optimizers = [optimizer]
self._pina_schedulers = [scheduler]
def forward(self, x):
"""
Forward pass implementation for the solver.
:param torch.Tensor x: Input tensor.
:return: Solver solution.
:rtype: torch.Tensor
"""
x = self.model(x)
return x
def configure_optimizers(self):
"""
Optimizer configuration for the solver.
:return: The optimizers and the schedulers
:rtype: tuple(list, list)
"""
self.optimizer.hook(self.model.parameters())
self.scheduler.hook(self.optimizer)
return (
[self.optimizer.instance],
[self.scheduler.instance]
)
def _compile_model(self):
if isinstance(self._pina_models[0], torch.nn.ModuleDict):
self._compile_module_dict()
else:
self._compile_single_model()
def _compile_module_dict(self):
for name, model in self._pina_models[0].items():
self._pina_models[0][name] = self._perform_compilation(model)
def _compile_single_model(self):
self._pina_models[0] = self._perform_compilation(self._pina_models[0])
@property
def model(self):
"""
Model for training.
"""
return self._pina_models[0]
@property
def scheduler(self):
"""
Scheduler for training.
"""
return self._pina_schedulers[0]
@property
def optimizer(self):
"""
Optimizer for training.
"""
return self._pina_optimizers[0]
class MultiSolverInterface(SolverInterface):
"""
Multiple solver base class. This class is a wrapper of the
SolverInterface class for solvers using multiple models.
"""
def __init__(self,
problem,
models,
optimizers=None,
schedulers=None,
weighting=None,
use_lt=True):
"""
:param problem: A problem definition instance.
:type problem: AbstractProblem
:param models: Multiple torch nn.Module instances.
:type models: list[torch.nn.Module] | tuple[torch.nn.Module]
:param list(Optimizer) optimizers: A list of neural network
optimizers to use.
:param list(Scheduler) schedulers: A list of neural network
schedulers to use.
:param WeightingInterface weighting: The loss weighting to use.
:param bool use_lt: Using LabelTensors as input during training.
"""
if not isinstance(models, (list, tuple)) or len(models) < 2:
raise ValueError(
'models should be list[torch.nn.Module] or '
'tuple[torch.nn.Module] with len greater than '
'one.'
)
optimizers = optimizers if optimizers is not None else [None] * len(models)
schedulers = schedulers if schedulers is not None else [None] * len(models)
if any(opt is None for opt in optimizers):
optimizers = [
self.default_torch_optimizer() if opt is None else opt
for opt in optimizers
]
if any(sched is None for sched in schedulers):
schedulers = [
self.default_torch_scheduler() if sched is None else sched
for sched in schedulers
]
super().__init__(problem=problem,
use_lt=use_lt,
weighting=weighting)
# check consistency of models argument and encapsulate in list
check_consistency(models, torch.nn.Module)
# check scheduler consistency and encapsulate in list
check_consistency(schedulers, Scheduler)
# check optimizer consistency and encapsulate in list
check_consistency(optimizers, Optimizer)
# check length consistency optimizers
if len(models) != len(optimizers):
raise ValueError(
"You must define one optimizer for each model."
f"Got {len(models)} models, and {len(optimizers)}"
" optimizers."
)
# initialize the model
self._pina_models = torch.nn.ModuleList(models)
self._pina_optimizers = optimizers
self._pina_schedulers = schedulers
def configure_optimizers(self):
"""Optimizer configuration for the solver.
:return: The optimizers and the schedulers
:rtype: tuple(list, list)
"""
for optimizer, scheduler, model in zip(self.optimizers,
self.schedulers,
self.models):
optimizer.hook(model.parameters())
scheduler.hook(optimizer)
return (
[optimizer.instance for optimizer in self.optimizers],
[scheduler.instance for scheduler in self.schedulers]
)
def _compile_model(self):
for i, model in enumerate(self._pina_models):
if not isinstance(model, torch.nn.ModuleDict):
self._pina_models[i] = self._perform_compilation(model)
@property
def models(self):
"""
The torch models used for training."""
return self._pina_models
@property
def optimizers(self):
"""
The optimizers used for training."""
return self._pina_optimizers
@property
def schedulers(self):
"""
The schedulers used for training."""
return self._pina_schedulers
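For illustration only (not part of the committed code): the sketch below shows the contract a custom solver must satisfy, assuming it builds on ``SingleSolverInterface``. Only ``forward`` and ``optimization_cycle`` need to be provided, with ``optimization_cycle`` returning one scalar loss per condition; aggregation through the weighting schema and logging are handled by the base class. The solver name, the MSE choice, and the assumption that every condition carries ``input_points``/``output_points`` are illustrative.

import torch
from pina.solver import SingleSolverInterface
from pina.condition import InputOutputPointsCondition

class MyDataSolver(SingleSolverInterface):
    # minimal data-driven solver sketch: forward + one scalar loss per condition
    accepted_conditions_types = InputOutputPointsCondition

    def forward(self, x):
        return self.model(x)

    def optimization_cycle(self, batch):
        # the base class aggregates these per-condition losses via the weighting schema
        return {
            name: torch.nn.functional.mse_loss(
                self.forward(points["input_points"]), points["output_points"]
            )
            for name, points in batch
        }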

117
pina/solver/supervised.py Normal file
View File

@@ -0,0 +1,117 @@
""" Module for SupervisedSolver """
import torch
from torch.nn.modules.loss import _Loss
from .solver import SingleSolverInterface
from ..utils import check_consistency
from ..loss.loss_interface import LossInterface
from ..condition import InputOutputPointsCondition
class SupervisedSolver(SingleSolverInterface):
r"""
SupervisedSolver solver class. This class implements a SupervisedSolver,
using a user specified ``model`` to solve a specific ``problem``.
The Supervised Solver class aims to find
a map between the input :math:`\mathbf{v}:\Omega\rightarrow\mathbb{R}^m`
and the output :math:`\mathbf{u}:\Omega\rightarrow\mathbb{R}^m`. The input
can be discretised in space (as in :obj:`~pina.solver.rom.ReducedOrderModelSolver`),
or not (e.g. when training Neural Operators).
Given a model :math:`\mathcal{M}`, the following loss function is
minimized during training:
.. math::
\mathcal{L}_{\rm{problem}} = \frac{1}{N}\sum_{i=1}^N
\mathcal{L}(\mathbf{u}_i - \mathcal{M}(\mathbf{v}_i))
where :math:`\mathcal{L}` is a specific loss function,
default Mean Square Error:
.. math::
\mathcal{L}(v) = \| v \|^2_2.
In this context :math:`\mathbf{u}_i` and :math:`\mathbf{v}_i` mean that
we are seeking to approximate multiple (discretised) functions given
multiple (discretised) input functions.
"""
accepted_conditions_types = InputOutputPointsCondition
def __init__(self,
problem,
model,
loss=None,
optimizer=None,
scheduler=None,
weighting=None,
use_lt=True):
"""
:param AbstractProblem problem: The formulation of the problem.
:param torch.nn.Module model: The neural network model to use.
:param torch.nn.Module loss: The loss function used as minimizer,
default :class:`torch.nn.MSELoss`.
:param torch.optim.Optimizer optimizer: The neural network optimizer to
use; default is :class:`torch.optim.Adam`.
:param torch.optim.LRScheduler scheduler: Learning
rate scheduler.
:param WeightingInterface weighting: The loss weighting to use.
:param bool use_lt: Using LabelTensors as input during training.
"""
if loss is None:
loss = torch.nn.MSELoss()
super().__init__(model=model,
problem=problem,
optimizer=optimizer,
scheduler=scheduler,
weighting=weighting,
use_lt=use_lt)
# check consistency
check_consistency(loss, (LossInterface, _Loss, torch.nn.Module),
subclass=False)
self._loss = loss
def optimization_cycle(self, batch):
"""
Perform an optimization cycle by computing the loss for each condition
in the given batch.
:param batch: A batch of data, where each element is a tuple containing
a condition name and a dictionary of points.
:type batch: list of tuples (str, dict)
:return: The computed losses for all conditions in the batch, returned as
a dict mapping each condition name to the associated scalar loss
(a subclass of `torch.Tensor`).
:rtype: dict(torch.Tensor)
"""
condition_loss = {}
for condition_name, points in batch:
input_pts, output_pts = points['input_points'], points['output_points']
condition_loss[condition_name] = self.loss_data(
input_pts=input_pts, output_pts=output_pts)
return condition_loss
def loss_data(self, input_pts, output_pts):
"""
The data loss for the Supervised solver. It computes the loss between
the network output and the true solution. This function
should not be overridden unless intentionally.
:param input_pts: The input to the neural networks.
:type input_pts: LabelTensor | torch.Tensor
:param output_pts: The true solution to compare against the
network solution.
:type output_pts: LabelTensor | torch.Tensor
:return: The residual loss.
:rtype: torch.Tensor
"""
return self._loss(self.forward(input_pts), output_pts)
@property
def loss(self):
"""
Loss for training.
"""
return self._loss
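For illustration only (not part of the committed code): a plain-torch sketch of one optimization step of the data loss minimized by ``SupervisedSolver``. Shapes, the feed-forward network, and the learning rate are illustrative assumptions; in practice the PINA ``Trainer`` drives this loop through ``training_step`` and ``optimization_cycle``.

import torch

# illustrative data: N input samples v_i (e.g. parameters) and target fields u_i
v = torch.randn(32, 2)
u = torch.randn(32, 1)

model = torch.nn.Sequential(
    torch.nn.Linear(2, 20), torch.nn.Tanh(), torch.nn.Linear(20, 1)
)
optimizer = torch.optim.Adam(model.parameters(), lr=0.001)

# one step of the supervised loss: MSE between model output and true solution
optimizer.zero_grad()
loss = torch.nn.MSELoss()(model(v), u)  # MSE is the solver's default loss
loss.backward()
optimizer.step()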