fix rendering part 2
@@ -63,6 +63,9 @@ class AdvectionProblem(SpatialProblem, TimeDependentProblem):
     training physics-informed neural networks*.
     arXiv preprint arXiv:2308.08468 (2023).
     DOI: `arXiv:2308.08468 <https://arxiv.org/abs/2308.08468>`_.
+
+    :Example:
+        >>> problem = AdvectionProblem(c=1.0)
     """

     output_variables = ["u"]
@@ -49,6 +49,9 @@ class AllenCahnProblem(TimeDependentProblem, SpatialProblem):
     Computer Methods in Applied Mechanics and Engineering 421 (2024): 116805
     DOI: `10.1016/
     j.cma.2024.116805 <https://doi.org/10.1016/j.cma.2024.116805>`_.
+
+    :Example:
+        >>> problem = AllenCahnProblem()
     """

     output_variables = ["u"]
@@ -59,6 +59,9 @@ class DiffusionReactionProblem(TimeDependentProblem, SpatialProblem):
     **Original reference**: Si, Chenhao, et al. *Complex Physics-Informed
     Neural Network.* arXiv preprint arXiv:2502.04917 (2025).
     DOI: `arXiv:2502.04917 <https://arxiv.org/abs/2502.04917>`_.
+
+    :Example:
+        >>> problem = DiffusionReactionProblem()
     """

     output_variables = ["u"]
@@ -53,6 +53,9 @@ class HelmholtzProblem(SpatialProblem):
     **Original reference**: Si, Chenhao, et al. *Complex Physics-Informed
     Neural Network.* arXiv preprint arXiv:2502.04917 (2025).
     DOI: `arXiv:2502.04917 <https://arxiv.org/abs/2502.04917>`_.
+
+    :Example:
+        >>> problem = HelmholtzProblem()
     """

     output_variables = ["u"]
@@ -50,6 +50,9 @@ class InversePoisson2DSquareProblem(SpatialProblem, InverseProblem):
     Implementation of the inverse 2-dimensional Poisson problem in the square
     domain :math:`[0, 1] \times [0, 1]`,
     with unknown parameter domain :math:`[-1, 1] \times [-1, 1]`.
+
+    :Example:
+        >>> problem = InversePoisson2DSquareProblem()
     """

     output_variables = ["u"]
@@ -30,6 +30,9 @@ class Poisson2DSquareProblem(SpatialProblem):
     r"""
     Implementation of the 2-dimensional Poisson problem in the square domain
     :math:`[0, 1] \times [0, 1]`.
+
+    :Example:
+        >>> problem = Poisson2DSquareProblem()
     """

     output_variables = ["u"]
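The `:Example:` blocks added across these problem classes are doctest-style snippets. A minimal sketch of what they exercise, assuming the classes are importable from `pina.problem.zoo` (the import path is an assumption; only the class names and the `output_variables` attribute appear in the diff):

# Hedged sketch: instantiate the benchmark problems the way the new
# doctest lines do.
from pina.problem.zoo import AdvectionProblem, Poisson2DSquareProblem

problem = AdvectionProblem(c=1.0)   # matches the added doctest line
poisson = Poisson2DSquareProblem()
print(poisson.output_variables)     # ["u"], per the surrounding context lines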
@@ -45,16 +45,22 @@ class GAROM(MultiSolverInterface):
         :param torch.nn.Module generator: The generator model.
         :param torch.nn.Module discriminator: The discriminator model.
         :param torch.nn.Module loss: The loss function to be minimized.
-            If ``None``, ``PowerLoss(p=1)`` is used. Default is ``None``.
+            If ``None``, :class:`~pina.loss.power_loss.PowerLoss` with ``p=1``
+            is used. Default is ``None``.
         :param Optimizer optimizer_generator: The optimizer for the generator.
-            If `None`, the Adam optimizer is used. Default is ``None``.
-        :param Optimizer optimizer_discriminator: The optimizer for the
-            discriminator. If `None`, the Adam optimizer is used.
+            If `None`, the :class:`torch.optim.Adam` optimizer is used.
             Default is ``None``.
+        :param Optimizer optimizer_discriminator: The optimizer for the
+            discriminator. If `None`, the :class:`torch.optim.Adam` optimizer is
+            used. Default is ``None``.
         :param Scheduler scheduler_generator: The learning rate scheduler for
             the generator.
+            If `None`, the :class:`torch.optim.lr_scheduler.ConstantLR`
+            scheduler is used. Default is ``None``.
         :param Scheduler scheduler_discriminator: The learning rate scheduler
             for the discriminator.
+            If `None`, the :class:`torch.optim.lr_scheduler.ConstantLR`
+            scheduler is used. Default is ``None``.
         :param float gamma: Ratio of expected loss for generator and
             discriminator. Default is ``0.3``.
         :param float lambda_k: Learning rate for control theory optimization.

@@ -109,7 +115,7 @@ class GAROM(MultiSolverInterface):
             of the solution. Default is ``False``.
         :return: The expected value of the generator distribution. If
             ``variance=True``, the method returns also the variance.
-        :rtype: torch.Tensor | tuple(torch.Tensor, torch.Tensor)
+        :rtype: torch.Tensor | tuple[torch.Tensor, torch.Tensor]
         """

         # sampling

@@ -143,7 +149,7 @@ class GAROM(MultiSolverInterface):
         :param torch.Tensor parameters: The input tensor.
         :param torch.Tensor snapshots: The target tensor.
         :return: The residual loss and the generator loss.
-        :rtype: tuple(torch.Tensor, torch.Tensor)
+        :rtype: tuple[torch.Tensor, torch.Tensor]
         """
         optimizer = self.optimizer_generator
         optimizer.zero_grad()

@@ -170,7 +176,8 @@ class GAROM(MultiSolverInterface):

         :param torch.Tensor outputs: The ``model``'s output for the current
             batch.
-        :param dict batch: The current batch of data.
+        :param list[tuple[str, dict]] batch: A batch of data. Each element is a
+            tuple containing a condition name and a dictionary of points.
         :param int batch_idx: The index of the current batch.
         """
         # increase by one the counter of optimization to save loggers

@@ -187,7 +194,7 @@ class GAROM(MultiSolverInterface):
         :param torch.Tensor parameters: The input tensor.
         :param torch.Tensor snapshots: The target tensor.
         :return: The residual loss and the generator loss.
-        :rtype: tuple(torch.Tensor, torch.Tensor)
+        :rtype: tuple[torch.Tensor, torch.Tensor]
        """
         optimizer = self.optimizer_discriminator
         optimizer.zero_grad()

@@ -234,9 +241,12 @@ class GAROM(MultiSolverInterface):
         """
         The optimization cycle for the GAROM solver.

-        :param tuple batch: The batch element in the dataloader.
-        :return: The loss of the optimization cycle.
-        :rtype: LabelTensor
+        :param list[tuple[str, dict]] batch: A batch of data. Each element is a
+            tuple containing a condition name and a dictionary of points.
+        :return: The losses computed for all conditions in the batch, casted
+            to a subclass of :class:`torch.Tensor`. It should return a dict
+            containing the condition name and the associated scalar loss.
+        :rtype: dict
         """
         condition_loss = {}
         for condition_name, points in batch:

@@ -265,7 +275,8 @@ class GAROM(MultiSolverInterface):
         """
         The validation step for the PINN solver.

-        :param dict batch: The batch of data to use in the validation step.
+        :param list[tuple[str, dict]] batch: A batch of data. Each element is a
+            tuple containing a condition name and a dictionary of points.
         :return: The loss of the validation step.
         :rtype: torch.Tensor
         """

@@ -287,7 +298,8 @@ class GAROM(MultiSolverInterface):
         """
         The test step for the PINN solver.

-        :param dict batch: The batch of data to use in the test step.
+        :param list[tuple[str, dict]] batch: A batch of data. Each element is a
+            tuple containing a condition name and a dictionary of points.
         :return: The loss of the test step.
         :rtype: torch.Tensor
         """
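All the `batch` docstrings rewritten above standardize on the same shape: a list of `(condition_name, points)` tuples whose losses are collected into a dict. A hedged, self-contained sketch of that contract (a generic MSE stands in for GAROM's actual generator/discriminator losses):

import torch

def optimization_cycle(batch, loss_fn):
    # Contract documented above: iterate (condition_name, points) pairs
    # and return {condition_name: scalar loss tensor}.
    condition_loss = {}
    for condition_name, points in batch:
        condition_loss[condition_name] = loss_fn(points)
    return condition_loss

batch = [("data", {"input": torch.rand(4, 2), "target": torch.rand(4, 1)})]
mse = lambda p: torch.mean((p["input"].sum(-1, keepdim=True) - p["target"]) ** 2)
print(optimization_cycle(batch, mse))  # {'data': tensor(...)}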
@@ -53,14 +53,16 @@ class CausalPINN(PINN):
     .. seealso::

         **Original reference**: Wang, Sifan, Shyam Sankaran, and Paris
-        Perdikaris. "Respecting causality for training physics-informed
-        neural networks." Computer Methods in Applied Mechanics
-        and Engineering 421 (2024): 116813.
-        DOI `10.1016 <https://doi.org/10.1016/j.cma.2024.116813>`_.
+        Perdikaris.
+        *Respecting causality for training physics-informed
+        neural networks.*
+        Computer Methods in Applied Mechanics and Engineering 421 (2024):116813.
+        DOI: `10.1016 <https://doi.org/10.1016/j.cma.2024.116813>`_.

     .. note::
         This class is only compatible with problems that inherit from the
-        :class:`~pina.problem.TimeDependentProblem` class.
+        :class:`~pina.problem.time_dependent_problem.TimeDependentProblem`
+        class.
     """

     def __init__(

@@ -77,17 +79,19 @@ class CausalPINN(PINN):
         Initialization of the :class:`CausalPINN` class.

         :param AbstractProblem problem: The problem to be solved. It must
-            inherit from at least :class:`~pina.problem.TimeDependentProblem`.
+            inherit from at least
+            :class:`~pina.problem.time_dependent_problem.TimeDependentProblem`.
         :param torch.nn.Module model: The neural network model to be used.
-        :param torch.optim.Optimizer optimizer: The optimizer to be used
-            If `None`, the Adam optimizer is used. Default is ``None``.
-        :param torch.optim.LRScheduler scheduler: Learning rate scheduler.
-            If `None`, the constant learning rate scheduler is used.
+        :param Optimizer optimizer: The optimizer to be used.
+            If `None`, the :class:`torch.optim.Adam` optimizer is used.
             Default is ``None``.
+        :param torch.optim.LRScheduler scheduler: Learning rate scheduler.
+            If `None`, the :class:`torch.optim.lr_scheduler.ConstantLR`
+            scheduler is used. Default is ``None``.
         :param WeightingInterface weighting: The weighting schema to be used.
             If `None`, no weighting schema is used. Default is ``None``.
         :param torch.nn.Module loss: The loss function to be minimized.
-            If `None`, the Mean Squared Error (MSE) loss is used.
+            If `None`, the :class:`torch.nn.MSELoss` loss is used.
             Default is `None`.
         :param float eps: The exponential decay parameter. Default is ``100``.
         :raises ValueError: If the problem is not a TimeDependentProblem.
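The `eps` parameter documented above is the exponential decay rate of the causal weights from the referenced paper: the residual at time slice i is weighted by w_i = exp(-eps * sum over j < i of L_j). A sketch of the paper's formula, not PINA's internal code:

import torch

def causal_weights(time_losses, eps=100.0):
    # w_i = exp(-eps * sum_{j<i} L_j): later time slices only start to
    # count once the residuals at earlier times are small.
    cumulative = torch.cumsum(time_losses, dim=0) - time_losses
    return torch.exp(-eps * cumulative).detach()

print(causal_weights(torch.tensor([0.01, 0.02, 0.05])))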
@@ -46,8 +46,8 @@ class CompetitivePINN(PINNInterface, MultiSolverInterface):
     .. seealso::

         **Original reference**: Zeng, Qi, et al.
-        "Competitive physics informed networks." International Conference on
-        Learning Representations, ICLR 2022
+        *Competitive physics informed networks.*
+        International Conference on Learning Representations, ICLR 2022
         `OpenReview Preprint <https://openreview.net/forum?id=z9SIj-IM7tn>`_.
     """

@@ -72,21 +72,23 @@ class CompetitivePINN(PINNInterface, MultiSolverInterface):
             If `None`, the discriminator is a deepcopy of the ``model``.
             Default is ``None``.
         :param torch.optim.Optimizer optimizer_model: The optimizer of the
-            ``model``. If `None`, the Adam optimizer is used.
-            Default is ``None``.
+            ``model``. If `None`, the :class:`torch.optim.Adam` optimizer is
+            used. Default is ``None``.
         :param torch.optim.Optimizer optimizer_discriminator: The optimizer of
-            the ``discriminator``. If `None`, the Adam optimizer is used.
-            Default is ``None``.
-        :param torch.optim.LRScheduler scheduler_model: Learning rate scheduler
-            for the ``model``. If `None`, the constant learning rate scheduler
-            is used. Default is ``None``.
-        :param torch.optim.LRScheduler scheduler_discriminator: Learning rate
-            scheduler for the ``discriminator``. If `None`, the constant
-            learning rate scheduler is used. Default is ``None``.
+            the ``discriminator``. If `None`, the :class:`torch.optim.Adam`
+            optimizer is used. Default is ``None``.
+        :param Scheduler scheduler_model: Learning rate scheduler for the
+            ``model``.
+            If `None`, the :class:`torch.optim.lr_scheduler.ConstantLR`
+            scheduler is used. Default is ``None``.
+        :param Scheduler scheduler_discriminator: Learning rate scheduler for
+            the ``discriminator``.
+            If `None`, the :class:`torch.optim.lr_scheduler.ConstantLR`
+            scheduler is used. Default is ``None``.
         :param WeightingInterface weighting: The weighting schema to be used.
             If `None`, no weighting schema is used. Default is ``None``.
         :param torch.nn.Module loss: The loss function to be minimized.
-            If `None`, the Mean Squared Error (MSE) loss is used.
+            If `None`, the :class:`torch.nn.MSELoss` loss is used.
             Default is `None`.
         """
         if discriminator is None:

@@ -118,7 +120,8 @@ class CompetitivePINN(PINNInterface, MultiSolverInterface):
         """
         Solver training step, overridden to perform manual optimization.

-        :param dict batch: The batch element in the dataloader.
+        :param list[tuple[str, dict]] batch: A batch of data. Each element is a
+            tuple containing a condition name and a dictionary of points.
         :return: The aggregated loss.
         :rtype: LabelTensor
         """

@@ -163,7 +166,7 @@ class CompetitivePINN(PINNInterface, MultiSolverInterface):
         Optimizer configuration.

         :return: The optimizers and the schedulers
-        :rtype: tuple(list, list)
+        :rtype: tuple[list[Optimizer], list[Scheduler]]
         """
         # If the problem is an InverseProblem, add the unknown parameters
         # to the parameters to be optimized

@@ -198,7 +201,8 @@ class CompetitivePINN(PINNInterface, MultiSolverInterface):

         :param torch.Tensor outputs: The ``model``'s output for the current
             batch.
-        :param dict batch: The current batch of data.
+        :param list[tuple[str, dict]] batch: A batch of data. Each element is a
+            tuple containing a condition name and a dictionary of points.
         :param int batch_idx: The index of the current batch.
         """
         # increase by one the counter of optimization to save loggers

@@ -234,7 +238,7 @@ class CompetitivePINN(PINNInterface, MultiSolverInterface):
         The optimizer associated to the model.

         :return: The optimizer for the model.
-        :rtype: torch.optim.Optimizer
+        :rtype: Optimizer
         """
         return self.optimizers[0]

@@ -244,7 +248,7 @@ class CompetitivePINN(PINNInterface, MultiSolverInterface):
         The optimizer associated to the discriminator.

         :return: The optimizer for the discriminator.
-        :rtype: torch.optim.Optimizer
+        :rtype: Optimizer
         """
         return self.optimizers[1]

@@ -254,7 +258,7 @@ class CompetitivePINN(PINNInterface, MultiSolverInterface):
         The scheduler associated to the model.

         :return: The scheduler for the model.
-        :rtype: torch.optim.lr_scheduler._LRScheduler
+        :rtype: Scheduler
         """
         return self.schedulers[0]

@@ -264,6 +268,6 @@ class CompetitivePINN(PINNInterface, MultiSolverInterface):
         The scheduler associated to the discriminator.

         :return: The scheduler for the discriminator.
-        :rtype: torch.optim.lr_scheduler._LRScheduler
+        :rtype: Scheduler
         """
         return self.schedulers[1]
@@ -46,15 +46,15 @@ class GradientPINN(PINN):

     .. seealso::

-        **Original reference**: Yu, Jeremy, et al. "Gradient-enhanced
-        physics-informed neural networks for forward and inverse
-        PDE problems." Computer Methods in Applied Mechanics
-        and Engineering 393 (2022): 114823.
+        **Original reference**: Yu, Jeremy, et al.
+        *Gradient-enhanced physics-informed neural networks for forward and
+        inverse PDE problems.*
+        Computer Methods in Applied Mechanics and Engineering 393 (2022):114823.
         DOI: `10.1016 <https://doi.org/10.1016/j.cma.2022.114823>`_.

     .. note::
         This class is only compatible with problems that inherit from the
-        :class:`~pina.problem.SpatialProblem` class.
+        :class:`~pina.problem.spatial_problem.SpatialProblem` class.
     """

     def __init__(

@@ -70,18 +70,20 @@ class GradientPINN(PINN):
         Initialization of the :class:`GradientPINN` class.

         :param AbstractProblem problem: The problem to be solved.
-            It must inherit from at least :class:`~pina.problem.SpatialProblem`
-            to compute the gradient of the loss.
+            It must inherit from at least
+            :class:`~pina.problem.spatial_problem.SpatialProblem` to compute the
+            gradient of the loss.
         :param torch.nn.Module model: The neural network model to be used.
-        :param torch.optim.Optimizer optimizer: The optimizer to be used.
-            If `None`, the Adam optimizer is used. Default is ``None``.
-        :param torch.optim.LRScheduler scheduler: Learning rate scheduler.
-            If `None`, the constant learning rate scheduler is used.
+        :param Optimizer optimizer: The optimizer to be used.
+            If `None`, the :class:`torch.optim.Adam` optimizer is used.
             Default is ``None``.
+        :param Scheduler scheduler: Learning rate scheduler.
+            If `None`, the :class:`torch.optim.lr_scheduler.ConstantLR`
+            scheduler is used. Default is ``None``.
         :param WeightingInterface weighting: The weighting schema to be used.
             If `None`, no weighting schema is used. Default is ``None``.
         :param torch.nn.Module loss: The loss function to be minimized.
-            If `None`, the Mean Squared Error (MSE) loss is used.
+            If `None`, the :class:`torch.nn.MSELoss` loss is used.
             Default is `None`.
         :raises ValueError: If the problem is not a SpatialProblem.
         """
@@ -42,7 +42,8 @@ class PINN(PINNInterface, SingleSolverInterface):

         **Original reference**: Karniadakis, G. E., Kevrekidis, I. G., Lu, L.,
         Perdikaris, P., Wang, S., & Yang, L. (2021).
-        Physics-informed machine learning. Nature Reviews Physics, 3, 422-440.
+        *Physics-informed machine learning.*
+        Nature Reviews Physics, 3, 422-440.
         DOI: `10.1038 <https://doi.org/10.1038/s42254-021-00314-5>`_.
     """

@@ -60,15 +61,16 @@ class PINN(PINNInterface, SingleSolverInterface):

         :param AbstractProblem problem: The problem to be solved.
         :param torch.nn.Module model: The neural network model to be used.
-        :param torch.optim.Optimizer optimizer: The optimizer to be used.
-            If `None`, the Adam optimizer is used. Default is ``None``.
-        :param torch.optim.LRScheduler scheduler: Learning rate scheduler.
-            If `None`, the constant learning rate scheduler is used.
+        :param Optimizer optimizer: The optimizer to be used.
+            If `None`, the :class:`torch.optim.Adam` optimizer is used.
             Default is ``None``.
+        :param Scheduler scheduler: Learning rate scheduler.
+            If `None`, the :class:`torch.optim.lr_scheduler.ConstantLR`
+            scheduler is used. Default is ``None``.
         :param WeightingInterface weighting: The weighting schema to be used.
             If `None`, no weighting schema is used. Default is ``None``.
         :param torch.nn.Module loss: The loss function to be minimized.
-            If `None`, the Mean Squared Error (MSE) loss is used.
+            If `None`, the :class:`torch.nn.MSELoss` loss is used.
             Default is `None`.
         """
         super().__init__(

@@ -101,7 +103,7 @@ class PINN(PINNInterface, SingleSolverInterface):
         Optimizer configuration for the PINN solver.

         :return: The optimizers and the schedulers
-        :rtype: tuple(list, list)
+        :rtype: tuple[list[Optimizer], list[Scheduler]]
         """
         # If the problem is an InverseProblem, add the unknown parameters
         # to the parameters to be optimized.
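`configure_optimizers` now documents its return type as `tuple[list[Optimizer], list[Scheduler]]`, the Lightning convention of paired lists. A hedged sketch with raw torch objects (PINA actually wraps these in its own `Optimizer`/`Scheduler` classes and hooks them to the model, as the `.hook(...)` context lines later in this commit show):

import torch

model = torch.nn.Linear(2, 1)
optimizer = torch.optim.Adam(model.parameters(), lr=1e-3)   # documented default
scheduler = torch.optim.lr_scheduler.ConstantLR(optimizer)  # documented default

def configure_optimizers():
    # One optimizer and one scheduler per model, as two parallel lists.
    return [optimizer], [scheduler]

optimizers, schedulers = configure_optimizers()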
@@ -18,12 +18,12 @@ from ...condition import (
 class PINNInterface(SolverInterface, metaclass=ABCMeta):
     """
     Base class for Physics-Informed Neural Network (PINN) solvers, implementing
-    the :class:`~pina.solver.SolverInterface` class.
+    the :class:`~pina.solver.solver.SolverInterface` class.

     The `PINNInterface` class can be used to define PINNs that work with one or
     multiple optimizers and/or models. By default, it is compatible with
-    problems defined by :class:`~pina.problem.AbstractProblem`, and users can
-    choose the problem type the solver is meant to address.
+    problems defined by :class:`~pina.problem.abstract_problem.AbstractProblem`,
+    and users can choose the problem type the solver is meant to address.
     """

     accepted_conditions_types = (

@@ -38,10 +38,10 @@ class PINNInterface(SolverInterface, metaclass=ABCMeta):

         :param AbstractProblem problem: The problem to be solved.
         :param torch.nn.Module loss: The loss function to be minimized.
-            If ``None``, the Mean Squared Error (MSE) loss is used.
-            Default is ``None``.
+            If `None`, the :class:`torch.nn.MSELoss` loss is used.
+            Default is `None`.
         :param kwargs: Additional keyword arguments to be passed to the
-            :class:`~pina.solver.SolverInterface` class.
+            :class:`~pina.solver.solver.SolverInterface` class.
         """

         if loss is None:

@@ -73,9 +73,12 @@ class PINNInterface(SolverInterface, metaclass=ABCMeta):
         loss as argument, thus distinguishing the training step from the
         validation and test steps.

-        :param dict batch: The batch of data to use in the optimization cycle.
-        :return: The loss of the optimization cycle.
-        :rtype: torch.Tensor
+        :param list[tuple[str, dict]] batch: A batch of data. Each element is a
+            tuple containing a condition name and a dictionary of points.
+        :return: The losses computed for all conditions in the batch, casted
+            to a subclass of :class:`torch.Tensor`. It should return a dict
+            containing the condition name and the associated scalar loss.
+        :rtype: dict
         """
         return self._run_optimization_cycle(batch, self.loss_phys)

@@ -84,7 +87,8 @@ class PINNInterface(SolverInterface, metaclass=ABCMeta):
         """
         The validation step for the PINN solver.

-        :param dict batch: The batch of data to use in the validation step.
+        :param list[tuple[str, dict]] batch: A batch of data. Each element is a
+            tuple containing a condition name and a dictionary of points.
         :return: The loss of the validation step.
         :rtype: torch.Tensor
         """

@@ -98,7 +102,8 @@ class PINNInterface(SolverInterface, metaclass=ABCMeta):
         """
         The test step for the PINN solver.

-        :param dict batch: The batch of data to use in the test step.
+        :param list[tuple[str, dict]] batch: A batch of data. Each element is a
+            tuple containing a condition name and a dictionary of points.
         :return: The loss of the test step.
         :rtype: torch.Tensor
         """

@@ -169,10 +174,13 @@ class PINNInterface(SolverInterface, metaclass=ABCMeta):
         Compute, given a batch, the loss for each condition and return a
         dictionary with the condition name as key and the loss as value.

-        :param dict batch: The batch of data to use in the optimization cycle.
+        :param list[tuple[str, dict]] batch: A batch of data. Each element is a
+            tuple containing a condition name and a dictionary of points.
         :param function loss_residuals: The loss function to be minimized.
-        :return: The loss for each condition.
-        :rtype dict
+        :return: The losses computed for all conditions in the batch, casted
+            to a subclass of :class:`torch.Tensor`. It should return a dict
+            containing the condition name and the associated scalar loss.
+        :rtype: dict
         """
         condition_loss = {}
         for condition_name, points in batch:
@@ -59,11 +59,11 @@ class RBAPINN(PINN):
     .. seealso::
         **Original reference**: Sokratis J. Anagnostopoulos, Juan D. Toscano,
         Nikolaos Stergiopulos, and George E. Karniadakis.
-        "Residual-based attention and connection to information
-        bottleneck theory in PINNs".
+        *Residual-based attention and connection to information
+        bottleneck theory in PINNs.*
         Computer Methods in Applied Mechanics and Engineering 421 (2024): 116805
-        DOI: `10.1016/
-        j.cma.2024.116805 <https://doi.org/10.1016/j.cma.2024.116805>`_.
+        DOI: `10.1016/j.cma.2024.116805
+        <https://doi.org/10.1016/j.cma.2024.116805>`_.
     """

     def __init__(

@@ -82,15 +82,16 @@ class RBAPINN(PINN):

         :param AbstractProblem problem: The problem to be solved.
         :param torch.nn.Module model: The neural network model to be used.
-        :param torch.optim.Optimizer optimizer: The optimizer to be used.
-            If `None`, the Adam optimizer is used. Default is ``None``.
-        :param torch.optim.LRScheduler scheduler: Learning rate scheduler.
-            If `None`, the constant learning rate scheduler is used.
+        :param Optimizer optimizer: The optimizer to be used.
+            If `None`, the :class:`torch.optim.Adam` optimizer is used.
             Default is ``None``.
+        :param Scheduler scheduler: Learning rate scheduler.
+            If `None`, the :class:`torch.optim.lr_scheduler.ConstantLR`
+            scheduler is used. Default is ``None``.
         :param WeightingInterface weighting: The weighting schema to be used.
             If `None`, no weighting schema is used. Default is ``None``.
         :param torch.nn.Module loss: The loss function to be minimized.
-            If `None`, the Mean Squared Error (MSE) loss is used.
+            If `None`, the :class:`torch.nn.MSELoss` loss is used.
             Default is `None`.
         :param float | int eta: The learning rate for the weights of the
             residuals. Default is ``0.001``.

@@ -147,7 +148,7 @@ class RBAPINN(PINN):
         :param LabelTensor loss_value: the tensor of pointwise losses.
         :raises RuntimeError: If the loss reduction is not ``mean`` or ``sum``.
         :return: The computed scalar loss.
-        :rtype LabelTensor
+        :rtype: LabelTensor
         """
         if self.loss.reduction == "mean":
             ret = torch.mean(loss_value)
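The `:rtype:` fix above sits next to the reduction logic visible in the context lines: a tensor of pointwise losses is collapsed to a scalar according to the wrapped loss's `reduction` attribute. A standalone sketch of that step:

import torch

def reduce_pointwise(loss_value, reduction):
    # Mirrors the documented behaviour: only "mean" and "sum" are accepted.
    if reduction == "mean":
        return torch.mean(loss_value)
    if reduction == "sum":
        return torch.sum(loss_value)
    raise RuntimeError(f"Invalid reduction, got {reduction}")

print(reduce_pointwise(torch.tensor([0.1, 0.4, 0.3]), "mean"))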
@@ -94,10 +94,10 @@ class SelfAdaptivePINN(PINNInterface, MultiSolverInterface):

     .. seealso::
         **Original reference**: McClenny, Levi D., and Ulisses M. Braga-Neto.
-        "Self-adaptive physics-informed neural networks."
+        *Self-adaptive physics-informed neural networks.*
         Journal of Computational Physics 474 (2023): 111722.
-        DOI: `10.1016/
-        j.jcp.2022.111722 <https://doi.org/10.1016/j.jcp.2022.111722>`_.
+        DOI: `10.1016/j.jcp.2022.111722
+        <https://doi.org/10.1016/j.jcp.2022.111722>`_.
     """

     def __init__(

@@ -119,22 +119,25 @@ class SelfAdaptivePINN(PINNInterface, MultiSolverInterface):
         :param torch.nn.Module model: The model to be used.
         :param torch.nn.Module weight_function: The Self-Adaptive mask model.
             Default is ``torch.nn.Sigmoid()``.
-        :param torch.optim.Optimizer optimizer_model: The optimizer of the
-            ``model``. If `None`, the Adam optimizer is used.
+        :param Optimizer optimizer_model: The optimizer of the ``model``.
+            If `None`, the :class:`torch.optim.Adam` optimizer is used.
             Default is ``None``.
-        :param torch.optim.Optimizer optimizer_weights: The optimizer of the
-            ``weight_function``. If `None`, the Adam optimizer is used.
+        :param Optimizer optimizer_weights: The optimizer of the
+            ``weight_function``.
+            If `None`, the :class:`torch.optim.Adam` optimizer is used.
             Default is ``None``.
-        :param torch.optim.LRScheduler scheduler_model: Learning rate scheduler
-            for the ``model``. If `None`, the constant learning rate scheduler
-            is used. Default is ``None``.
-        :param torch.optim.LRScheduler scheduler_weights: Learning rate
-            scheduler for the ``weight_function``. If `None`, the constant
-            learning rate scheduler is used. Default is ``None``.
+        :param Scheduler scheduler_model: Learning rate scheduler for the
+            ``model``.
+            If `None`, the :class:`torch.optim.lr_scheduler.ConstantLR`
+            scheduler is used. Default is ``None``.
+        :param Scheduler scheduler_weights: Learning rate scheduler for the
+            ``weight_function``.
+            If `None`, the :class:`torch.optim.lr_scheduler.ConstantLR`
+            scheduler is used. Default is ``None``.
         :param WeightingInterface weighting: The weighting schema to be used.
             If `None`, no weighting schema is used. Default is ``None``.
         :param torch.nn.Module loss: The loss function to be minimized.
-            If `None`, the Mean Squared Error (MSE) loss is used.
+            If `None`, the :class:`torch.nn.MSELoss` loss is used.
             Default is `None`.
         """
         # check consistency weitghs_function

@@ -175,7 +178,8 @@ class SelfAdaptivePINN(PINNInterface, MultiSolverInterface):
         """
         Solver training step, overridden to perform manual optimization.

-        :param dict batch: The batch element in the dataloader.
+        :param list[tuple[str, dict]] batch: A batch of data. Each element is a
+            tuple containing a condition name and a dictionary of points.
         :return: The aggregated loss.
         :rtype: LabelTensor
         """

@@ -198,7 +202,7 @@ class SelfAdaptivePINN(PINNInterface, MultiSolverInterface):
         Optimizer configuration.

         :return: The optimizers and the schedulers
-        :rtype: tuple(list, list)
+        :rtype: tuple[list[Optimizer], list[Scheduler]]
         """
         # If the problem is an InverseProblem, add the unknown parameters
         # to the parameters to be optimized

@@ -227,7 +231,8 @@ class SelfAdaptivePINN(PINNInterface, MultiSolverInterface):

         :param torch.Tensor outputs: The ``model``'s output for the current
             batch.
-        :param dict batch: The current batch of data.
+        :param list[tuple[str, dict]] batch: A batch of data. Each element is a
+            tuple containing a condition name and a dictionary of points.
         :param int batch_idx: The index of the current batch.
         """
         # increase by one the counter of optimization to save loggers

@@ -307,7 +312,7 @@ class SelfAdaptivePINN(PINNInterface, MultiSolverInterface):
         :param LabelTensor loss_value: the tensor of pointwise losses.
         :raises RuntimeError: If the loss reduction is not ``mean`` or ``sum``.
         :return: The computed scalar loss.
-        :rtype LabelTensor
+        :rtype: LabelTensor
         """
         if self.loss.reduction == "mean":
             ret = torch.mean(loss_value)

@@ -346,7 +351,7 @@ class SelfAdaptivePINN(PINNInterface, MultiSolverInterface):
         The scheduler associated to the model.

         :return: The scheduler for the model.
-        :rtype: torch.optim.lr_scheduler._LRScheduler
+        :rtype: Scheduler
         """
         return self.schedulers[0]

@@ -356,7 +361,7 @@ class SelfAdaptivePINN(PINNInterface, MultiSolverInterface):
         The scheduler associated to the mask model.

         :return: The scheduler for the mask model.
-        :rtype: torch.optim.lr_scheduler._LRScheduler
+        :rtype: Scheduler
         """
         return self.schedulers[1]

@@ -366,7 +371,7 @@ class SelfAdaptivePINN(PINNInterface, MultiSolverInterface):
         Returns the optimizer associated to the model.

         :return: The optimizer for the model.
-        :rtype: torch.optim.Optimizer
+        :rtype: Optimizer
         """
         return self.optimizers[0]

@@ -376,6 +381,6 @@ class SelfAdaptivePINN(PINNInterface, MultiSolverInterface):
         The optimizer associated to the mask model.

         :return: The optimizer for the mask model.
-        :rtype: torch.optim.Optimizer
+        :rtype: Optimizer
         """
         return self.optimizers[1]
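The mask model documented above (default `torch.nn.Sigmoid()`) implements the self-adaptive scheme of the referenced paper: trainable per-point weights, passed through the mask, scale the pointwise residuals; the model descends the weighted loss while the weights ascend it. A stand-in sketch of the idea, not PINA's implementation:

import torch

weights = torch.nn.Parameter(torch.zeros(10))  # one weight per training point
mask = torch.nn.Sigmoid()                      # documented default mask
residuals = torch.rand(10)                     # stand-in pointwise residuals

loss = torch.mean(mask(weights) * residuals ** 2)
# optimizer_model minimizes `loss`; optimizer_weights steps on `-loss`,
# i.e. gradient ascent on the weights, per the referenced paper.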
@@ -45,8 +45,8 @@ class ReducedOrderModelSolver(SupervisedSolver):
     .. seealso::

         **Original reference**: Hesthaven, Jan S., and Stefano Ubbiali.
-        "Non-intrusive reduced order modeling of nonlinear problems using
-        neural networks."
+        *Non-intrusive reduced order modeling of nonlinear problems using
+        neural networks.*
         Journal of Computational Physics 363 (2018): 55-78.
         DOI `10.1016/j.jcp.2018.02.037
         <https://doi.org/10.1016/j.jcp.2018.02.037>`_.

@@ -67,8 +67,8 @@ class ReducedOrderModelSolver(SupervisedSolver):
     ..seealso::
         **Original reference**: Pichi, Federico, Beatriz Moya, and Jan S.
         Hesthaven.
-        "A graph convolutional autoencoder approach to model order reduction
-        for parametrized PDEs."
+        *A graph convolutional autoencoder approach to model order reduction
+        for parametrized PDEs.*
         Journal of Computational Physics 501 (2024): 112762.
         DOI `10.1016/j.jcp.2024.112762
         <https://doi.org/10.1016/j.jcp.2024.112762>`_.

@@ -105,10 +105,11 @@ class ReducedOrderModelSolver(SupervisedSolver):
             If `None`, the :class:`torch.nn.MSELoss` loss is used.
             Default is `None`.
         :param Optimizer optimizer: The optimizer to be used.
-            If `None`, the :class:`torch.optim.Adam`. optimizer is used.
+            If `None`, the :class:`torch.optim.Adam` optimizer is used.
             Default is ``None``.
-        :param Scheduler scheduler: Learning rate scheduler. If `None`,
-            the constant learning rate scheduler is used. Default is ``None``.
+        :param Scheduler scheduler: Learning rate scheduler.
+            If `None`, the :class:`torch.optim.lr_scheduler.ConstantLR`
+            scheduler is used. Default is ``None``.
         :param WeightingInterface weighting: The weighting schema to be used.
             If `None`, no weighting schema is used. Default is ``None``.
         :param bool use_lt: If ``True``, the solver uses LabelTensors as input.

@@ -152,9 +153,10 @@ class ReducedOrderModelSolver(SupervisedSolver):
         of the ``interpolation_network`` on the input, and maps it to output
         space by calling the decode methode of the ``reduction_network``.

-        :param torch.Tensor x: Input tensor.
+        :param x: Input tensor.
+        :type x: torch.Tensor | LabelTensor
         :return: Solver solution.
-        :rtype: torch.Tensor
+        :rtype: torch.Tensor | LabelTensor
         """
         reduction_network = self.model["reduction_network"]
         interpolation_network = self.model["interpolation_network"]
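The corrected `forward` docstring describes a two-stage map: the `interpolation_network` sends input parameters to latent coordinates, and the `reduction_network`'s `decode` lifts them to the full solution. A hedged sketch with stand-in networks (the layer sizes are arbitrary; only the `decode` call is documented):

import torch

class Decoder(torch.nn.Module):
    # Stand-in reduction network: only the documented `decode` method matters.
    def __init__(self):
        super().__init__()
        self.net = torch.nn.Linear(8, 100)  # latent -> full snapshot

    def decode(self, latent):
        return self.net(latent)

interpolation_network = torch.nn.Linear(2, 8)  # parameters -> latent
reduction_network = Decoder()

x = torch.rand(5, 2)
solution = reduction_network.decode(interpolation_network(x))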
@@ -75,9 +75,9 @@ class SolverInterface(lightning.pytorch.LightningModule, metaclass=ABCMeta):

         :param list[tuple[str, dict]] batch: A batch of data. Each element is a
             tuple containing a condition name and a dictionary of points.
-        :return: The computed loss for the all conditions in the batch, casted
-            to a subclass of `torch.Tensor`. It should return a dict containing
-            the condition name and the associated scalar loss.
+        :return: The losses computed for all conditions in the batch, casted
+            to a subclass of :class:`torch.Tensor`. It should return a dict
+            containing the condition name and the associated scalar loss.
         :rtype: dict
         """
         losses = self.optimization_cycle(batch)

@@ -92,7 +92,8 @@ class SolverInterface(lightning.pytorch.LightningModule, metaclass=ABCMeta):
         """
         Solver training step.

-        :param list[tuple[str, dict]] batch: The batch element in the dataloader.
+        :param list[tuple[str, dict]] batch: A batch of data. Each element is a
+            tuple containing a condition name and a dictionary of points.
         :return: The loss of the training step.
         :rtype: LabelTensor
         """

@@ -104,7 +105,8 @@ class SolverInterface(lightning.pytorch.LightningModule, metaclass=ABCMeta):
         """
         Solver validation step.

-        :param list[tuple[str, dict]] batch: The batch element in the dataloader.
+        :param list[tuple[str, dict]] batch: A batch of data. Each element is a
+            tuple containing a condition name and a dictionary of points.
         """
         loss = self._optimization_cycle(batch=batch)
         self.store_log("val_loss", loss, self.get_batch_size(batch))

@@ -113,7 +115,8 @@ class SolverInterface(lightning.pytorch.LightningModule, metaclass=ABCMeta):
         """
         Solver test step.

-        :param list[tuple[str, dict]] batch: The batch element in the dataloader.
+        :param list[tuple[str, dict]] batch: A batch of data. Each element is a
+            tuple containing a condition name and a dictionary of points.
         """
         loss = self._optimization_cycle(batch=batch)
         self.store_log("test_loss", loss, self.get_batch_size(batch))

@@ -138,6 +141,10 @@ class SolverInterface(lightning.pytorch.LightningModule, metaclass=ABCMeta):
     def forward(self, *args, **kwargs):
         """
         Abstract method for the forward pass implementation.
+
+        :param args: The input tensor.
+        :type args: torch.Tensor | LabelTensor
+        :param dict kwargs: Additional keyword arguments.
         """

     @abstractmethod

@@ -145,10 +152,11 @@ class SolverInterface(lightning.pytorch.LightningModule, metaclass=ABCMeta):
         """
         The optimization cycle for the solvers.

-        :param list[tuple[str, dict]] batch: The batch element in the dataloader.
-        :return: The computed loss for the all conditions in the batch, casted
-            to a subclass of `torch.Tensor`. It should return a dict containing
-            the condition name and the associated scalar loss.
+        :param list[tuple[str, dict]] batch: A batch of data. Each element is a
+            tuple containing a condition name and a dictionary of points.
+        :return: The losses computed for all conditions in the batch, casted
+            to a subclass of :class:`torch.Tensor`. It should return a dict
+            containing the condition name and the associated scalar loss.
         :rtype: dict
         """

@@ -187,7 +195,8 @@ class SolverInterface(lightning.pytorch.LightningModule, metaclass=ABCMeta):
         """
         Get the batch size.

-        :param list[tuple[str, dict]] batch: The batch element in the dataloader.
+        :param list[tuple[str, dict]] batch: A batch of data. Each element is a
+            tuple containing a condition name and a dictionary of points.
         :return: The size of the batch.
         :rtype: int
         """

@@ -296,10 +305,11 @@ class SingleSolverInterface(SolverInterface, metaclass=ABCMeta):
         :param AbstractProblem problem: The problem to be solved.
         :param torch.nn.Module model: The neural network model to be used.
         :param Optimizer optimizer: The optimizer to be used.
-            If `None`, the Adam optimizer is used. Default is ``None``.
+            If `None`, the :class:`torch.optim.Adam` optimizer is
+            used. Default is ``None``.
         :param Scheduler scheduler: The scheduler to be used.
-            If `None`, the constant learning rate scheduler is used.
-            Default is ``None``.
+            If `None`, the :class:`torch.optim.lr_scheduler.ConstantLR`
+            scheduler is used. Default is ``None``.
         :param WeightingInterface weighting: The weighting schema to be used.
             If `None`, no weighting schema is used. Default is ``None``.
         :param bool use_lt: If ``True``, the solver uses LabelTensors as input.

@@ -341,7 +351,7 @@ class SingleSolverInterface(SolverInterface, metaclass=ABCMeta):
         Optimizer configuration for the solver.

         :return: The optimizer and the scheduler
-        :rtype: tuple(list, list)
+        :rtype: tuple[list[Optimizer], list[Scheduler]]
         """
         self.optimizer.hook(self.model.parameters())
         self.scheduler.hook(self.optimizer)

@@ -421,11 +431,11 @@ class MultiSolverInterface(SolverInterface, metaclass=ABCMeta):
         :param models: The neural network models to be used.
         :type model: list[torch.nn.Module] | tuple[torch.nn.Module]
         :param list[Optimizer] optimizers: The optimizers to be used.
-            If `None`, the Adam optimizer is used for all models.
-            Default is ``None``.
-        :param list[Scheduler] schedulers: The schedulers to be used.
-            If `None`, the constant learning rate scheduler is used for all the
+            If `None`, the :class:`torch.optim.Adam` optimizer is used for all
             models. Default is ``None``.
+        :param list[Scheduler] schedulers: The schedulers to be used.
+            If `None`, the :class:`torch.optim.lr_scheduler.ConstantLR`
+            scheduler is used for all the models. Default is ``None``.
         :param WeightingInterface weighting: The weighting schema to be used.
             If `None`, no weighting schema is used. Default is ``None``.
         :param bool use_lt: If ``True``, the solver uses LabelTensors as input.

@@ -480,7 +490,7 @@ class MultiSolverInterface(SolverInterface, metaclass=ABCMeta):
         Optimizer configuration for the solver.

         :return: The optimizer and the scheduler
-        :rtype: tuple(list, list)
+        :rtype: tuple[list[Optimizer], list[Scheduler]]
         """
         for optimizer, scheduler, model in zip(
             self.optimizers, self.schedulers, self.models
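`_optimization_cycle` (the context line `losses = self.optimization_cycle(batch)`) turns the per-condition loss dict into the single scalar logged as `train_loss`/`val_loss`/`test_loss`. How the dict is aggregated is not shown in this diff; a plain mean over conditions is assumed here, whereas PINA may apply the configured weighting schema instead:

import torch

def aggregate(losses):
    # Assumption: unweighted mean over condition losses.
    return torch.stack(list(losses.values())).mean()

print(aggregate({"pde": torch.tensor(0.5), "bound": torch.tensor(0.1)}))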
@@ -52,13 +52,14 @@ class SupervisedSolver(SingleSolverInterface):
         :param AbstractProblem problem: The problem to be solved.
         :param torch.nn.Module model: The neural network model to be used.
         :param torch.nn.Module loss: The loss function to be minimized.
-            If `None`, the Mean Squared Error (MSE) loss is used.
+            If `None`, the :class:`torch.nn.MSELoss` loss is used.
             Default is `None`.
-        :param torch.optim.Optimizer optimizer: The optimizer to be used.
-            If `None`, the Adam optimizer is used. Default is ``None``.
-        :param torch.optim.LRScheduler scheduler: Learning rate scheduler.
-            If `None`, the constant learning rate scheduler is used.
+        :param Optimizer optimizer: The optimizer to be used.
+            If `None`, the :class:`torch.optim.Adam` optimizer is used.
             Default is ``None``.
+        :param Scheduler scheduler: Learning rate scheduler.
+            If `None`, the :class:`torch.optim.lr_scheduler.ConstantLR`
+            scheduler is used. Default is ``None``.
         :param WeightingInterface weighting: The weighting schema to be used.
             If `None`, no weighting schema is used. Default is ``None``.
         :param bool use_lt: If ``True``, the solver uses LabelTensors as input.

@@ -86,10 +87,11 @@ class SupervisedSolver(SingleSolverInterface):
         """
         The optimization cycle for the solvers.

-        :param list[tuple[str, dict]] batch: The batch element in the dataloader.
-        :return: The computed loss for the all conditions in the batch, casted
-            to a subclass of `torch.Tensor`. It should return a dict containing
-            the condition name and the associated scalar loss.
+        :param list[tuple[str, dict]] batch: A batch of data. Each element is a
+            tuple containing a condition name and a dictionary of points.
+        :return: The losses computed for all conditions in the batch, casted
+            to a subclass of :class:`torch.Tensor`. It should return a dict
+            containing the condition name and the associated scalar loss.
         :rtype: dict
         """
         condition_loss = {}