fix rendering part 2
@@ -63,6 +63,9 @@ class AdvectionProblem(SpatialProblem, TimeDependentProblem):
         training physics-informed neural networks*.
         arXiv preprint arXiv:2308.08468 (2023).
         DOI: `arXiv:2308.08468 <https://arxiv.org/abs/2308.08468>`_.
+
+    :Example:
+        >>> problem = AdvectionProblem(c=1.0)
     """

     output_variables = ["u"]

@@ -49,6 +49,9 @@ class AllenCahnProblem(TimeDependentProblem, SpatialProblem):
         Computer Methods in Applied Mechanics and Engineering 421 (2024): 116805
         DOI: `10.1016/
         j.cma.2024.116805 <https://doi.org/10.1016/j.cma.2024.116805>`_.
+
+    :Example:
+        >>> problem = AllenCahnProblem()
     """

     output_variables = ["u"]

@@ -59,6 +59,9 @@ class DiffusionReactionProblem(TimeDependentProblem, SpatialProblem):
         **Original reference**: Si, Chenhao, et al. *Complex Physics-Informed
         Neural Network.* arXiv preprint arXiv:2502.04917 (2025).
         DOI: `arXiv:2502.04917 <https://arxiv.org/abs/2502.04917>`_.
+
+    :Example:
+        >>> problem = DiffusionReactionProblem()
     """

     output_variables = ["u"]

@@ -53,6 +53,9 @@ class HelmholtzProblem(SpatialProblem):
         **Original reference**: Si, Chenhao, et al. *Complex Physics-Informed
         Neural Network.* arXiv preprint arXiv:2502.04917 (2025).
         DOI: `arXiv:2502.04917 <https://arxiv.org/abs/2502.04917>`_.
+
+    :Example:
+        >>> problem = HelmholtzProblem()
     """

     output_variables = ["u"]

@@ -50,6 +50,9 @@ class InversePoisson2DSquareProblem(SpatialProblem, InverseProblem):
     Implementation of the inverse 2-dimensional Poisson problem in the square
     domain :math:`[0, 1] \times [0, 1]`,
     with unknown parameter domain :math:`[-1, 1] \times [-1, 1]`.
+
+    :Example:
+        >>> problem = InversePoisson2DSquareProblem()
     """

     output_variables = ["u"]

@@ -30,6 +30,9 @@ class Poisson2DSquareProblem(SpatialProblem):
     r"""
     Implementation of the 2-dimensional Poisson problem in the square domain
     :math:`[0, 1] \times [0, 1]`.
+
+    :Example:
+        >>> problem = Poisson2DSquareProblem()
     """

     output_variables = ["u"]

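All six problem-zoo hunks above add a one-line ``:Example:`` block. As a hedged end-to-end sketch of how such a problem is consumed downstream (the import path follows the problem zoo layout referenced in this diff; the ``discretise_domain`` call and its arguments are assumed from PINA's usual workflow and are illustrative, not part of this commit):

>>> from pina.problem.zoo import Poisson2DSquareProblem
>>> problem = Poisson2DSquareProblem()
>>> problem.discretise_domain(100, "random", domains="all")  # assumed sampling call
>>> problem.output_variables
['u']
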
@@ -45,16 +45,22 @@ class GAROM(MultiSolverInterface):
     :param torch.nn.Module generator: The generator model.
     :param torch.nn.Module discriminator: The discriminator model.
     :param torch.nn.Module loss: The loss function to be minimized.
-        If ``None``, ``PowerLoss(p=1)`` is used. Default is ``None``.
+        If ``None``, :class:`~pina.loss.power_loss.PowerLoss` with ``p=1``
+        is used. Default is ``None``.
     :param Optimizer optimizer_generator: The optimizer for the generator.
-        If `None`, the Adam optimizer is used. Default is ``None``.
-    :param Optimizer optimizer_discriminator: The optimizer for the
-        discriminator. If `None`, the Adam optimizer is used.
+        If `None`, the :class:`torch.optim.Adam` optimizer is used.
         Default is ``None``.
+    :param Optimizer optimizer_discriminator: The optimizer for the
+        discriminator. If `None`, the :class:`torch.optim.Adam` optimizer is
+        used. Default is ``None``.
     :param Scheduler scheduler_generator: The learning rate scheduler for
         the generator.
+        If `None`, the :class:`torch.optim.lr_scheduler.ConstantLR`
+        scheduler is used. Default is ``None``.
     :param Scheduler scheduler_discriminator: The learning rate scheduler
         for the discriminator.
+        If `None`, the :class:`torch.optim.lr_scheduler.ConstantLR`
+        scheduler is used. Default is ``None``.
     :param float gamma: Ratio of expected loss for generator and
         discriminator. Default is ``0.3``.
     :param float lambda_k: Learning rate for control theory optimization.

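A minimal construction sketch consistent with the defaults documented above (``problem``, ``gen`` and ``disc`` are hypothetical objects; the ``problem`` argument is assumed from the solver interface, since this hunk only shows the model-related parameters):

>>> solver = GAROM(problem=problem, generator=gen, discriminator=disc)
>>> # defaults per the docstring: PowerLoss(p=1), Adam optimizers, ConstantLR schedulers, gamma=0.3
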
@@ -109,7 +115,7 @@ class GAROM(MultiSolverInterface):
             of the solution. Default is ``False``.
         :return: The expected value of the generator distribution. If
             ``variance=True``, the method returns also the variance.
-        :rtype: torch.Tensor | tuple(torch.Tensor, torch.Tensor)
+        :rtype: torch.Tensor | tuple[torch.Tensor, torch.Tensor]
         """

         # sampling

@@ -143,7 +149,7 @@ class GAROM(MultiSolverInterface):
         :param torch.Tensor parameters: The input tensor.
         :param torch.Tensor snapshots: The target tensor.
         :return: The residual loss and the generator loss.
-        :rtype: tuple(torch.Tensor, torch.Tensor)
+        :rtype: tuple[torch.Tensor, torch.Tensor]
         """
         optimizer = self.optimizer_generator
         optimizer.zero_grad()

@@ -170,7 +176,8 @@ class GAROM(MultiSolverInterface):

         :param torch.Tensor outputs: The ``model``'s output for the current
             batch.
-        :param dict batch: The current batch of data.
+        :param list[tuple[str, dict]] batch: A batch of data. Each element is a
+            tuple containing a condition name and a dictionary of points.
         :param int batch_idx: The index of the current batch.
         """
         # increase by one the counter of optimization to save loggers

@@ -187,7 +194,7 @@ class GAROM(MultiSolverInterface):
         :param torch.Tensor parameters: The input tensor.
         :param torch.Tensor snapshots: The target tensor.
         :return: The residual loss and the generator loss.
-        :rtype: tuple(torch.Tensor, torch.Tensor)
+        :rtype: tuple[torch.Tensor, torch.Tensor]
         """
         optimizer = self.optimizer_discriminator
         optimizer.zero_grad()

@@ -234,9 +241,12 @@ class GAROM(MultiSolverInterface):
         """
         The optimization cycle for the GAROM solver.

-        :param tuple batch: The batch element in the dataloader.
-        :return: The loss of the optimization cycle.
-        :rtype: LabelTensor
+        :param list[tuple[str, dict]] batch: A batch of data. Each element is a
+            tuple containing a condition name and a dictionary of points.
+        :return: The losses computed for all conditions in the batch, casted
+            to a subclass of :class:`torch.Tensor`. It should return a dict
+            containing the condition name and the associated scalar loss.
+        :rtype: dict
         """
         condition_loss = {}
         for condition_name, points in batch:

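The rewritten docstring pins down the return contract of ``optimization_cycle``. A hedged illustration of the expected shape (the condition name and value are invented for illustration):

>>> losses = solver.optimization_cycle(batch)  # batch: list[tuple[str, dict]]
>>> losses
{'data': tensor(0.0123)}  # one scalar loss per condition name
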
@@ -265,7 +275,8 @@ class GAROM(MultiSolverInterface):
         """
         The validation step for the PINN solver.

-        :param dict batch: The batch of data to use in the validation step.
+        :param list[tuple[str, dict]] batch: A batch of data. Each element is a
+            tuple containing a condition name and a dictionary of points.
         :return: The loss of the validation step.
         :rtype: torch.Tensor
         """

@@ -287,7 +298,8 @@ class GAROM(MultiSolverInterface):
         """
         The test step for the PINN solver.

-        :param dict batch: The batch of data to use in the test step.
+        :param list[tuple[str, dict]] batch: A batch of data. Each element is a
+            tuple containing a condition name and a dictionary of points.
         :return: The loss of the test step.
         :rtype: torch.Tensor
         """

@@ -53,14 +53,16 @@ class CausalPINN(PINN):
     .. seealso::

         **Original reference**: Wang, Sifan, Shyam Sankaran, and Paris
-        Perdikaris. "Respecting causality for training physics-informed
-        neural networks." Computer Methods in Applied Mechanics
-        and Engineering 421 (2024): 116813.
-        DOI `10.1016 <https://doi.org/10.1016/j.cma.2024.116813>`_.
+        Perdikaris.
+        *Respecting causality for training physics-informed
+        neural networks.*
+        Computer Methods in Applied Mechanics and Engineering 421 (2024):116813.
+        DOI: `10.1016 <https://doi.org/10.1016/j.cma.2024.116813>`_.

     .. note::
         This class is only compatible with problems that inherit from the
-        :class:`~pina.problem.TimeDependentProblem` class.
+        :class:`~pina.problem.time_dependent_problem.TimeDependentProblem`
+        class.
     """

     def __init__(

@@ -77,17 +79,19 @@ class CausalPINN(PINN):
         Initialization of the :class:`CausalPINN` class.

         :param AbstractProblem problem: The problem to be solved. It must
-            inherit from at least :class:`~pina.problem.TimeDependentProblem`.
+            inherit from at least
+            :class:`~pina.problem.time_dependent_problem.TimeDependentProblem`.
         :param torch.nn.Module model: The neural network model to be used.
-        :param torch.optim.Optimizer optimizer: The optimizer to be used
-            If `None`, the Adam optimizer is used. Default is ``None``.
-        :param torch.optim.LRScheduler scheduler: Learning rate scheduler.
-            If `None`, the constant learning rate scheduler is used.
+        :param Optimizer optimizer: The optimizer to be used.
+            If `None`, the :class:`torch.optim.Adam` optimizer is used.
+            Default is ``None``.
+        :param torch.optim.LRScheduler scheduler: Learning rate scheduler.
+            If `None`, the :class:`torch.optim.lr_scheduler.ConstantLR`
+            scheduler is used. Default is ``None``.
         :param WeightingInterface weighting: The weighting schema to be used.
             If `None`, no weighting schema is used. Default is ``None``.
         :param torch.nn.Module loss: The loss function to be minimized.
-            If `None`, the Mean Squared Error (MSE) loss is used.
+            If `None`, the :class:`torch.nn.MSELoss` loss is used.
             Default is `None`.
         :param float eps: The exponential decay parameter. Default is ``100``.
         :raises ValueError: If the problem is not a TimeDependentProblem.

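A hedged constructor sketch matching the revised :class:`CausalPINN` docstring (``problem`` must inherit from ``TimeDependentProblem``; ``problem`` and ``model`` are assumed objects):

>>> solver = CausalPINN(problem=problem, model=model, eps=100)  # Adam, ConstantLR, MSELoss defaults
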
@@ -46,8 +46,8 @@ class CompetitivePINN(PINNInterface, MultiSolverInterface):
     .. seealso::

         **Original reference**: Zeng, Qi, et al.
-        "Competitive physics informed networks." International Conference on
-        Learning Representations, ICLR 2022
+        *Competitive physics informed networks.*
+        International Conference on Learning Representations, ICLR 2022
         `OpenReview Preprint <https://openreview.net/forum?id=z9SIj-IM7tn>`_.
     """

@@ -72,21 +72,23 @@ class CompetitivePINN(PINNInterface, MultiSolverInterface):
             If `None`, the discriminator is a deepcopy of the ``model``.
             Default is ``None``.
         :param torch.optim.Optimizer optimizer_model: The optimizer of the
-            ``model``. If `None`, the Adam optimizer is used.
-            Default is ``None``.
+            ``model``. If `None`, the :class:`torch.optim.Adam` optimizer is
+            used. Default is ``None``.
         :param torch.optim.Optimizer optimizer_discriminator: The optimizer of
-            the ``discriminator``. If `None`, the Adam optimizer is used.
-            Default is ``None``.
-        :param torch.optim.LRScheduler scheduler_model: Learning rate scheduler
-            for the ``model``. If `None`, the constant learning rate scheduler
-            is used. Default is ``None``.
-        :param torch.optim.LRScheduler scheduler_discriminator: Learning rate
-            scheduler for the ``discriminator``. If `None`, the constant
-            learning rate scheduler is used. Default is ``None``.
+            the ``discriminator``. If `None`, the :class:`torch.optim.Adam`
+            optimizer is used. Default is ``None``.
+        :param Scheduler scheduler_model: Learning rate scheduler for the
+            ``model``.
+            If `None`, the :class:`torch.optim.lr_scheduler.ConstantLR`
+            scheduler is used. Default is ``None``.
+        :param Scheduler scheduler_discriminator: Learning rate scheduler for
+            the ``discriminator``.
+            If `None`, the :class:`torch.optim.lr_scheduler.ConstantLR`
+            scheduler is used. Default is ``None``.
         :param WeightingInterface weighting: The weighting schema to be used.
             If `None`, no weighting schema is used. Default is ``None``.
         :param torch.nn.Module loss: The loss function to be minimized.
-            If `None`, the Mean Squared Error (MSE) loss is used.
+            If `None`, the :class:`torch.nn.MSELoss` loss is used.
             Default is `None`.
         """
         if discriminator is None:

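A hedged sketch of the default documented above, where the discriminator falls back to a deep copy of the model (``problem`` and ``model`` assumed):

>>> solver = CompetitivePINN(problem=problem, model=model)  # discriminator=None -> deepcopy(model)
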
@@ -118,7 +120,8 @@ class CompetitivePINN(PINNInterface, MultiSolverInterface):
         """
         Solver training step, overridden to perform manual optimization.

-        :param dict batch: The batch element in the dataloader.
+        :param list[tuple[str, dict]] batch: A batch of data. Each element is a
+            tuple containing a condition name and a dictionary of points.
         :return: The aggregated loss.
         :rtype: LabelTensor
         """

@@ -163,7 +166,7 @@ class CompetitivePINN(PINNInterface, MultiSolverInterface):
         Optimizer configuration.

         :return: The optimizers and the schedulers
-        :rtype: tuple(list, list)
+        :rtype: tuple[list[Optimizer], list[Scheduler]]
         """
         # If the problem is an InverseProblem, add the unknown parameters
         # to the parameters to be optimized

@@ -198,7 +201,8 @@ class CompetitivePINN(PINNInterface, MultiSolverInterface):

         :param torch.Tensor outputs: The ``model``'s output for the current
             batch.
-        :param dict batch: The current batch of data.
+        :param list[tuple[str, dict]] batch: A batch of data. Each element is a
+            tuple containing a condition name and a dictionary of points.
         :param int batch_idx: The index of the current batch.
         """
         # increase by one the counter of optimization to save loggers

@@ -234,7 +238,7 @@ class CompetitivePINN(PINNInterface, MultiSolverInterface):
         The optimizer associated to the model.

         :return: The optimizer for the model.
-        :rtype: torch.optim.Optimizer
+        :rtype: Optimizer
         """
         return self.optimizers[0]

@@ -244,7 +248,7 @@ class CompetitivePINN(PINNInterface, MultiSolverInterface):
         The optimizer associated to the discriminator.

         :return: The optimizer for the discriminator.
-        :rtype: torch.optim.Optimizer
+        :rtype: Optimizer
         """
         return self.optimizers[1]

@@ -254,7 +258,7 @@ class CompetitivePINN(PINNInterface, MultiSolverInterface):
         The scheduler associated to the model.

         :return: The scheduler for the model.
-        :rtype: torch.optim.lr_scheduler._LRScheduler
+        :rtype: Scheduler
         """
         return self.schedulers[0]

@@ -264,6 +268,6 @@ class CompetitivePINN(PINNInterface, MultiSolverInterface):
         The scheduler associated to the discriminator.

         :return: The scheduler for the discriminator.
-        :rtype: torch.optim.lr_scheduler._LRScheduler
+        :rtype: Scheduler
         """
         return self.schedulers[1]

@@ -46,15 +46,15 @@ class GradientPINN(PINN):

     .. seealso::

-        **Original reference**: Yu, Jeremy, et al. "Gradient-enhanced
-        physics-informed neural networks for forward and inverse
-        PDE problems." Computer Methods in Applied Mechanics
-        and Engineering 393 (2022): 114823.
+        **Original reference**: Yu, Jeremy, et al.
+        *Gradient-enhanced physics-informed neural networks for forward and
+        inverse PDE problems.*
+        Computer Methods in Applied Mechanics and Engineering 393 (2022):114823.
+        DOI: `10.1016 <https://doi.org/10.1016/j.cma.2022.114823>`_.

     .. note::
         This class is only compatible with problems that inherit from the
-        :class:`~pina.problem.SpatialProblem` class.
+        :class:`~pina.problem.spatial_problem.SpatialProblem` class.
     """

     def __init__(

@@ -70,18 +70,20 @@ class GradientPINN(PINN):
         Initialization of the :class:`GradientPINN` class.

         :param AbstractProblem problem: The problem to be solved.
-            It must inherit from at least :class:`~pina.problem.SpatialProblem`
-            to compute the gradient of the loss.
+            It must inherit from at least
+            :class:`~pina.problem.spatial_problem.SpatialProblem` to compute the
+            gradient of the loss.
         :param torch.nn.Module model: The neural network model to be used.
-        :param torch.optim.Optimizer optimizer: The optimizer to be used.
-            If `None`, the Adam optimizer is used. Default is ``None``.
-        :param torch.optim.LRScheduler scheduler: Learning rate scheduler.
-            If `None`, the constant learning rate scheduler is used.
+        :param Optimizer optimizer: The optimizer to be used.
+            If `None`, the :class:`torch.optim.Adam` optimizer is used.
+            Default is ``None``.
+        :param Scheduler scheduler: Learning rate scheduler.
+            If `None`, the :class:`torch.optim.lr_scheduler.ConstantLR`
+            scheduler is used. Default is ``None``.
         :param WeightingInterface weighting: The weighting schema to be used.
             If `None`, no weighting schema is used. Default is ``None``.
         :param torch.nn.Module loss: The loss function to be minimized.
-            If `None`, the Mean Squared Error (MSE) loss is used.
+            If `None`, the :class:`torch.nn.MSELoss` loss is used.
             Default is `None`.
         :raises ValueError: If the problem is not a SpatialProblem.
         """

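A hedged sketch of the constructor documented above (``problem`` must be a ``SpatialProblem``, otherwise the documented ``ValueError`` is raised; ``problem`` and ``model`` are assumed objects):

>>> solver = GradientPINN(problem=problem, model=model)  # MSELoss, Adam, ConstantLR defaults
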
@@ -42,7 +42,8 @@ class PINN(PINNInterface, SingleSolverInterface):

         **Original reference**: Karniadakis, G. E., Kevrekidis, I. G., Lu, L.,
         Perdikaris, P., Wang, S., & Yang, L. (2021).
-        Physics-informed machine learning. Nature Reviews Physics, 3, 422-440.
+        *Physics-informed machine learning.*
+        Nature Reviews Physics, 3, 422-440.
         DOI: `10.1038 <https://doi.org/10.1038/s42254-021-00314-5>`_.
     """

@@ -60,15 +61,16 @@ class PINN(PINNInterface, SingleSolverInterface):

         :param AbstractProblem problem: The problem to be solved.
         :param torch.nn.Module model: The neural network model to be used.
-        :param torch.optim.Optimizer optimizer: The optimizer to be used.
-            If `None`, the Adam optimizer is used. Default is ``None``.
-        :param torch.optim.LRScheduler scheduler: Learning rate scheduler.
-            If `None`, the constant learning rate scheduler is used.
+        :param Optimizer optimizer: The optimizer to be used.
+            If `None`, the :class:`torch.optim.Adam` optimizer is used.
+            Default is ``None``.
+        :param Scheduler scheduler: Learning rate scheduler.
+            If `None`, the :class:`torch.optim.lr_scheduler.ConstantLR`
+            scheduler is used. Default is ``None``.
         :param WeightingInterface weighting: The weighting schema to be used.
             If `None`, no weighting schema is used. Default is ``None``.
         :param torch.nn.Module loss: The loss function to be minimized.
-            If `None`, the Mean Squared Error (MSE) loss is used.
+            If `None`, the :class:`torch.nn.MSELoss` loss is used.
             Default is `None`.
         """
         super().__init__(

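A hedged end-to-end sketch wiring the constructor above to the problem-zoo example earlier in this diff (``FeedForward`` and ``Trainer`` are PINA's standard model and training wrapper; the argument values are assumed and illustrative):

>>> from pina import Trainer
>>> from pina.model import FeedForward
>>> from pina.solver import PINN
>>> model = FeedForward(input_dimensions=2, output_dimensions=1)
>>> solver = PINN(problem=problem, model=model)  # Adam + ConstantLR + MSELoss defaults
>>> Trainer(solver, max_epochs=10).train()
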
@@ -101,7 +103,7 @@ class PINN(PINNInterface, SingleSolverInterface):
         Optimizer configuration for the PINN solver.

         :return: The optimizers and the schedulers
-        :rtype: tuple(list, list)
+        :rtype: tuple[list[Optimizer], list[Scheduler]]
         """
         # If the problem is an InverseProblem, add the unknown parameters
         # to the parameters to be optimized.

@@ -18,12 +18,12 @@ from ...condition import (
 class PINNInterface(SolverInterface, metaclass=ABCMeta):
     """
     Base class for Physics-Informed Neural Network (PINN) solvers, implementing
-    the :class:`~pina.solver.SolverInterface` class.
+    the :class:`~pina.solver.solver.SolverInterface` class.

     The `PINNInterface` class can be used to define PINNs that work with one or
     multiple optimizers and/or models. By default, it is compatible with
-    problems defined by :class:`~pina.problem.AbstractProblem`, and users can
-    choose the problem type the solver is meant to address.
+    problems defined by :class:`~pina.problem.abstract_problem.AbstractProblem`,
+    and users can choose the problem type the solver is meant to address.
     """

     accepted_conditions_types = (

@@ -38,10 +38,10 @@ class PINNInterface(SolverInterface, metaclass=ABCMeta):

         :param AbstractProblem problem: The problem to be solved.
         :param torch.nn.Module loss: The loss function to be minimized.
-            If ``None``, the Mean Squared Error (MSE) loss is used.
-            Default is ``None``.
+            If `None`, the :class:`torch.nn.MSELoss` loss is used.
+            Default is `None`.
         :param kwargs: Additional keyword arguments to be passed to the
-            :class:`~pina.solver.SolverInterface` class.
+            :class:`~pina.solver.solver.SolverInterface` class.
         """

         if loss is None:

@@ -73,9 +73,12 @@ class PINNInterface(SolverInterface, metaclass=ABCMeta):
         loss as argument, thus distinguishing the training step from the
         validation and test steps.

-        :param dict batch: The batch of data to use in the optimization cycle.
-        :return: The loss of the optimization cycle.
-        :rtype: torch.Tensor
+        :param list[tuple[str, dict]] batch: A batch of data. Each element is a
+            tuple containing a condition name and a dictionary of points.
+        :return: The losses computed for all conditions in the batch, casted
+            to a subclass of :class:`torch.Tensor`. It should return a dict
+            containing the condition name and the associated scalar loss.
+        :rtype: dict
         """
         return self._run_optimization_cycle(batch, self.loss_phys)

@@ -84,7 +87,8 @@ class PINNInterface(SolverInterface, metaclass=ABCMeta):
         """
         The validation step for the PINN solver.

-        :param dict batch: The batch of data to use in the validation step.
+        :param list[tuple[str, dict]] batch: A batch of data. Each element is a
+            tuple containing a condition name and a dictionary of points.
         :return: The loss of the validation step.
         :rtype: torch.Tensor
         """

@@ -98,7 +102,8 @@ class PINNInterface(SolverInterface, metaclass=ABCMeta):
         """
         The test step for the PINN solver.

-        :param dict batch: The batch of data to use in the test step.
+        :param list[tuple[str, dict]] batch: A batch of data. Each element is a
+            tuple containing a condition name and a dictionary of points.
         :return: The loss of the test step.
         :rtype: torch.Tensor
         """

@@ -169,10 +174,13 @@ class PINNInterface(SolverInterface, metaclass=ABCMeta):
         Compute, given a batch, the loss for each condition and return a
         dictionary with the condition name as key and the loss as value.

-        :param dict batch: The batch of data to use in the optimization cycle.
+        :param list[tuple[str, dict]] batch: A batch of data. Each element is a
+            tuple containing a condition name and a dictionary of points.
         :param function loss_residuals: The loss function to be minimized.
-        :return: The loss for each condition.
-        :rtype dict
+        :return: The losses computed for all conditions in the batch, casted
+            to a subclass of :class:`torch.Tensor`. It should return a dict
+            containing the condition name and the associated scalar loss.
+        :rtype: dict
         """
         condition_loss = {}
         for condition_name, points in batch:

@@ -59,11 +59,11 @@ class RBAPINN(PINN):
     .. seealso::
         **Original reference**: Sokratis J. Anagnostopoulos, Juan D. Toscano,
         Nikolaos Stergiopulos, and George E. Karniadakis.
-        "Residual-based attention and connection to information
-        bottleneck theory in PINNs".
+        *Residual-based attention and connection to information
+        bottleneck theory in PINNs.*
         Computer Methods in Applied Mechanics and Engineering 421 (2024): 116805
-        DOI: `10.1016/
-        j.cma.2024.116805 <https://doi.org/10.1016/j.cma.2024.116805>`_.
+        DOI: `10.1016/j.cma.2024.116805
+        <https://doi.org/10.1016/j.cma.2024.116805>`_.
     """

     def __init__(

@@ -82,15 +82,16 @@ class RBAPINN(PINN):

         :param AbstractProblem problem: The problem to be solved.
         :param torch.nn.Module model: The neural network model to be used.
-        :param torch.optim.Optimizer optimizer: The optimizer to be used.
-            If `None`, the Adam optimizer is used. Default is ``None``.
-        :param torch.optim.LRScheduler scheduler: Learning rate scheduler.
-            If `None`, the constant learning rate scheduler is used.
+        param Optimizer optimizer: The optimizer to be used.
+            If `None`, the :class:`torch.optim.Adam` optimizer is used.
+            Default is ``None``.
+        :param Scheduler scheduler: Learning rate scheduler.
+            If `None`, the :class:`torch.optim.lr_scheduler.ConstantLR`
+            scheduler is used. Default is ``None``.
         :param WeightingInterface weighting: The weighting schema to be used.
             If `None`, no weighting schema is used. Default is ``None``.
         :param torch.nn.Module loss: The loss function to be minimized.
-            If `None`, the Mean Squared Error (MSE) loss is used.
+            If `None`, the :class:`torch.nn.MSELoss` loss is used.
             Default is `None`.
         :param float | int eta: The learning rate for the weights of the
             residuals. Default is ``0.001``.

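A hedged sketch of the constructor documented above; ``eta`` is the residual-weight learning rate specific to this class (``problem`` and ``model`` are assumed objects):

>>> solver = RBAPINN(problem=problem, model=model, eta=0.001)
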
@@ -147,7 +148,7 @@ class RBAPINN(PINN):
         :param LabelTensor loss_value: the tensor of pointwise losses.
         :raises RuntimeError: If the loss reduction is not ``mean`` or ``sum``.
         :return: The computed scalar loss.
-        :rtype LabelTensor
+        :rtype: LabelTensor
         """
         if self.loss.reduction == "mean":
             ret = torch.mean(loss_value)

@@ -94,10 +94,10 @@ class SelfAdaptivePINN(PINNInterface, MultiSolverInterface):

     .. seealso::
         **Original reference**: McClenny, Levi D., and Ulisses M. Braga-Neto.
-        "Self-adaptive physics-informed neural networks."
+        *Self-adaptive physics-informed neural networks.*
         Journal of Computational Physics 474 (2023): 111722.
-        DOI: `10.1016/
-        j.jcp.2022.111722 <https://doi.org/10.1016/j.jcp.2022.111722>`_.
+        DOI: `10.1016/j.jcp.2022.111722
+        <https://doi.org/10.1016/j.jcp.2022.111722>`_.
     """

     def __init__(

@@ -119,22 +119,25 @@ class SelfAdaptivePINN(PINNInterface, MultiSolverInterface):
         :param torch.nn.Module model: The model to be used.
         :param torch.nn.Module weight_function: The Self-Adaptive mask model.
             Default is ``torch.nn.Sigmoid()``.
-        :param torch.optim.Optimizer optimizer_model: The optimizer of the
-            ``model``. If `None`, the Adam optimizer is used.
+        :param Optimizer optimizer_model: The optimizer of the ``model``.
+            If `None`, the :class:`torch.optim.Adam` optimizer is used.
             Default is ``None``.
-        :param torch.optim.Optimizer optimizer_weights: The optimizer of the
-            ``weight_function``. If `None`, the Adam optimizer is used.
+        :param Optimizer optimizer_weights: The optimizer of the
+            ``weight_function``.
+            If `None`, the :class:`torch.optim.Adam` optimizer is used.
             Default is ``None``.
-        :param torch.optim.LRScheduler scheduler_model: Learning rate scheduler
-            for the ``model``. If `None`, the constant learning rate scheduler
-            is used. Default is ``None``.
-        :param torch.optim.LRScheduler scheduler_weights: Learning rate
-            scheduler for the ``weight_function``. If `None`, the constant
-            learning rate scheduler is used. Default is ``None``.
+        :param Scheduler scheduler_model: Learning rate scheduler for the
+            ``model``.
+            If `None`, the :class:`torch.optim.lr_scheduler.ConstantLR`
+            scheduler is used. Default is ``None``.
+        :param Scheduler scheduler_weights: Learning rate scheduler for the
+            ``weight_function``.
+            If `None`, the :class:`torch.optim.lr_scheduler.ConstantLR`
+            scheduler is used. Default is ``None``.
         :param WeightingInterface weighting: The weighting schema to be used.
             If `None`, no weighting schema is used. Default is ``None``.
         :param torch.nn.Module loss: The loss function to be minimized.
-            If `None`, the Mean Squared Error (MSE) loss is used.
+            If `None`, the :class:`torch.nn.MSELoss` loss is used.
             Default is `None`.
         """
         # check consistency weitghs_function

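A hedged sketch of the constructor documented above, with the documented default mask (``problem`` and ``model`` are assumed objects):

>>> import torch
>>> solver = SelfAdaptivePINN(problem=problem, model=model, weight_function=torch.nn.Sigmoid())
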
@@ -175,7 +178,8 @@ class SelfAdaptivePINN(PINNInterface, MultiSolverInterface):
         """
         Solver training step, overridden to perform manual optimization.

-        :param dict batch: The batch element in the dataloader.
+        :param list[tuple[str, dict]] batch: A batch of data. Each element is a
+            tuple containing a condition name and a dictionary of points.
         :return: The aggregated loss.
         :rtype: LabelTensor
         """

@@ -198,7 +202,7 @@ class SelfAdaptivePINN(PINNInterface, MultiSolverInterface):
         Optimizer configuration.

         :return: The optimizers and the schedulers
-        :rtype: tuple(list, list)
+        :rtype: tuple[list[Optimizer], list[Scheduler]]
         """
         # If the problem is an InverseProblem, add the unknown parameters
         # to the parameters to be optimized

@@ -227,7 +231,8 @@ class SelfAdaptivePINN(PINNInterface, MultiSolverInterface):

         :param torch.Tensor outputs: The ``model``'s output for the current
             batch.
-        :param dict batch: The current batch of data.
+        :param list[tuple[str, dict]] batch: A batch of data. Each element is a
+            tuple containing a condition name and a dictionary of points.
         :param int batch_idx: The index of the current batch.
         """
         # increase by one the counter of optimization to save loggers

@@ -307,7 +312,7 @@ class SelfAdaptivePINN(PINNInterface, MultiSolverInterface):
         :param LabelTensor loss_value: the tensor of pointwise losses.
         :raises RuntimeError: If the loss reduction is not ``mean`` or ``sum``.
         :return: The computed scalar loss.
-        :rtype LabelTensor
+        :rtype: LabelTensor
         """
         if self.loss.reduction == "mean":
             ret = torch.mean(loss_value)

@@ -346,7 +351,7 @@ class SelfAdaptivePINN(PINNInterface, MultiSolverInterface):
         The scheduler associated to the model.

         :return: The scheduler for the model.
-        :rtype: torch.optim.lr_scheduler._LRScheduler
+        :rtype: Scheduler
         """
         return self.schedulers[0]

@@ -356,7 +361,7 @@ class SelfAdaptivePINN(PINNInterface, MultiSolverInterface):
         The scheduler associated to the mask model.

         :return: The scheduler for the mask model.
-        :rtype: torch.optim.lr_scheduler._LRScheduler
+        :rtype: Scheduler
         """
         return self.schedulers[1]

@@ -366,7 +371,7 @@ class SelfAdaptivePINN(PINNInterface, MultiSolverInterface):
         Returns the optimizer associated to the model.

         :return: The optimizer for the model.
-        :rtype: torch.optim.Optimizer
+        :rtype: Optimizer
         """
         return self.optimizers[0]

@@ -376,6 +381,6 @@ class SelfAdaptivePINN(PINNInterface, MultiSolverInterface):
         The optimizer associated to the mask model.

         :return: The optimizer for the mask model.
-        :rtype: torch.optim.Optimizer
+        :rtype: Optimizer
         """
         return self.optimizers[1]

@@ -45,8 +45,8 @@ class ReducedOrderModelSolver(SupervisedSolver):
     .. seealso::

         **Original reference**: Hesthaven, Jan S., and Stefano Ubbiali.
-        "Non-intrusive reduced order modeling of nonlinear problems using
-        neural networks."
+        *Non-intrusive reduced order modeling of nonlinear problems using
+        neural networks.*
         Journal of Computational Physics 363 (2018): 55-78.
         DOI `10.1016/j.jcp.2018.02.037
         <https://doi.org/10.1016/j.jcp.2018.02.037>`_.

@@ -67,8 +67,8 @@ class ReducedOrderModelSolver(SupervisedSolver):
     ..seealso::
         **Original reference**: Pichi, Federico, Beatriz Moya, and Jan S.
         Hesthaven.
-        "A graph convolutional autoencoder approach to model order reduction
-        for parametrized PDEs."
+        *A graph convolutional autoencoder approach to model order reduction
+        for parametrized PDEs.*
         Journal of Computational Physics 501 (2024): 112762.
         DOI `10.1016/j.jcp.2024.112762
         <https://doi.org/10.1016/j.jcp.2024.112762>`_.

@@ -105,10 +105,11 @@ class ReducedOrderModelSolver(SupervisedSolver):
             If `None`, the :class:`torch.nn.MSELoss` loss is used.
             Default is `None`.
         :param Optimizer optimizer: The optimizer to be used.
-            If `None`, the :class:`torch.optim.Adam`. optimizer is used.
+            If `None`, the :class:`torch.optim.Adam` optimizer is used.
             Default is ``None``.
-        :param Scheduler scheduler: Learning rate scheduler. If `None`,
-            the constant learning rate scheduler is used. Default is ``None``.
+        :param Scheduler scheduler: Learning rate scheduler.
+            If `None`, the :class:`torch.optim.lr_scheduler.ConstantLR`
+            scheduler is used. Default is ``None``.
         :param WeightingInterface weighting: The weighting schema to be used.
             If `None`, no weighting schema is used. Default is ``None``.
         :param bool use_lt: If ``True``, the solver uses LabelTensors as input.

@@ -152,9 +153,10 @@ class ReducedOrderModelSolver(SupervisedSolver):
         of the ``interpolation_network`` on the input, and maps it to output
         space by calling the decode methode of the ``reduction_network``.

-        :param torch.Tensor x: Input tensor.
+        :param x: Input tensor.
+        :type x: torch.Tensor | LabelTensor
         :return: Solver solution.
-        :rtype: torch.Tensor
+        :rtype: torch.Tensor | LabelTensor
         """
         reduction_network = self.model["reduction_network"]
         interpolation_network = self.model["interpolation_network"]

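A hedged sketch of the data flow this docstring describes, continuing from the two lines of code shown above (``x`` assumed to be a ``torch.Tensor`` or ``LabelTensor``):

>>> latent = interpolation_network(x)            # parameters -> latent coefficients
>>> solution = reduction_network.decode(latent)  # latent coefficients -> full-order solution
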
@@ -75,9 +75,9 @@ class SolverInterface(lightning.pytorch.LightningModule, metaclass=ABCMeta):

         :param list[tuple[str, dict]] batch: A batch of data. Each element is a
             tuple containing a condition name and a dictionary of points.
-        :return: The computed loss for the all conditions in the batch, casted
-            to a subclass of `torch.Tensor`. It should return a dict containing
-            the condition name and the associated scalar loss.
+        :return: The losses computed for all conditions in the batch, casted
+            to a subclass of :class:`torch.Tensor`. It should return a dict
+            containing the condition name and the associated scalar loss.
         :rtype: dict
         """
         losses = self.optimization_cycle(batch)

@@ -92,7 +92,8 @@ class SolverInterface(lightning.pytorch.LightningModule, metaclass=ABCMeta):
         """
         Solver training step.

-        :param list[tuple[str, dict]] batch: The batch element in the dataloader.
+        :param list[tuple[str, dict]] batch: A batch of data. Each element is a
+            tuple containing a condition name and a dictionary of points.
         :return: The loss of the training step.
         :rtype: LabelTensor
         """

@@ -104,7 +105,8 @@ class SolverInterface(lightning.pytorch.LightningModule, metaclass=ABCMeta):
         """
         Solver validation step.

-        :param list[tuple[str, dict]] batch: The batch element in the dataloader.
+        :param list[tuple[str, dict]] batch: A batch of data. Each element is a
+            tuple containing a condition name and a dictionary of points.
         """
         loss = self._optimization_cycle(batch=batch)
         self.store_log("val_loss", loss, self.get_batch_size(batch))

@@ -113,7 +115,8 @@ class SolverInterface(lightning.pytorch.LightningModule, metaclass=ABCMeta):
         """
         Solver test step.

-        :param list[tuple[str, dict]] batch: The batch element in the dataloader.
+        :param list[tuple[str, dict]] batch: A batch of data. Each element is a
+            tuple containing a condition name and a dictionary of points.
         """
         loss = self._optimization_cycle(batch=batch)
         self.store_log("test_loss", loss, self.get_batch_size(batch))

@@ -138,6 +141,10 @@ class SolverInterface(lightning.pytorch.LightningModule, metaclass=ABCMeta):
     def forward(self, *args, **kwargs):
         """
         Abstract method for the forward pass implementation.
+
+        :param args: The input tensor.
+        :type args: torch.Tensor | LabelTensor
+        :param dict kwargs: Additional keyword arguments.
         """

     @abstractmethod

@@ -145,10 +152,11 @@ class SolverInterface(lightning.pytorch.LightningModule, metaclass=ABCMeta):
         """
         The optimization cycle for the solvers.

-        :param list[tuple[str, dict]] batch: The batch element in the dataloader.
-        :return: The computed loss for the all conditions in the batch, casted
-            to a subclass of `torch.Tensor`. It should return a dict containing
-            the condition name and the associated scalar loss.
+        :param list[tuple[str, dict]] batch: A batch of data. Each element is a
+            tuple containing a condition name and a dictionary of points.
+        :return: The losses computed for all conditions in the batch, casted
+            to a subclass of :class:`torch.Tensor`. It should return a dict
+            containing the condition name and the associated scalar loss.
         :rtype: dict
         """

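A hedged sketch of a concrete ``optimization_cycle`` satisfying this abstract contract (the ``"input"``/``"target"`` keys inside each points dictionary are assumed for illustration):

>>> def optimization_cycle(self, batch):
...     return {
...         name: self.loss(self.forward(points["input"]), points["target"])
...         for name, points in batch
...     }
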
@@ -187,7 +195,8 @@ class SolverInterface(lightning.pytorch.LightningModule, metaclass=ABCMeta):
         """
         Get the batch size.

-        :param list[tuple[str, dict]] batch: The batch element in the dataloader.
+        :param list[tuple[str, dict]] batch: A batch of data. Each element is a
+            tuple containing a condition name and a dictionary of points.
         :return: The size of the batch.
         :rtype: int
         """

@@ -296,10 +305,11 @@ class SingleSolverInterface(SolverInterface, metaclass=ABCMeta):
         :param AbstractProblem problem: The problem to be solved.
         :param torch.nn.Module model: The neural network model to be used.
         :param Optimizer optimizer: The optimizer to be used.
-            If `None`, the Adam optimizer is used. Default is ``None``.
+            If `None`, the :class:`torch.optim.Adam` optimizer is
+            used. Default is ``None``.
         :param Scheduler scheduler: The scheduler to be used.
-            If `None`, the constant learning rate scheduler is used.
-            Default is ``None``.
+            If `None`, the :class:`torch.optim.lr_scheduler.ConstantLR`
+            scheduler is used. Default is ``None``.
         :param WeightingInterface weighting: The weighting schema to be used.
             If `None`, no weighting schema is used. Default is ``None``.
         :param bool use_lt: If ``True``, the solver uses LabelTensors as input.

@@ -341,7 +351,7 @@ class SingleSolverInterface(SolverInterface, metaclass=ABCMeta):
         Optimizer configuration for the solver.

         :return: The optimizer and the scheduler
-        :rtype: tuple(list, list)
+        :rtype: tuple[list[Optimizer], list[Scheduler]]
         """
         self.optimizer.hook(self.model.parameters())
         self.scheduler.hook(self.optimizer)

@@ -421,11 +431,11 @@ class MultiSolverInterface(SolverInterface, metaclass=ABCMeta):
         :param models: The neural network models to be used.
         :type model: list[torch.nn.Module] | tuple[torch.nn.Module]
         :param list[Optimizer] optimizers: The optimizers to be used.
-            If `None`, the Adam optimizer is used for all models.
-            Default is ``None``.
-        :param list[Scheduler] schedulers: The schedulers to be used.
-            If `None`, the constant learning rate scheduler is used for all the
+            If `None`, the :class:`torch.optim.Adam` optimizer is used for all
+            models. Default is ``None``.
+        :param list[Scheduler] schedulers: The schedulers to be used.
+            If `None`, the :class:`torch.optim.lr_scheduler.ConstantLR`
+            scheduler is used for all the models. Default is ``None``.
         :param WeightingInterface weighting: The weighting schema to be used.
             If `None`, no weighting schema is used. Default is ``None``.
         :param bool use_lt: If ``True``, the solver uses LabelTensors as input.

@@ -480,7 +490,7 @@ class MultiSolverInterface(SolverInterface, metaclass=ABCMeta):
         Optimizer configuration for the solver.

         :return: The optimizer and the scheduler
-        :rtype: tuple(list, list)
+        :rtype: tuple[list[Optimizer], list[Scheduler]]
         """
         for optimizer, scheduler, model in zip(
             self.optimizers, self.schedulers, self.models

@@ -52,13 +52,14 @@ class SupervisedSolver(SingleSolverInterface):
     :param AbstractProblem problem: The problem to be solved.
     :param torch.nn.Module model: The neural network model to be used.
     :param torch.nn.Module loss: The loss function to be minimized.
-        If `None`, the Mean Squared Error (MSE) loss is used.
+        If `None`, the :class:`torch.nn.MSELoss` loss is used.
         Default is `None`.
-    :param torch.optim.Optimizer optimizer: The optimizer to be used.
-        If `None`, the Adam optimizer is used. Default is ``None``.
-    :param torch.optim.LRScheduler scheduler: Learning rate scheduler.
-        If `None`, the constant learning rate scheduler is used.
+    :param Optimizer optimizer: The optimizer to be used.
+        If `None`, the :class:`torch.optim.Adam` optimizer is used.
+        Default is ``None``.
+    :param Scheduler scheduler: Learning rate scheduler.
+        If `None`, the :class:`torch.optim.lr_scheduler.ConstantLR`
+        scheduler is used. Default is ``None``.
     :param WeightingInterface weighting: The weighting schema to be used.
         If `None`, no weighting schema is used. Default is ``None``.
     :param bool use_lt: If ``True``, the solver uses LabelTensors as input.

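A hedged construction sketch matching the defaults documented above (``problem`` and ``model`` are assumed objects):

>>> solver = SupervisedSolver(problem=problem, model=model, use_lt=True)  # MSELoss, Adam, ConstantLR defaults
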
@@ -86,10 +87,11 @@ class SupervisedSolver(SingleSolverInterface):
         """
         The optimization cycle for the solvers.

-        :param list[tuple[str, dict]] batch: The batch element in the dataloader.
-        :return: The computed loss for the all conditions in the batch, casted
-            to a subclass of `torch.Tensor`. It should return a dict containing
-            the condition name and the associated scalar loss.
+        :param list[tuple[str, dict]] batch: A batch of data. Each element is a
+            tuple containing a condition name and a dictionary of points.
+        :return: The losses computed for all conditions in the batch, casted
+            to a subclass of :class:`torch.Tensor`. It should return a dict
+            containing the condition name and the associated scalar loss.
         :rtype: dict
         """
         condition_loss = {}