From da1ac90b991820538b27f08bd999fa02fa393d95 Mon Sep 17 00:00:00 2001
From: giovanni
Date: Wed, 12 Mar 2025 18:05:42 +0100
Subject: [PATCH] fix doc loss and codacy

---
 pina/domain/cartesian.py           |  3 +-
 pina/domain/difference_domain.py   |  3 +-
 pina/domain/ellipsoid.py           |  9 +++---
 pina/domain/exclusion_domain.py    |  3 +-
 pina/domain/intersection_domain.py |  7 +++--
 pina/domain/operation_interface.py |  3 +-
 pina/domain/simplex.py             |  5 +--
 pina/domain/union_domain.py        |  3 +-
 pina/equation/system_equation.py   |  3 +-
 pina/loss/__init__.py              |  4 +--
 pina/loss/loss_interface.py        | 40 ++++++++++--------------
 pina/loss/lp_loss.py               | 48 ++++++++++++----------------
 pina/loss/power_loss.py            | 50 +++++++++++++-----------------
 pina/loss/scalar_weighting.py      | 29 ++++++++++++++---
 pina/loss/weighting_interface.py   | 12 ++++---
 15 files changed, 114 insertions(+), 108 deletions(-)

diff --git a/pina/domain/cartesian.py b/pina/domain/cartesian.py
index 9870a66..4e6f3b9 100644
--- a/pina/domain/cartesian.py
+++ b/pina/domain/cartesian.py
@@ -270,7 +270,8 @@ class CartesianDomain(DomainInterface):
         :param LabelTensor point: Point to be checked.
         :param bool check_border: If ``True``, the border is considered inside
             the hypercube. Default is ``False``.
-        :return: ``True`` if the point is inside the domain, ``False`` otherwise.
+        :return: ``True`` if the point is inside the domain,
+            ``False`` otherwise.
         :rtype: bool
         """
         is_inside = []
diff --git a/pina/domain/difference_domain.py b/pina/domain/difference_domain.py
index 573d736..a2d74cf 100644
--- a/pina/domain/difference_domain.py
+++ b/pina/domain/difference_domain.py
@@ -45,7 +45,8 @@ class Difference(OperationInterface):
         :param LabelTensor point: Point to be checked.
         :param bool check_border: If ``True``, the border is considered inside
             the domain. Default is ``False``.
-        :return: ``True`` if the point is inside the domain, ``False`` otherwise.
+        :return: ``True`` if the point is inside the domain,
+            ``False`` otherwise.
         :rtype: bool
         """
         for geometry in self.geometries[1:]:
diff --git a/pina/domain/ellipsoid.py b/pina/domain/ellipsoid.py
index a99d2b6..b828be7 100644
--- a/pina/domain/ellipsoid.py
+++ b/pina/domain/ellipsoid.py
@@ -18,7 +18,7 @@ class EllipsoidDomain(DomainInterface):
     :param dict ellipsoid_dict: A dictionary where the keys are the variable
         names and the values are the domain extrema.
     :param bool sample_surface: A flag to choose the sampling strategy.
-        If ``True``, samples are taken only from the surface of the ellipsoid.
+        If ``True``, samples are taken from the surface of the ellipsoid.
         If ``False``, samples are taken from the interior of the ellipsoid.
         Default is ``False``.
     :raises TypeError: If the input dictionary is not correctly formatted.
@@ -26,7 +26,8 @@ class EllipsoidDomain(DomainInterface):
     .. warning::
         Sampling for dimensions greater or equal to 10 could result in a
         shrinkage of the ellipsoid, which degrades the quality of the
-        samples. For dimensions higher than 10, use other sampling algorithms.
+        samples. For dimensions higher than 10, use other sampling
+        algorithms.

     .. seealso:: **Original reference**: Dezert, Jean, and Musso, Christian.
         *An efficient method for generating points uniformly distributed
@@ -101,7 +102,8 @@ class EllipsoidDomain(DomainInterface):
             the ellipsoid. Default is ``False``.
         :raises ValueError: If the labels of the point are different from those
             passed in the ``__init__`` method.
-        :return: ``True`` if the point is inside the domain, ``False`` otherwise.
+        :return: ``True`` if the point is inside the domain,
+            ``False`` otherwise.
         :rtype: bool

         .. note::
@@ -147,7 +149,6 @@ class EllipsoidDomain(DomainInterface):
         return bool(eqn < 0)

     def _sample_range(self, n, mode, variables):
-        """"""
         """
         Rescale the samples to fit within the specified bounds.

diff --git a/pina/domain/exclusion_domain.py b/pina/domain/exclusion_domain.py
index c9d6692..db9564b 100644
--- a/pina/domain/exclusion_domain.py
+++ b/pina/domain/exclusion_domain.py
@@ -44,7 +44,8 @@ class Exclusion(OperationInterface):
         :param LabelTensor point: Point to be checked.
         :param bool check_border: If ``True``, the border is considered inside
             the domain. Default is ``False``.
-        :return: ``True`` if the point is inside the domain, ``False`` otherwise.
+        :return: ``True`` if the point is inside the domain,
+            ``False`` otherwise.
         :rtype: bool
         """
         flag = 0
diff --git a/pina/domain/intersection_domain.py b/pina/domain/intersection_domain.py
index e5ccc58..c6ffe8f 100644
--- a/pina/domain/intersection_domain.py
+++ b/pina/domain/intersection_domain.py
@@ -24,8 +24,8 @@ class Intersection(OperationInterface):
         Initialization of the :class:`Intersection` class.

         :param list[DomainInterface] geometries: A list of instances of the
-            :class:`~pina.domain.DomainInterface` class on which the intersection
-            operation is performed.
+            :class:`~pina.domain.DomainInterface` class on which the
+            intersection operation is performed.

         :Example:
             >>> # Create two ellipsoid domains
@@ -43,7 +43,8 @@ class Intersection(OperationInterface):
         :param LabelTensor point: Point to be checked.
         :param bool check_border: If ``True``, the border is considered inside
             the domain. Default is ``False``.
-        :return: ``True`` if the point is inside the domain, ``False`` otherwise.
+        :return: ``True`` if the point is inside the domain,
+            ``False`` otherwise.
         :rtype: bool
         """
         flag = 0
diff --git a/pina/domain/operation_interface.py b/pina/domain/operation_interface.py
index e8b8b6d..b377c41 100644
--- a/pina/domain/operation_interface.py
+++ b/pina/domain/operation_interface.py
@@ -70,7 +70,8 @@ class OperationInterface(DomainInterface, metaclass=ABCMeta):
         :param LabelTensor point: Point to be checked.
         :param bool check_border: If ``True``, the border is considered inside
             the resulting domain. Default is ``False``.
-        :return: ``True`` if the point is inside the domain, ``False`` otherwise.
+        :return: ``True`` if the point is inside the domain,
+            ``False`` otherwise.
         :rtype: bool
         """
diff --git a/pina/domain/simplex.py b/pina/domain/simplex.py
index 4c4d616..cc496da 100644
--- a/pina/domain/simplex.py
+++ b/pina/domain/simplex.py
@@ -98,7 +98,7 @@ class SimplexDomain(DomainInterface):
         """
         Build the cartesian border for a simplex domain to be used in sampling.

-        :param list[LabelTensor] vertices: Matrix of vertices defining the domain.
+        :param list[LabelTensor] vertices: List of vertices defining the domain.
         :return: The cartesian border for the simplex domain.
         :rtype: CartesianDomain
         """
@@ -124,7 +124,8 @@ class SimplexDomain(DomainInterface):
         :param LabelTensor point: Point to be checked.
         :param bool check_border: If ``True``, the border is considered inside
             the simplex. Default is ``False``.
         :raises ValueError: If the labels of the point are different from those
             passed in the ``__init__`` method.
-        :return: ``True`` if the point is inside the domain, ``False`` otherwise.
+        :return: ``True`` if the point is inside the domain,
+            ``False`` otherwise.
         :rtype: bool
         """
diff --git a/pina/domain/union_domain.py b/pina/domain/union_domain.py
index b3c1426..f3f3b4f 100644
--- a/pina/domain/union_domain.py
+++ b/pina/domain/union_domain.py
@@ -51,7 +51,8 @@ class Union(OperationInterface):
         :param LabelTensor point: Point to be checked.
         :param bool check_border: If ``True``, the border is considered inside
             the domain. Default is ``False``.
-        :return: ``True`` if the point is inside the domain, ``False`` otherwise.
+        :return: ``True`` if the point is inside the domain,
+            ``False`` otherwise.
         :rtype: bool
         """
         for geometry in self.geometries:
diff --git a/pina/equation/system_equation.py b/pina/equation/system_equation.py
index 68ab8d0..ff4cf61 100644
--- a/pina/equation/system_equation.py
+++ b/pina/equation/system_equation.py
@@ -53,7 +53,8 @@ class SystemEquation(EquationInterface):
         aggregate it according to the ``reduction`` specified in the
         ``__init__`` method.

-        :param LabelTensor input_: Input points where each equation is evaluated.
+        :param LabelTensor input_: Input points where each equation of the
+            system is evaluated.
         :param LabelTensor output_: Output tensor, eventually produced by a
             :class:`~torch.nn.Module` instance.
         :param dict params_: Dictionary of unknown parameters, associated with a
diff --git a/pina/loss/__init__.py b/pina/loss/__init__.py
index 178b847..4c57f9b 100644
--- a/pina/loss/__init__.py
+++ b/pina/loss/__init__.py
@@ -1,6 +1,4 @@
-"""
-Module for loss functions and weighting functions.
-"""
+"""Module for loss functions and weighting functions."""

 __all__ = [
     "LossInterface",
diff --git a/pina/loss/loss_interface.py b/pina/loss/loss_interface.py
index 227e2a6..4e0e287 100644
--- a/pina/loss/loss_interface.py
+++ b/pina/loss/loss_interface.py
@@ -1,4 +1,4 @@
-"""Module for Loss Interface"""
+"""Module for the Loss Interface"""

 from abc import ABCMeta, abstractmethod
 from torch.nn.modules.loss import _Loss
@@ -7,45 +7,37 @@ import torch

 class LossInterface(_Loss, metaclass=ABCMeta):
     """
-    The abstract ``LossInterface`` class. All the class defining a PINA Loss
-    should be inheritied from this class.
+    Abstract base class for all losses. All classes defining a loss function
+    should inherit from this interface.
     """

     def __init__(self, reduction="mean"):
         """
-        :param str reduction: Specifies the reduction to apply to the output:
-            ``none`` | ``mean`` | ``sum``. When ``none``: no reduction
-            will be applied, ``mean``: the sum of the output will be divided
-            by the number of elements in the output, ``sum``: the output will
-            be summed. Note: ``size_average`` and ``reduce`` are in the
-            process of being deprecated, and in the meantime, specifying either
-            of those two args will override ``reduction``. Default: ``mean``.
+        Initialization of the :class:`LossInterface` class.
+
+        :param str reduction: The reduction method for the loss.
+            Available options: ``none``, ``mean``, ``sum``.
+            If ``none``, no reduction is applied. If ``mean``, the sum of the
+            loss values is divided by the number of values. If ``sum``, the loss
+            values are summed. Default is ``mean``.
         """
         super().__init__(reduction=reduction, size_average=None, reduce=None)

     @abstractmethod
     def forward(self, input, target):
-        """Forward method for loss function.
+        """
+        Forward method of the loss function.

         :param torch.Tensor input: Input tensor from real data.
         :param torch.Tensor target: Model tensor output.
-        :return: Loss evaluation.
-        :rtype: torch.Tensor
         """

     def _reduction(self, loss):
-        """Simple helper function to check reduction
+        """
+        Apply the reduction to the loss.

-        :param reduction: Specifies the reduction to apply to the output:
-            ``none`` | ``mean`` | ``sum``. When ``none``: no reduction
-            will be applied, ``mean``: the sum of the output will be divided
-            by the number of elements in the output, ``sum``: the output will
-            be summed. Note: ``size_average`` and ``reduce`` are in the
-            process of being deprecated, and in the meantime, specifying either
-            of those two args will override ``reduction``. Default: ``mean``.
-        :type reduction: str
-        :param loss: Loss tensor for each element.
-        :type loss: torch.Tensor
+        :param torch.Tensor loss: The tensor containing the pointwise losses.
+        :raises ValueError: If the reduction method is not valid.
         :return: Reduced loss.
         :rtype: torch.Tensor
         """
diff --git a/pina/loss/lp_loss.py b/pina/loss/lp_loss.py
index 03f4473..6790b69 100644
--- a/pina/loss/lp_loss.py
+++ b/pina/loss/lp_loss.py
@@ -8,26 +8,26 @@ from .loss_interface import LossInterface

 class LpLoss(LossInterface):
     r"""
-    The Lp loss implementation class. Creates a criterion that measures
-    the Lp error between each element in the input :math:`x` and
+    Implementation of the Lp Loss. It defines a criterion to measure the
+    pointwise Lp error between values in the input :math:`x` and
     values in the target :math:`y`.

-    The unreduced (i.e. with ``reduction`` set to ``none``) loss can
-    be described as:
+    If ``reduction`` is set to ``none``, the loss can be written as:

     .. math::
         \ell(x, y) = L = \{l_1,\dots,l_N\}^\top, \quad
         l_n = \left[\sum_{i=1}^{D} \left| x_n^i - y_n^i \right|^p \right],

-    If ``'relative'`` is set to true:
+    If ``relative`` is set to ``True``, the relative Lp error is computed:

     .. math::
         \ell(x, y) = L = \{l_1,\dots,l_N\}^\top, \quad
         l_n = \frac{ [\sum_{i=1}^{D} | x_n^i - y_n^i|^p] }
        {[\sum_{i=1}^{D}|y_n^i|^p]},

-    where :math:`N` is the batch size. If ``reduction`` is not ``none``
-    (default ``mean``), then:
+    where :math:`N` is the batch size.
+
+    If ``reduction`` is not ``none``, then:

     .. math::
         \ell(x, y) =
@@ -35,30 +35,21 @@ class LpLoss(LossInterface):
         \operatorname{mean}(L), & \text{if reduction} = \text{`mean';}\\
         \operatorname{sum}(L), & \text{if reduction} = \text{`sum'.}
         \end{cases}
-
-    :math:`x` and :math:`y` are tensors of arbitrary shapes with a total
-    of :math:`n` elements each.
-
-    The sum operation still operates over all the elements, and divides by
-    :math:`n`.
-
-    The division by :math:`n` can be avoided if one sets ``reduction`` to
-    ``sum``.
     """

     def __init__(self, p=2, reduction="mean", relative=False):
         """
-        :param int p: Degree of Lp norm. It specifies the type of norm to
-            be calculated. See `list of possible orders in torch linalg
-            `torch.linalg.norm `_
-            for possible degrees. Default 2 (euclidean norm).
-        :param str reduction: Specifies the reduction to apply to the output:
-            ``none`` | ``mean`` | ``sum``. ``none``: no reduction
-            will be applied, ``mean``: the sum of the output will be divided
-            by the number of elements in the output, ``sum``: the output will
-            be summed.
-        :param bool relative: Specifies if relative error should be computed.
+        Initialization of the :class:`LpLoss` class.
+
+        :param int p: Degree of the Lp norm. It specifies the norm to be
+            computed. Default is ``2`` (Euclidean norm).
+        :param str reduction: The reduction method for the loss.
+            Available options: ``none``, ``mean``, ``sum``.
+            If ``none``, no reduction is applied. If ``mean``, the sum of the
+            loss values is divided by the number of values. If ``sum``, the loss
+            values are summed. Default is ``mean``.
+        :param bool relative: If ``True``, the relative error is computed.
+            Default is ``False``.
         """
         super().__init__(reduction=reduction)
@@ -70,7 +61,8 @@ class LpLoss(LossInterface):
         self.relative = relative

     def forward(self, input, target):
-        """Forward method for loss function.
+        """
+        Forward method of the loss function.

         :param torch.Tensor input: Input tensor from real data.
         :param torch.Tensor target: Model tensor output.
diff --git a/pina/loss/power_loss.py b/pina/loss/power_loss.py
index 695ef4d..0853052 100644
--- a/pina/loss/power_loss.py
+++ b/pina/loss/power_loss.py
@@ -8,27 +8,27 @@ from .loss_interface import LossInterface

 class PowerLoss(LossInterface):
     r"""
-    The PowerLoss loss implementation class. Creates a criterion that measures
-    the error between each element in the input :math:`x` and
-    target :math:`y` powered to a specific integer.
+    Implementation of the Power Loss. It defines a criterion to measure the
+    pointwise error between values in the input :math:`x` and values in the
+    target :math:`y`.

-    The unreduced (i.e. with ``reduction`` set to ``none``) loss can
-    be described as:
+    If ``reduction`` is set to ``none``, the loss can be written as:

     .. math::
         \ell(x, y) = L = \{l_1,\dots,l_N\}^\top, \quad
         l_n = \frac{1}{D}\left[\sum_{i=1}^{D} \left| x_n^i - y_n^i \right|^p\right],

-    If ``'relative'`` is set to true:
+    If ``relative`` is set to ``True``, the relative error is computed:

     .. math::
         \ell(x, y) = L = \{l_1,\dots,l_N\}^\top, \quad
         l_n = \frac{ \sum_{i=1}^{D} | x_n^i - y_n^i|^p }
        {\sum_{i=1}^{D}|y_n^i|^p},

-    where :math:`N` is the batch size. If ``reduction`` is not ``none``
-    (default ``mean``), then:
+    where :math:`N` is the batch size.
+
+    If ``reduction`` is not ``none``, then:

     .. math::
         \ell(x, y) =
@@ -36,30 +36,21 @@ class PowerLoss(LossInterface):
         \operatorname{mean}(L), & \text{if reduction} = \text{`mean';}\\
         \operatorname{sum}(L), & \text{if reduction} = \text{`sum'.}
         \end{cases}
-
-    :math:`x` and :math:`y` are tensors of arbitrary shapes with a total
-    of :math:`n` elements each.
-
-    The sum operation still operates over all the elements, and divides by
-    :math:`n`.
-
-    The division by :math:`n` can be avoided if one sets ``reduction`` to
-    ``sum``.
     """

     def __init__(self, p=2, reduction="mean", relative=False):
         """
-        :param int p: Degree of Lp norm. It specifies the type of norm to
-            be calculated. See `list of possible orders in torch linalg
-            `_ to
-            see the possible degrees. Default 2 (euclidean norm).
-        :param str reduction: Specifies the reduction to apply to the output:
-            ``none`` | ``mean`` | ``sum``. When ``none``: no reduction
-            will be applied, ``mean``: the sum of the output will be divided
-            by the number of elements in the output, ``sum``: the output will
-            be summed.
-        :param bool relative: Specifies if relative error should be computed.
+        Initialization of the :class:`PowerLoss` class.
+
+        :param int p: Degree of the Lp norm. It specifies the norm to be
+            computed. Default is ``2`` (Euclidean norm).
+        :param str reduction: The reduction method for the loss.
+            Available options: ``none``, ``mean``, ``sum``.
+            If ``none``, no reduction is applied. If ``mean``, the sum of the
+            loss values is divided by the number of values. If ``sum``, the loss
+            values are summed. Default is ``mean``.
+        :param bool relative: If ``True``, the relative error is computed.
+            Default is ``False``.
""" super().__init__(reduction=reduction) @@ -71,7 +62,8 @@ class PowerLoss(LossInterface): self.relative = relative def forward(self, input, target): - """Forward method for loss function. + """ + Forward method of the loss function. :param torch.Tensor input: Input tensor from real data. :param torch.Tensor target: Model tensor output. diff --git a/pina/loss/scalar_weighting.py b/pina/loss/scalar_weighting.py index 3273dea..6bc093c 100644 --- a/pina/loss/scalar_weighting.py +++ b/pina/loss/scalar_weighting.py @@ -1,20 +1,41 @@ -"""Module for Loss Interface""" +"""Module for the Scalar Weighting.""" from .weighting_interface import WeightingInterface from ..utils import check_consistency class _NoWeighting(WeightingInterface): + """ + Weighting scheme that does not apply any weighting to the losses. + """ + def aggregate(self, losses): + """ + Aggregate the losses. + + :param dict losses: The dictionary of losses. + :return: The aggregated losses. + :rtype: torch.Tensor + """ return sum(losses.values()) class ScalarWeighting(WeightingInterface): """ - TODO + Weighting scheme that assigns a scalar weight to each loss term. """ def __init__(self, weights): + """ + Initialization of the :class:`ScalarWeighting` class. + + :param weights: The weights to be assigned to each loss term. + If a single scalar value is provided, it is assigned to all loss + terms. If a dictionary is provided, the keys are the conditions and + the values are the weights. If a condition is not present in the + dictionary, the default value is used. + :type weights: float | int | dict + """ super().__init__() check_consistency([weights], (float, dict, int)) if isinstance(weights, (float, int)): @@ -28,8 +49,8 @@ class ScalarWeighting(WeightingInterface): """ Aggregate the losses. - :param dict(torch.Tensor) losses: The dictionary of losses. - :return: The losses aggregation. It should be a scalar Tensor. + :param dict losses: The dictionary of losses. + :return: The aggregated losses. :rtype: torch.Tensor """ return sum( diff --git a/pina/loss/weighting_interface.py b/pina/loss/weighting_interface.py index 56a17b8..2df89d2 100644 --- a/pina/loss/weighting_interface.py +++ b/pina/loss/weighting_interface.py @@ -1,14 +1,18 @@ -"""Module for Loss Interface""" +"""Module for the Weighting Interface""" from abc import ABCMeta, abstractmethod class WeightingInterface(metaclass=ABCMeta): """ - The ``weightingInterface`` class. TODO + Abstract base class for all loss weighting schemas. All weighting schemas + should inherit from this class. """ def __init__(self): + """ + Initialization of the :class:`WeightingInterface` class. + """ self.condition_names = None @abstractmethod @@ -16,7 +20,5 @@ class WeightingInterface(metaclass=ABCMeta): """ Aggregate the losses. - :param dict(torch.Tensor) input: The dictionary of losses. - :return: The losses aggregation. It should be a scalar Tensor. - :rtype: torch.Tensor + :param dict losses: The dictionary of losses. """