fix doc loss and codacy

giovanni
2025-03-12 18:05:42 +01:00
committed by FilippoOlivo
parent 2c9e980c7f
commit da1ac90b99
15 changed files with 114 additions and 108 deletions

View File

@@ -270,7 +270,8 @@ class CartesianDomain(DomainInterface):
         :param LabelTensor point: Point to be checked.
         :param bool check_border: If ``True``, the border is considered inside
             the hypercube. Default is ``False``.
-        :return: ``True`` if the point is inside the domain, ``False`` otherwise.
+        :return: ``True`` if the point is inside the domain,
+            ``False`` otherwise.
         :rtype: bool
         """
         is_inside = []
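A minimal usage sketch of the ``is_inside`` behavior documented above (import paths and constructor shapes are assumptions based on the docstrings in this diff, not part of the commit):

    import torch
    from pina import LabelTensor
    from pina.domain import CartesianDomain

    # Hypercube [0, 1] x [0, 1]: keys are variable names, values the extrema.
    domain = CartesianDomain({"x": [0, 1], "y": [0, 1]})

    # A point on the border is reported inside only when check_border=True.
    point = LabelTensor(torch.tensor([[0.0, 0.5]]), labels=["x", "y"])
    print(domain.is_inside(point, check_border=False))  # expected: False
    print(domain.is_inside(point, check_border=True))   # expected: True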

View File

@@ -45,7 +45,8 @@ class Difference(OperationInterface):
         :param LabelTensor point: Point to be checked.
         :param bool check_border: If ``True``, the border is considered inside
             the domain. Default is ``False``.
-        :return: ``True`` if the point is inside the domain, ``False`` otherwise.
+        :return: ``True`` if the point is inside the domain,
+            ``False`` otherwise.
         :rtype: bool
         """
         for geometry in self.geometries[1:]:

View File

@@ -18,7 +18,7 @@ class EllipsoidDomain(DomainInterface):
         :param dict ellipsoid_dict: A dictionary where the keys are the variable
             names and the values are the domain extrema.
         :param bool sample_surface: A flag to choose the sampling strategy.
-            If ``True``, samples are taken only from the surface of the ellipsoid.
+            If ``True``, samples are taken from the surface of the ellipsoid.
             If ``False``, samples are taken from the interior of the ellipsoid.
             Default is ``False``.
         :raises TypeError: If the input dictionary is not correctly formatted.
@@ -26,7 +26,8 @@ class EllipsoidDomain(DomainInterface):
         .. warning::
             Sampling for dimensions greater or equal to 10 could result in a
             shrinkage of the ellipsoid, which degrades the quality of the
-            samples. For dimensions higher than 10, use other sampling algorithms.
+            samples. For dimensions higher than 10, use other sampling
+            algorithms.

         .. seealso::
             **Original reference**: Dezert, Jean, and Musso, Christian.
             *An efficient method for generating points uniformly distributed
@@ -101,7 +102,8 @@ class EllipsoidDomain(DomainInterface):
             the ellipsoid. Default is ``False``.
         :raises ValueError: If the labels of the point are different from those
             passed in the ``__init__`` method.
-        :return: ``True`` if the point is inside the domain, ``False`` otherwise.
+        :return: ``True`` if the point is inside the domain,
+            ``False`` otherwise.
         :rtype: bool

         .. note::
@@ -147,7 +149,6 @@ class EllipsoidDomain(DomainInterface):
         return bool(eqn < 0)

     def _sample_range(self, n, mode, variables):
-        """"""
         """
         Rescale the samples to fit within the specified bounds.
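The ``sample_surface`` flag documented above selects between the two strategies; a hedged sketch (the ``sample`` method belongs to the domain API, but its exact signature is assumed, not shown in this diff):

    from pina.domain import EllipsoidDomain

    # Unit disk in 2D; keys are variable names, values the extrema.
    ball = EllipsoidDomain({"x": [-1, 1], "y": [-1, 1]})
    shell = EllipsoidDomain({"x": [-1, 1], "y": [-1, 1]}, sample_surface=True)

    interior_pts = ball.sample(n=100)   # drawn from the interior
    border_pts = shell.sample(n=100)    # drawn from the surface only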

View File

@@ -44,7 +44,8 @@ class Exclusion(OperationInterface):
         :param LabelTensor point: Point to be checked.
         :param bool check_border: If ``True``, the border is considered inside
             the domain. Default is ``False``.
-        :return: ``True`` if the point is inside the domain, ``False`` otherwise.
+        :return: ``True`` if the point is inside the domain,
+            ``False`` otherwise.
         :rtype: bool
         """
         flag = 0

View File

@@ -24,8 +24,8 @@ class Intersection(OperationInterface):
         Initialization of the :class:`Intersection` class.

         :param list[DomainInterface] geometries: A list of instances of the
-            :class:`~pina.domain.DomainInterface` class on which the intersection
-            operation is performed.
+            :class:`~pina.domain.DomainInterface` class on which the
+            intersection operation is performed.

         :Example:
             >>> # Create two ellipsoid domains
@@ -43,7 +43,8 @@ class Intersection(OperationInterface):
         :param LabelTensor point: Point to be checked.
         :param bool check_border: If ``True``, the border is considered inside
             the domain. Default is ``False``.
-        :return: ``True`` if the point is inside the domain, ``False`` otherwise.
+        :return: ``True`` if the point is inside the domain,
+            ``False`` otherwise.
         :rtype: bool
         """
         flag = 0

View File

@@ -70,7 +70,8 @@ class OperationInterface(DomainInterface, metaclass=ABCMeta):
         :param LabelTensor point: Point to be checked.
         :param bool check_border: If ``True``, the border is considered inside
             the resulting domain. Default is ``False``.
-        :return: ``True`` if the point is inside the domain, ``False`` otherwise.
+        :return: ``True`` if the point is inside the domain,
+            ``False`` otherwise.
         :rtype: bool
         """

View File

@@ -98,7 +98,7 @@ class SimplexDomain(DomainInterface):
         """
         Build the cartesian border for a simplex domain to be used in sampling.

-        :param list[LabelTensor] vertices: Matrix of vertices defining the domain.
+        :param list[LabelTensor] vertices: list of vertices defining the domain.
         :return: The cartesian border for the simplex domain.
         :rtype: CartesianDomain
         """
@@ -124,7 +124,8 @@ class SimplexDomain(DomainInterface):
             the simplex. Default is ``False``.
         :raises ValueError: If the labels of the point are different from those
             passed in the ``__init__`` method.
-        :return: ``True`` if the point is inside the domain, ``False`` otherwise.
+        :return: ``True`` if the point is inside the domain,
+            ``False`` otherwise.
         :rtype: bool
         """

View File

@@ -51,7 +51,8 @@ class Union(OperationInterface):
         :param LabelTensor point: Point to be checked.
         :param bool check_border: If ``True``, the border is considered inside
             the domain. Default is ``False``.
-        :return: ``True`` if the point is inside the domain, ``False`` otherwise.
+        :return: ``True`` if the point is inside the domain,
+            ``False`` otherwise.
         :rtype: bool
         """
         for geometry in self.geometries:
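The four set operations patched in this commit (``Union``, ``Intersection``, ``Difference``, ``Exclusion``) share the same ``is_inside`` contract; a hedged composition sketch (import paths assumed from the ``pina.domain`` references in the hunks):

    import torch
    from pina import LabelTensor
    from pina.domain import CartesianDomain, Intersection, Union

    a = CartesianDomain({"x": [0, 2], "y": [0, 2]})
    b = CartesianDomain({"x": [1, 3], "y": [1, 3]})

    # This point lies in a but not in b.
    point = LabelTensor(torch.tensor([[0.5, 0.5]]), labels=["x", "y"])

    # Union: inside if the point lies in either domain.
    print(Union([a, b]).is_inside(point))         # expected: True
    # Intersection: inside only if the point lies in both domains.
    print(Intersection([a, b]).is_inside(point))  # expected: False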

View File

@@ -53,7 +53,8 @@ class SystemEquation(EquationInterface):
             aggregate it according to the ``reduction`` specified in the
             ``__init__`` method.

-        :param LabelTensor input_: Input points where each equation is evaluated.
+        :param LabelTensor input_: Input points where each equation of the
+            system is evaluated.
         :param LabelTensor output_: Output tensor, eventually produced by a
             :class:`~torch.nn.Module` instance.
         :param dict params_: Dictionary of unknown parameters, associated with a
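To anchor the docstring, a sketch of how a system's residual is evaluated (whether ``SystemEquation`` accepts plain callables, and the exact ``residual`` call shape, are assumptions about the PINA API not confirmed by this diff):

    from pina.equation import SystemEquation

    # Each equation maps (input_, output_) to a residual tensor.
    def first_equation(input_, output_):
        return output_ - 1.0

    def second_equation(input_, output_):
        return output_ * 2.0

    system = SystemEquation([first_equation, second_equation], reduction="mean")
    # system.residual(pts, model_output) evaluates every equation of the
    # system at pts and aggregates according to the reduction chosen above.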

View File

@@ -1,6 +1,4 @@
-"""
-Module for loss functions and weighting functions.
-"""
+"""Module for loss functions and weighting functions."""

 __all__ = [
     "LossInterface",

View File

@@ -1,4 +1,4 @@
-"""Module for Loss Interface"""
+"""Module for the Loss Interface"""

 from abc import ABCMeta, abstractmethod
 from torch.nn.modules.loss import _Loss
@@ -7,45 +7,37 @@ import torch

 class LossInterface(_Loss, metaclass=ABCMeta):
     """
-    The abstract ``LossInterface`` class. All the class defining a PINA Loss
-    should be inheritied from this class.
+    Abstract base class for all losses. All classes defining a loss function
+    should inherit from this interface.
     """

     def __init__(self, reduction="mean"):
         """
-        :param str reduction: Specifies the reduction to apply to the output:
-            ``none`` | ``mean`` | ``sum``. When ``none``: no reduction
-            will be applied, ``mean``: the sum of the output will be divided
-            by the number of elements in the output, ``sum``: the output will
-            be summed. Note: ``size_average`` and ``reduce`` are in the
-            process of being deprecated, and in the meantime, specifying either
-            of those two args will override ``reduction``. Default: ``mean``.
+        Initialization of the :class:`LossInterface` class.
+
+        :param str reduction: The reduction method for the loss.
+            Available options: ``none``, ``mean``, ``sum``.
+            If ``none``, no reduction is applied. If ``mean``, the sum of the
+            loss values is divided by the number of values. If ``sum``, the loss
+            values are summed. Default is ``mean``.
         """
         super().__init__(reduction=reduction, size_average=None, reduce=None)

     @abstractmethod
     def forward(self, input, target):
-        """Forward method for loss function.
+        """
+        Forward method of the loss function.

         :param torch.Tensor input: Input tensor from real data.
         :param torch.Tensor target: Model tensor output.
+        :return: Loss evaluation.
+        :rtype: torch.Tensor
         """

     def _reduction(self, loss):
-        """Simple helper function to check reduction
-
-        :param reduction: Specifies the reduction to apply to the output:
-            ``none`` | ``mean`` | ``sum``. When ``none``: no reduction
-            will be applied, ``mean``: the sum of the output will be divided
-            by the number of elements in the output, ``sum``: the output will
-            be summed. Note: ``size_average`` and ``reduce`` are in the
-            process of being deprecated, and in the meantime, specifying either
-            of those two args will override ``reduction``. Default: ``mean``.
-        :type reduction: str
-        :param loss: Loss tensor for each element.
-        :type loss: torch.Tensor
+        """
+        Apply the reduction to the loss.
+
+        :param torch.Tensor loss: The tensor containing the pointwise losses.
+        :raises ValueError: If the reduction method is not valid.
         :return: Reduced loss.
         :rtype: torch.Tensor
         """

View File

@@ -8,26 +8,26 @@ from .loss_interface import LossInterface

 class LpLoss(LossInterface):
     r"""
-    The Lp loss implementation class. Creates a criterion that measures
-    the Lp error between each element in the input :math:`x` and
+    Implementation of the Lp Loss. It defines a criterion to measure the
+    pointwise Lp error between values in the input :math:`x` and values in the
     target :math:`y`.

-    The unreduced (i.e. with ``reduction`` set to ``none``) loss can
-    be described as:
+    If ``reduction`` is set to ``none``, the loss can be written as:

     .. math::
         \ell(x, y) = L = \{l_1,\dots,l_N\}^\top, \quad
         l_n = \left[\sum_{i=1}^{D} \left| x_n^i - y_n^i \right|^p \right],

-    If ``'relative'`` is set to true:
+    If ``relative`` is set to ``True``, the relative Lp error is computed:

     .. math::
         \ell(x, y) = L = \{l_1,\dots,l_N\}^\top, \quad
         l_n = \frac{ [\sum_{i=1}^{D} | x_n^i - y_n^i|^p] }
         {[\sum_{i=1}^{D}|y_n^i|^p]},

-    where :math:`N` is the batch size. If ``reduction`` is not ``none``
-    (default ``mean``), then:
+    where :math:`N` is the batch size.
+
+    If ``reduction`` is not ``none``, then:

     .. math::
         \ell(x, y) =
@@ -35,30 +35,21 @@ class LpLoss(LossInterface):
         \operatorname{mean}(L), & \text{if reduction} = \text{`mean';}\\
         \operatorname{sum}(L), & \text{if reduction} = \text{`sum'.}
         \end{cases}

-    :math:`x` and :math:`y` are tensors of arbitrary shapes with a total
-    of :math:`n` elements each.
-
-    The sum operation still operates over all the elements, and divides by
-    :math:`n`.
-
-    The division by :math:`n` can be avoided if one sets ``reduction`` to
-    ``sum``.
     """

     def __init__(self, p=2, reduction="mean", relative=False):
         """
-        :param int p: Degree of Lp norm. It specifies the type of norm to
-            be calculated. See `torch.linalg.norm
-            <https://pytorch.org/docs/stable/generated/
-            torch.linalg.norm.html#torch.linalg.norm>`_
-            for possible degrees. Default 2 (euclidean norm).
-        :param str reduction: Specifies the reduction to apply to the output:
-            ``none`` | ``mean`` | ``sum``. ``none``: no reduction
-            will be applied, ``mean``: the sum of the output will be divided
-            by the number of elements in the output, ``sum``: the output will
-            be summed.
-        :param bool relative: Specifies if relative error should be computed.
+        Initialization of the :class:`LpLoss` class.
+
+        :param int p: Degree of the Lp norm. It specifies the norm to be
+            computed. Default is ``2`` (euclidean norm).
+        :param str reduction: The reduction method for the loss.
+            Available options: ``none``, ``mean``, ``sum``.
+            If ``none``, no reduction is applied. If ``mean``, the sum of the
+            loss values is divided by the number of values. If ``sum``, the loss
+            values are summed. Default is ``mean``.
+        :param bool relative: If ``True``, the relative error is computed.
+            Default is ``False``.
         """
         super().__init__(reduction=reduction)
@@ -70,7 +61,8 @@ class LpLoss(LossInterface):
         self.relative = relative

     def forward(self, input, target):
-        """Forward method for loss function.
+        """
+        Forward method of the loss function.

         :param torch.Tensor input: Input tensor from real data.
         :param torch.Tensor target: Model tensor output.
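A usage sketch of the options documented above (import path assumed from the package ``__all__``; outputs depend on the implementation, which this diff does not show):

    import torch
    from pina.loss import LpLoss

    x = torch.tensor([[1.0, 2.0], [3.0, 4.0]])
    y = torch.tensor([[1.5, 2.0], [3.0, 3.0]])

    mean_loss = LpLoss(p=2, reduction="mean")   # one scalar for the batch
    per_sample = LpLoss(p=2, reduction="none")  # one value per sample
    relative = LpLoss(p=2, relative=True)       # scaled by the norm of y

    print(mean_loss(x, y), per_sample(x, y), relative(x, y))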

View File

@@ -8,27 +8,27 @@ from .loss_interface import LossInterface

 class PowerLoss(LossInterface):
     r"""
-    The PowerLoss loss implementation class. Creates a criterion that measures
-    the error between each element in the input :math:`x` and
-    target :math:`y` powered to a specific integer.
+    Implementation of the Power Loss. It defines a criterion to measure the
+    pointwise error between values in the input :math:`x` and values in the
+    target :math:`y`.

-    The unreduced (i.e. with ``reduction`` set to ``none``) loss can
-    be described as:
+    If ``reduction`` is set to ``none``, the loss can be written as:

     .. math::
         \ell(x, y) = L = \{l_1,\dots,l_N\}^\top, \quad
         l_n = \frac{1}{D}\left[\sum_{i=1}^{D}
         \left| x_n^i - y_n^i \right|^p\right],

-    If ``'relative'`` is set to true:
+    If ``relative`` is set to ``True``, the relative error is computed:

     .. math::
         \ell(x, y) = L = \{l_1,\dots,l_N\}^\top, \quad
         l_n = \frac{ \sum_{i=1}^{D} | x_n^i - y_n^i|^p }
         {\sum_{i=1}^{D}|y_n^i|^p},

-    where :math:`N` is the batch size. If ``reduction`` is not ``none``
-    (default ``mean``), then:
+    where :math:`N` is the batch size.
+
+    If ``reduction`` is not ``none``, then:

     .. math::
         \ell(x, y) =
@@ -36,30 +36,21 @@ class PowerLoss(LossInterface):
         \operatorname{mean}(L), & \text{if reduction} = \text{`mean';}\\
         \operatorname{sum}(L), & \text{if reduction} = \text{`sum'.}
         \end{cases}

-    :math:`x` and :math:`y` are tensors of arbitrary shapes with a total
-    of :math:`n` elements each.
-
-    The sum operation still operates over all the elements, and divides by
-    :math:`n`.
-
-    The division by :math:`n` can be avoided if one sets ``reduction`` to
-    ``sum``.
     """

     def __init__(self, p=2, reduction="mean", relative=False):
         """
-        :param int p: Degree of Lp norm. It specifies the type of norm to
-            be calculated. See `torch.linalg.norm
-            <https://pytorch.org/docs/stable/generated/
-            torch.linalg.norm.html#torch.linalg.norm>`_ to
-            see the possible degrees. Default 2 (euclidean norm).
-        :param str reduction: Specifies the reduction to apply to the output:
-            ``none`` | ``mean`` | ``sum``. When ``none``: no reduction
-            will be applied, ``mean``: the sum of the output will be divided
-            by the number of elements in the output, ``sum``: the output will
-            be summed.
-        :param bool relative: Specifies if relative error should be computed.
+        Initialization of the :class:`PowerLoss` class.
+
+        :param int p: Degree of the Lp norm. It specifies the norm to be
+            computed. Default is ``2`` (euclidean norm).
+        :param str reduction: The reduction method for the loss.
+            Available options: ``none``, ``mean``, ``sum``.
+            If ``none``, no reduction is applied. If ``mean``, the sum of the
+            loss values is divided by the number of values. If ``sum``, the loss
+            values are summed. Default is ``mean``.
+        :param bool relative: If ``True``, the relative error is computed.
+            Default is ``False``.
         """
         super().__init__(reduction=reduction)
@@ -71,7 +62,8 @@ class PowerLoss(LossInterface):
         self.relative = relative

     def forward(self, input, target):
-        """Forward method for loss function.
+        """
+        Forward method of the loss function.

         :param torch.Tensor input: Input tensor from real data.
         :param torch.Tensor target: Model tensor output.
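Per the two docstrings, the criteria differ only in the :math:`1/D` factor: ``PowerLoss`` averages the pointwise errors over the :math:`D` components, while ``LpLoss`` sums them. A hedged side-by-side (no outputs asserted, since the implementations are not part of this diff):

    import torch
    from pina.loss import LpLoss, PowerLoss

    x, y = torch.zeros(1, 4), torch.ones(1, 4)

    # l_n = sum_i |x - y|^p           (LpLoss formula above)
    # l_n = (1/D) * sum_i |x - y|^p   (PowerLoss formula above)
    print(LpLoss(p=2, reduction="none")(x, y))
    print(PowerLoss(p=2, reduction="none")(x, y))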

View File

@@ -1,20 +1,41 @@
-"""Module for Loss Interface"""
+"""Module for the Scalar Weighting."""

 from .weighting_interface import WeightingInterface
 from ..utils import check_consistency

 class _NoWeighting(WeightingInterface):
+    """
+    Weighting scheme that does not apply any weighting to the losses.
+    """

     def aggregate(self, losses):
+        """
+        Aggregate the losses.
+
+        :param dict losses: The dictionary of losses.
+        :return: The aggregated losses.
+        :rtype: torch.Tensor
+        """
         return sum(losses.values())

 class ScalarWeighting(WeightingInterface):
     """
-    TODO
+    Weighting scheme that assigns a scalar weight to each loss term.
     """

     def __init__(self, weights):
+        """
+        Initialization of the :class:`ScalarWeighting` class.
+
+        :param weights: The weights to be assigned to each loss term.
+            If a single scalar value is provided, it is assigned to all loss
+            terms. If a dictionary is provided, the keys are the conditions and
+            the values are the weights. If a condition is not present in the
+            dictionary, the default value is used.
+        :type weights: float | int | dict
+        """
         super().__init__()
         check_consistency([weights], (float, dict, int))
         if isinstance(weights, (float, int)):
@@ -28,8 +49,8 @@ class ScalarWeighting(WeightingInterface):
         """
         Aggregate the losses.

-        :param dict(torch.Tensor) losses: The dictionary of losses.
-        :return: The losses aggregation. It should be a scalar Tensor.
+        :param dict losses: The dictionary of losses.
+        :return: The aggregated losses.
         :rtype: torch.Tensor
         """
         return sum(
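A sketch of the dictionary form described in the new docstring (condition names are invented; the import path assumes the weighting classes are exported alongside the losses, as the package docstring suggests):

    import torch
    from pina.loss import ScalarWeighting

    # One weight per condition; conditions missing from the dict fall back
    # to the default value, per the docstring above.
    weighting = ScalarWeighting(weights={"physics": 10.0, "data": 1.0})

    losses = {"physics": torch.tensor(0.2), "data": torch.tensor(0.5)}
    print(weighting.aggregate(losses))  # 10.0 * 0.2 + 1.0 * 0.5 = tensor(2.5)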

View File

@@ -1,14 +1,18 @@
-"""Module for Loss Interface"""
+"""Module for the Weighting Interface"""

 from abc import ABCMeta, abstractmethod

 class WeightingInterface(metaclass=ABCMeta):
     """
-    The ``weightingInterface`` class. TODO
+    Abstract base class for all loss weighting schemas. All weighting schemas
+    should inherit from this class.
     """

     def __init__(self):
+        """
+        Initialization of the :class:`WeightingInterface` class.
+        """
         self.condition_names = None

     @abstractmethod
@@ -16,7 +20,5 @@ class WeightingInterface(metaclass=ABCMeta):
         """
         Aggregate the losses.

-        :param dict(torch.Tensor) input: The dictionary of losses.
-        :return: The losses aggregation. It should be a scalar Tensor.
-        :rtype: torch.Tensor
+        :param dict losses: The dictionary of losses.
         """