fix doc loss and codacy

Author:    giovanni
Date:      2025-03-12 18:05:42 +01:00
Committer: FilippoOlivo
Parent:    2c9e980c7f
Commit:    da1ac90b99

15 changed files with 114 additions and 108 deletions


@@ -270,7 +270,8 @@ class CartesianDomain(DomainInterface):
         :param LabelTensor point: Point to be checked.
         :param bool check_border: If ``True``, the border is considered inside
             the hypercube. Default is ``False``.
-        :return: ``True`` if the point is inside the domain, ``False`` otherwise.
+        :return: ``True`` if the point is inside the domain,
+            ``False`` otherwise.
         :rtype: bool
         """
         is_inside = []
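
For reference, a minimal sketch of the ``check_border`` behaviour documented
above (the extrema dictionary, variable names, and point values are
illustrative, assuming the usual ``pina`` imports):

    import torch
    from pina import LabelTensor
    from pina.domain import CartesianDomain

    # Unit square with labeled variables x and y.
    square = CartesianDomain({"x": [0, 1], "y": [0, 1]})

    # A point lying exactly on the border of the hypercube.
    point = LabelTensor(torch.tensor([[0.0, 0.5]]), ["x", "y"])

    print(square.is_inside(point))                     # False: border excluded
    print(square.is_inside(point, check_border=True))  # True: border included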


@@ -45,7 +45,8 @@ class Difference(OperationInterface):
         :param LabelTensor point: Point to be checked.
         :param bool check_border: If ``True``, the border is considered inside
             the domain. Default is ``False``.
-        :return: ``True`` if the point is inside the domain, ``False`` otherwise.
+        :return: ``True`` if the point is inside the domain,
+            ``False`` otherwise.
         :rtype: bool
         """
         for geometry in self.geometries[1:]:


@@ -18,7 +18,7 @@ class EllipsoidDomain(DomainInterface):
         :param dict ellipsoid_dict: A dictionary where the keys are the variable
             names and the values are the domain extrema.
         :param bool sample_surface: A flag to choose the sampling strategy.
-            If ``True``, samples are taken only from the surface of the ellipsoid.
+            If ``True``, samples are taken from the surface of the ellipsoid.
             If ``False``, samples are taken from the interior of the ellipsoid.
             Default is ``False``.
         :raises TypeError: If the input dictionary is not correctly formatted.
@@ -26,7 +26,8 @@ class EllipsoidDomain(DomainInterface):
         .. warning::
             Sampling for dimensions greater or equal to 10 could result in a
             shrinkage of the ellipsoid, which degrades the quality of the
-            samples. For dimensions higher than 10, use other sampling algorithms.
+            samples. For dimensions higher than 10, use other sampling
+            algorithms.

         .. seealso::
             **Original reference**: Dezert, Jean, and Musso, Christian.
             *An efficient method for generating points uniformly distributed
@@ -101,7 +102,8 @@ class EllipsoidDomain(DomainInterface):
             the ellipsoid. Default is ``False``.
         :raises ValueError: If the labels of the point are different from those
             passed in the ``__init__`` method.
-        :return: ``True`` if the point is inside the domain, ``False`` otherwise.
+        :return: ``True`` if the point is inside the domain,
+            ``False`` otherwise.
         :rtype: bool

         .. note::
@@ -147,7 +149,6 @@ class EllipsoidDomain(DomainInterface):
         return bool(eqn < 0)

     def _sample_range(self, n, mode, variables):
-        """"""
         """
         Rescale the samples to fit within the specified bounds.
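
To make the ``sample_surface`` flag concrete, a short sketch (the variable
names and extrema are illustrative, and ``sample`` is assumed to follow the
usual ``DomainInterface`` signature):

    from pina.domain import EllipsoidDomain

    # Unit disk in 2D: keys are variable names, values the extrema.
    interior = EllipsoidDomain({"x": [-1, 1], "y": [-1, 1]})
    boundary = EllipsoidDomain({"x": [-1, 1], "y": [-1, 1]}, sample_surface=True)

    pts_in = interior.sample(n=100)  # points drawn from the interior
    pts_on = boundary.sample(n=100)  # points drawn from the surface only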


@@ -44,7 +44,8 @@ class Exclusion(OperationInterface):
         :param LabelTensor point: Point to be checked.
         :param bool check_border: If ``True``, the border is considered inside
             the domain. Default is ``False``.
-        :return: ``True`` if the point is inside the domain, ``False`` otherwise.
+        :return: ``True`` if the point is inside the domain,
+            ``False`` otherwise.
         :rtype: bool
         """
         flag = 0


@@ -24,8 +24,8 @@ class Intersection(OperationInterface):
         Initialization of the :class:`Intersection` class.

         :param list[DomainInterface] geometries: A list of instances of the
-            :class:`~pina.domain.DomainInterface` class on which the intersection
-            operation is performed.
+            :class:`~pina.domain.DomainInterface` class on which the
+            intersection operation is performed.

         :Example:
             >>> # Create two ellipsoid domains
@@ -43,7 +43,8 @@ class Intersection(OperationInterface):
         :param LabelTensor point: Point to be checked.
         :param bool check_border: If ``True``, the border is considered inside
             the domain. Default is ``False``.
-        :return: ``True`` if the point is inside the domain, ``False`` otherwise.
+        :return: ``True`` if the point is inside the domain,
+            ``False`` otherwise.
         :rtype: bool
         """
         flag = 0


@@ -70,7 +70,8 @@ class OperationInterface(DomainInterface, metaclass=ABCMeta):
         :param LabelTensor point: Point to be checked.
         :param bool check_border: If ``True``, the border is considered inside
             the resulting domain. Default is ``False``.
-        :return: ``True`` if the point is inside the domain, ``False`` otherwise.
+        :return: ``True`` if the point is inside the domain,
+            ``False`` otherwise.
         :rtype: bool
         """


@@ -98,7 +98,7 @@ class SimplexDomain(DomainInterface):
"""
Build the cartesian border for a simplex domain to be used in sampling.
:param list[LabelTensor] vertices: Matrix of vertices defining the domain.
:param list[LabelTensor] vertices: list of vertices defining the domain.
:return: The cartesian border for the simplex domain.
:rtype: CartesianDomain
"""
@@ -124,7 +124,8 @@ class SimplexDomain(DomainInterface):
             the simplex. Default is ``False``.
         :raises ValueError: If the labels of the point are different from those
             passed in the ``__init__`` method.
-        :return: ``True`` if the point is inside the domain, ``False`` otherwise.
+        :return: ``True`` if the point is inside the domain,
+            ``False`` otherwise.
         :rtype: bool
         """


@@ -51,7 +51,8 @@ class Union(OperationInterface):
         :param LabelTensor point: Point to be checked.
         :param bool check_border: If ``True``, the border is considered inside
             the domain. Default is ``False``.
-        :return: ``True`` if the point is inside the domain, ``False`` otherwise.
+        :return: ``True`` if the point is inside the domain,
+            ``False`` otherwise.
         :rtype: bool
         """
         for geometry in self.geometries:
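
All four set operations share the ``is_inside`` contract documented above; a
small sketch of how they compose (the domains and the test point are
illustrative):

    import torch
    from pina import LabelTensor
    from pina.domain import CartesianDomain, Difference, Intersection, Union

    a = CartesianDomain({"x": [0, 2], "y": [0, 2]})
    b = CartesianDomain({"x": [1, 3], "y": [1, 3]})
    p = LabelTensor(torch.tensor([[0.5, 0.5]]), ["x", "y"])

    print(Union([a, b]).is_inside(p))         # True: p is inside a
    print(Intersection([a, b]).is_inside(p))  # False: p is not inside b
    print(Difference([a, b]).is_inside(p))    # True: inside a but not b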


@@ -53,7 +53,8 @@ class SystemEquation(EquationInterface):
         aggregate it according to the ``reduction`` specified in the
         ``__init__`` method.

-        :param LabelTensor input_: Input points where each equation is evaluated.
+        :param LabelTensor input_: Input points where each equation of the
+            system is evaluated.
         :param LabelTensor output_: Output tensor, eventually produced by a
             :class:`~torch.nn.Module` instance.
         :param dict params_: Dictionary of unknown parameters, associated with a


@@ -1,6 +1,4 @@
"""
Module for loss functions and weighting functions.
"""
"""Module for loss functions and weighting functions."""
__all__ = [
"LossInterface",


@@ -1,4 +1,4 @@
"""Module for Loss Interface"""
"""Module for the Loss Interface"""
from abc import ABCMeta, abstractmethod
from torch.nn.modules.loss import _Loss
@@ -7,45 +7,37 @@ import torch
 class LossInterface(_Loss, metaclass=ABCMeta):
     """
-    The abstract ``LossInterface`` class. All the class defining a PINA Loss
-    should be inheritied from this class.
+    Abstract base class for all losses. All classes defining a loss function
+    should inherit from this interface.
     """

     def __init__(self, reduction="mean"):
         """
-        :param str reduction: Specifies the reduction to apply to the output:
-            ``none`` | ``mean`` | ``sum``. When ``none``: no reduction
-            will be applied, ``mean``: the sum of the output will be divided
-            by the number of elements in the output, ``sum``: the output will
-            be summed. Note: ``size_average`` and ``reduce`` are in the
-            process of being deprecated, and in the meantime, specifying either
-            of those two args will override ``reduction``. Default: ``mean``.
+        Initialization of the :class:`LossInterface` class.
+
+        :param str reduction: The reduction method for the loss.
+            Available options: ``none``, ``mean``, ``sum``.
+            If ``none``, no reduction is applied. If ``mean``, the sum of the
+            loss values is divided by the number of values. If ``sum``, the loss
+            values are summed. Default is ``mean``.
         """
         super().__init__(reduction=reduction, size_average=None, reduce=None)

     @abstractmethod
     def forward(self, input, target):
-        """Forward method for loss function.
+        """
+        Forward method of the loss function.

         :param torch.Tensor input: Input tensor from real data.
         :param torch.Tensor target: Model tensor output.
         :return: Loss evaluation.
         :rtype: torch.Tensor
         """

     def _reduction(self, loss):
-        """Simple helper function to check reduction
+        """
+        Apply the reduction to the loss.

-        :param reduction: Specifies the reduction to apply to the output:
-            ``none`` | ``mean`` | ``sum``. When ``none``: no reduction
-            will be applied, ``mean``: the sum of the output will be divided
-            by the number of elements in the output, ``sum``: the output will
-            be summed. Note: ``size_average`` and ``reduce`` are in the
-            process of being deprecated, and in the meantime, specifying either
-            of those two args will override ``reduction``. Default: ``mean``.
-        :type reduction: str
-        :param loss: Loss tensor for each element.
-        :type loss: torch.Tensor
+        :param torch.Tensor loss: The tensor containing the pointwise losses.
+        :raises ValueError: If the reduction method is not valid.
         :return: Reduced loss.
         :rtype: torch.Tensor
         """


@@ -8,26 +8,26 @@ from .loss_interface import LossInterface
 class LpLoss(LossInterface):
     r"""
-    The Lp loss implementation class. Creates a criterion that measures
-    the Lp error between each element in the input :math:`x` and
+    Implementation of the Lp Loss. It defines a criterion to measure the
+    pointwise Lp error between values in the input :math:`x` and values in the
     target :math:`y`.

-    The unreduced (i.e. with ``reduction`` set to ``none``) loss can
-    be described as:
+    If ``reduction`` is set to ``none``, the loss can be written as:

     .. math::
         \ell(x, y) = L = \{l_1,\dots,l_N\}^\top, \quad
         l_n = \left[\sum_{i=1}^{D} \left| x_n^i - y_n^i \right|^p \right],

-    If ``'relative'`` is set to true:
+    If ``relative`` is set to ``True``, the relative Lp error is computed:

     .. math::
         \ell(x, y) = L = \{l_1,\dots,l_N\}^\top, \quad
         l_n = \frac{ [\sum_{i=1}^{D} | x_n^i - y_n^i|^p] }
         {[\sum_{i=1}^{D}|y_n^i|^p]},

-    where :math:`N` is the batch size. If ``reduction`` is not ``none``
-    (default ``mean``), then:
+    where :math:`N` is the batch size.
+    If ``reduction`` is not ``none``, then:

     .. math::
         \ell(x, y) =
@@ -35,30 +35,21 @@ class LpLoss(LossInterface):
         \operatorname{mean}(L), & \text{if reduction} = \text{`mean';}\\
         \operatorname{sum}(L), & \text{if reduction} = \text{`sum'.}
         \end{cases}

-    :math:`x` and :math:`y` are tensors of arbitrary shapes with a total
-    of :math:`n` elements each.
-    The sum operation still operates over all the elements, and divides by
-    :math:`n`.
-    The division by :math:`n` can be avoided if one sets ``reduction`` to
-    ``sum``.
     """

     def __init__(self, p=2, reduction="mean", relative=False):
         """
-        :param int p: Degree of Lp norm. It specifies the type of norm to
-            be calculated. See `list of possible orders in torch linalg
-            `torch.linalg.norm <https://pytorch.org/docs/stable/generated/
-            torch.linalg.norm.html#torch.linalg.norm>`_
-            for possible degrees. Default 2 (euclidean norm).
-        :param str reduction: Specifies the reduction to apply to the output:
-            ``none`` | ``mean`` | ``sum``. ``none``: no reduction
-            will be applied, ``mean``: the sum of the output will be divided
-            by the number of elements in the output, ``sum``: the output will
-            be summed.
-        :param bool relative: Specifies if relative error should be computed.
+        Initialization of the :class:`LpLoss` class.
+
+        :param int p: Degree of the Lp norm. It specifies the norm to be
+            computed. Default is ``2`` (euclidean norm).
+        :param str reduction: The reduction method for the loss.
+            Available options: ``none``, ``mean``, ``sum``.
+            If ``none``, no reduction is applied. If ``mean``, the sum of the
+            loss values is divided by the number of values. If ``sum``, the loss
+            values are summed. Default is ``mean``.
+        :param bool relative: If ``True``, the relative error is computed.
+            Default is ``False``.
         """
         super().__init__(reduction=reduction)
@@ -70,7 +61,8 @@ class LpLoss(LossInterface):
         self.relative = relative

     def forward(self, input, target):
-        """Forward method for loss function.
+        """
+        Forward method of the loss function.

         :param torch.Tensor input: Input tensor from real data.
         :param torch.Tensor target: Model tensor output.
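
Usage follows directly from the formulas above; a short sketch with
illustrative tensors (per the docstring, ``reduction="none"`` keeps one
value per batch entry):

    import torch
    from pina.loss import LpLoss

    x = torch.tensor([[1.0, 2.0], [3.0, 4.0]])
    y = torch.tensor([[1.5, 2.0], [2.0, 4.0]])

    absolute = LpLoss(p=2, reduction="none")  # one l_n per sample
    relative = LpLoss(p=2, relative=True)     # relative error, mean-reduced

    print(absolute(x, y))  # tensor with one entry per sample
    print(relative(x, y))  # scalar tensor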


@@ -8,27 +8,27 @@ from .loss_interface import LossInterface
 class PowerLoss(LossInterface):
     r"""
-    The PowerLoss loss implementation class. Creates a criterion that measures
-    the error between each element in the input :math:`x` and
-    target :math:`y` powered to a specific integer.
+    Implementation of the Power Loss. It defines a criterion to measure the
+    pointwise error between values in the input :math:`x` and values in the
+    target :math:`y`.

-    The unreduced (i.e. with ``reduction`` set to ``none``) loss can
-    be described as:
+    If ``reduction`` is set to ``none``, the loss can be written as:

     .. math::
         \ell(x, y) = L = \{l_1,\dots,l_N\}^\top, \quad
         l_n = \frac{1}{D}\left[\sum_{i=1}^{D}
         \left| x_n^i - y_n^i \right|^p\right],

-    If ``'relative'`` is set to true:
+    If ``relative`` is set to ``True``, the relative error is computed:

     .. math::
         \ell(x, y) = L = \{l_1,\dots,l_N\}^\top, \quad
         l_n = \frac{ \sum_{i=1}^{D} | x_n^i - y_n^i|^p }
         {\sum_{i=1}^{D}|y_n^i|^p},

-    where :math:`N` is the batch size. If ``reduction`` is not ``none``
-    (default ``mean``), then:
+    where :math:`N` is the batch size.
+    If ``reduction`` is not ``none``, then:

     .. math::
         \ell(x, y) =
@@ -36,30 +36,21 @@ class PowerLoss(LossInterface):
         \operatorname{mean}(L), & \text{if reduction} = \text{`mean';}\\
         \operatorname{sum}(L), & \text{if reduction} = \text{`sum'.}
         \end{cases}

-    :math:`x` and :math:`y` are tensors of arbitrary shapes with a total
-    of :math:`n` elements each.
-    The sum operation still operates over all the elements, and divides by
-    :math:`n`.
-    The division by :math:`n` can be avoided if one sets ``reduction`` to
-    ``sum``.
     """

     def __init__(self, p=2, reduction="mean", relative=False):
         """
-        :param int p: Degree of Lp norm. It specifies the type of norm to
-            be calculated. See `list of possible orders in torch linalg
-            <https://pytorch.org/docs/stable/generated/
-            torch.linalg.norm.html#torch.linalg.norm>`_ to
-            see the possible degrees. Default 2 (euclidean norm).
-        :param str reduction: Specifies the reduction to apply to the output:
-            ``none`` | ``mean`` | ``sum``. When ``none``: no reduction
-            will be applied, ``mean``: the sum of the output will be divided
-            by the number of elements in the output, ``sum``: the output will
-            be summed.
-        :param bool relative: Specifies if relative error should be computed.
+        Initialization of the :class:`PowerLoss` class.
+
+        :param int p: Degree of the Lp norm. It specifies the norm to be
+            computed. Default is ``2`` (euclidean norm).
+        :param str reduction: The reduction method for the loss.
+            Available options: ``none``, ``mean``, ``sum``.
+            If ``none``, no reduction is applied. If ``mean``, the sum of the
+            loss values is divided by the number of values. If ``sum``, the loss
+            values are summed. Default is ``mean``.
+        :param bool relative: If ``True``, the relative error is computed.
+            Default is ``False``.
         """
         super().__init__(reduction=reduction)
@@ -71,7 +62,8 @@ class PowerLoss(LossInterface):
         self.relative = relative

     def forward(self, input, target):
-        """Forward method for loss function.
+        """
+        Forward method of the loss function.

         :param torch.Tensor input: Input tensor from real data.
         :param torch.Tensor target: Model tensor output.
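
The only difference from ``LpLoss`` is the :math:`1/D` factor in front of
the sum: for the same inputs, ``PowerLoss`` averages the pointwise errors
over the :math:`D` components instead of summing them. A sketch under the
formulas above:

    import torch
    from pina.loss import LpLoss, PowerLoss

    x = torch.tensor([[0.0, 0.0]])  # N = 1 sample, D = 2 components
    y = torch.tensor([[1.0, 1.0]])

    print(LpLoss(p=2, reduction="none")(x, y))     # sum of |x - y|^2 -> 2.0
    print(PowerLoss(p=2, reduction="none")(x, y))  # mean of |x - y|^2 -> 1.0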


@@ -1,20 +1,41 @@
"""Module for Loss Interface"""
"""Module for the Scalar Weighting."""
from .weighting_interface import WeightingInterface
from ..utils import check_consistency
class _NoWeighting(WeightingInterface):
"""
Weighting scheme that does not apply any weighting to the losses.
"""
def aggregate(self, losses):
"""
Aggregate the losses.
:param dict losses: The dictionary of losses.
:return: The aggregated losses.
:rtype: torch.Tensor
"""
return sum(losses.values())
class ScalarWeighting(WeightingInterface):
"""
TODO
Weighting scheme that assigns a scalar weight to each loss term.
"""
def __init__(self, weights):
"""
Initialization of the :class:`ScalarWeighting` class.
:param weights: The weights to be assigned to each loss term.
If a single scalar value is provided, it is assigned to all loss
terms. If a dictionary is provided, the keys are the conditions and
the values are the weights. If a condition is not present in the
dictionary, the default value is used.
:type weights: float | int | dict
"""
super().__init__()
check_consistency([weights], (float, dict, int))
if isinstance(weights, (float, int)):
@@ -28,8 +49,8 @@ class ScalarWeighting(WeightingInterface):
"""
Aggregate the losses.
:param dict(torch.Tensor) losses: The dictionary of losses.
:return: The losses aggregation. It should be a scalar Tensor.
:param dict losses: The dictionary of losses.
:return: The aggregated losses.
:rtype: torch.Tensor
"""
return sum(
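
A sketch of the intended use (the condition names and weights are
illustrative, and ``ScalarWeighting`` is assumed to be exported from
``pina.loss``; per the docstring, a plain scalar would weight every
condition equally):

    import torch
    from pina.loss import ScalarWeighting

    weighting = ScalarWeighting(weights={"physics": 10.0, "data": 1.0})

    losses = {"physics": torch.tensor(0.2), "data": torch.tensor(0.5)}
    total = weighting.aggregate(losses)  # 10.0 * 0.2 + 1.0 * 0.5 = 2.5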


@@ -1,14 +1,18 @@
"""Module for Loss Interface"""
"""Module for the Weighting Interface"""
from abc import ABCMeta, abstractmethod
class WeightingInterface(metaclass=ABCMeta):
"""
The ``weightingInterface`` class. TODO
Abstract base class for all loss weighting schemas. All weighting schemas
should inherit from this class.
"""
def __init__(self):
"""
Initialization of the :class:`WeightingInterface` class.
"""
self.condition_names = None
@abstractmethod
@@ -16,7 +20,5 @@ class WeightingInterface(metaclass=ABCMeta):
"""
Aggregate the losses.
:param dict(torch.Tensor) input: The dictionary of losses.
:return: The losses aggregation. It should be a scalar Tensor.
:rtype: torch.Tensor
:param dict losses: The dictionary of losses.
"""