fix doc loss and codacy
@@ -1,6 +1,4 @@
-"""
-Module for loss functions and weighting functions.
-"""
+"""Module for loss functions and weighting functions."""
 
 __all__ = [
     "LossInterface",
@@ -1,4 +1,4 @@
-"""Module for Loss Interface"""
+"""Module for the Loss Interface"""
 
 from abc import ABCMeta, abstractmethod
 from torch.nn.modules.loss import _Loss
@@ -7,45 +7,37 @@ import torch
 
 class LossInterface(_Loss, metaclass=ABCMeta):
     """
-    The abstract ``LossInterface`` class. All the class defining a PINA Loss
-    should be inheritied from this class.
+    Abstract base class for all losses. All classes defining a loss function
+    should inherit from this interface.
     """
 
     def __init__(self, reduction="mean"):
         """
-        :param str reduction: Specifies the reduction to apply to the output:
-            ``none`` | ``mean`` | ``sum``. When ``none``: no reduction
-            will be applied, ``mean``: the sum of the output will be divided
-            by the number of elements in the output, ``sum``: the output will
-            be summed. Note: ``size_average`` and ``reduce`` are in the
-            process of being deprecated, and in the meantime, specifying either
-            of those two args will override ``reduction``. Default: ``mean``.
+        Initialization of the :class:`LossInterface` class.
+
+        :param str reduction: The reduction method for the loss.
+            Available options: ``none``, ``mean``, ``sum``.
+            If ``none``, no reduction is applied. If ``mean``, the sum of the
+            loss values is divided by the number of values. If ``sum``, the loss
+            values are summed. Default is ``mean``.
         """
         super().__init__(reduction=reduction, size_average=None, reduce=None)
 
     @abstractmethod
     def forward(self, input, target):
-        """Forward method for loss function.
+        """
+        Forward method of the loss function.
 
         :param torch.Tensor input: Input tensor from real data.
         :param torch.Tensor target: Model tensor output.
         :return: Loss evaluation.
         :rtype: torch.Tensor
         """
 
     def _reduction(self, loss):
-        """Simple helper function to check reduction
+        """
+        Apply the reduction to the loss.
 
-        :param reduction: Specifies the reduction to apply to the output:
-            ``none`` | ``mean`` | ``sum``. When ``none``: no reduction
-            will be applied, ``mean``: the sum of the output will be divided
-            by the number of elements in the output, ``sum``: the output will
-            be summed. Note: ``size_average`` and ``reduce`` are in the
-            process of being deprecated, and in the meantime, specifying either
-            of those two args will override ``reduction``. Default: ``mean``.
-        :type reduction: str
-        :param loss: Loss tensor for each element.
-        :type loss: torch.Tensor
+        :param torch.Tensor loss: The tensor containing the pointwise losses.
+        :raises ValueError: If the reduction method is not valid.
        :return: Reduced loss.
        :rtype: torch.Tensor
        """
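
Note: a concrete loss only needs to subclass ``LossInterface``, implement ``forward``, and delegate the reduction to the ``_reduction`` helper documented above. A minimal sketch, assuming the class is importable from ``pina.loss`` as the ``__all__`` in the first hunk suggests (the ``L1Loss`` name is illustrative, not part of this commit):

import torch
from pina.loss import LossInterface

class L1Loss(LossInterface):
    """Pointwise absolute-error loss built on top of the interface."""

    def __init__(self, reduction="mean"):
        super().__init__(reduction=reduction)

    def forward(self, input, target):
        # Pointwise losses first, then the reduction chosen at construction.
        return self._reduction(torch.abs(input - target))

loss = L1Loss(reduction="mean")
x = torch.tensor([1.0, 2.0, 3.0])
y = torch.tensor([1.5, 2.0, 2.0])
print(loss(x, y))  # tensor(0.5000) -> (0.5 + 0.0 + 1.0) / 3
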
@@ -8,26 +8,26 @@ from .loss_interface import LossInterface
 
 class LpLoss(LossInterface):
     r"""
-    The Lp loss implementation class. Creates a criterion that measures
-    the Lp error between each element in the input :math:`x` and
+    Implementation of the Lp Loss. It defines a criterion to measure the
+    pointwise Lp error between values in the input :math:`x` and values in the
     target :math:`y`.
 
-    The unreduced (i.e. with ``reduction`` set to ``none``) loss can
-    be described as:
+    If ``reduction`` is set to ``none``, the loss can be written as:
 
     .. math::
         \ell(x, y) = L = \{l_1,\dots,l_N\}^\top, \quad
         l_n = \left[\sum_{i=1}^{D} \left| x_n^i - y_n^i \right|^p \right],
 
-    If ``'relative'`` is set to true:
+    If ``relative`` is set to ``True``, the relative Lp error is computed:
 
     .. math::
         \ell(x, y) = L = \{l_1,\dots,l_N\}^\top, \quad
         l_n = \frac{ [\sum_{i=1}^{D} | x_n^i - y_n^i|^p] }
         {[\sum_{i=1}^{D}|y_n^i|^p]},
 
-    where :math:`N` is the batch size. If ``reduction`` is not ``none``
-    (default ``mean``), then:
+    where :math:`N` is the batch size.
+
+    If ``reduction`` is not ``none``, then:
 
     .. math::
         \ell(x, y) =
@@ -35,30 +35,21 @@ class LpLoss(LossInterface):
         \operatorname{mean}(L), & \text{if reduction} = \text{`mean';}\\
         \operatorname{sum}(L), & \text{if reduction} = \text{`sum'.}
         \end{cases}
 
-    :math:`x` and :math:`y` are tensors of arbitrary shapes with a total
-    of :math:`n` elements each.
-
-    The sum operation still operates over all the elements, and divides by
-    :math:`n`.
-
-    The division by :math:`n` can be avoided if one sets ``reduction`` to
-    ``sum``.
     """
 
     def __init__(self, p=2, reduction="mean", relative=False):
         """
-        :param int p: Degree of Lp norm. It specifies the type of norm to
-            be calculated. See `list of possible orders in torch linalg
-            `torch.linalg.norm <https://pytorch.org/docs/stable/generated/
-            torch.linalg.norm.html#torch.linalg.norm>`_
-            for possible degrees. Default 2 (euclidean norm).
-        :param str reduction: Specifies the reduction to apply to the output:
-            ``none`` | ``mean`` | ``sum``. ``none``: no reduction
-            will be applied, ``mean``: the sum of the output will be divided
-            by the number of elements in the output, ``sum``: the output will
-            be summed.
-        :param bool relative: Specifies if relative error should be computed.
+        Initialization of the :class:`LpLoss` class.
+
+        :param int p: Degree of the Lp norm. It specifies the norm to be
+            computed. Default is ``2`` (euclidean norm).
+        :param str reduction: The reduction method for the loss.
+            Available options: ``none``, ``mean``, ``sum``.
+            If ``none``, no reduction is applied. If ``mean``, the sum of the
+            loss values is divided by the number of values. If ``sum``, the loss
+            values are summed. Default is ``mean``.
+        :param bool relative: If ``True``, the relative error is computed.
+            Default is ``False``.
         """
         super().__init__(reduction=reduction)
@@ -70,7 +61,8 @@ class LpLoss(LossInterface):
         self.relative = relative
 
     def forward(self, input, target):
-        """Forward method for loss function.
+        """
+        Forward method of the loss function.
 
         :param torch.Tensor input: Input tensor from real data.
         :param torch.Tensor target: Model tensor output.
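
Note: to make the documented formulas concrete, the unreduced and relative forms transcribe directly into plain torch. This is a sketch of the math as written in the new docstring, not a copy of the class's actual implementation:

import torch

def lp_unreduced(x, y, p=2, relative=False):
    # l_n = sum_i |x_n^i - y_n^i|^p, optionally divided by sum_i |y_n^i|^p
    l = (x - y).abs().pow(p).sum(dim=-1)
    if relative:
        l = l / y.abs().pow(p).sum(dim=-1)
    return l

x = torch.tensor([[1.0, 2.0], [3.0, 4.0]])  # N = 2 samples, D = 2 components
y = torch.tensor([[1.0, 0.0], [0.0, 4.0]])
print(lp_unreduced(x, y))         # tensor([4., 9.])
print(lp_unreduced(x, y).mean())  # tensor(6.5000), the default ``mean`` reduction
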
@@ -8,27 +8,27 @@ from .loss_interface import LossInterface
 
 class PowerLoss(LossInterface):
     r"""
-    The PowerLoss loss implementation class. Creates a criterion that measures
-    the error between each element in the input :math:`x` and
-    target :math:`y` powered to a specific integer.
+    Implementation of the Power Loss. It defines a criterion to measure the
+    pointwise error between values in the input :math:`x` and values in the
+    target :math:`y`.
 
-    The unreduced (i.e. with ``reduction`` set to ``none``) loss can
-    be described as:
+    If ``reduction`` is set to ``none``, the loss can be written as:
 
     .. math::
         \ell(x, y) = L = \{l_1,\dots,l_N\}^\top, \quad
         l_n = \frac{1}{D}\left[\sum_{i=1}^{D}
         \left| x_n^i - y_n^i \right|^p\right],
 
-    If ``'relative'`` is set to true:
+    If ``relative`` is set to ``True``, the relative error is computed:
 
     .. math::
         \ell(x, y) = L = \{l_1,\dots,l_N\}^\top, \quad
         l_n = \frac{ \sum_{i=1}^{D} | x_n^i - y_n^i|^p }
         {\sum_{i=1}^{D}|y_n^i|^p},
 
-    where :math:`N` is the batch size. If ``reduction`` is not ``none``
-    (default ``mean``), then:
+    where :math:`N` is the batch size.
+
+    If ``reduction`` is not ``none``, then:
 
     .. math::
         \ell(x, y) =
@@ -36,30 +36,21 @@ class PowerLoss(LossInterface):
         \operatorname{mean}(L), & \text{if reduction} = \text{`mean';}\\
         \operatorname{sum}(L), & \text{if reduction} = \text{`sum'.}
         \end{cases}
 
-    :math:`x` and :math:`y` are tensors of arbitrary shapes with a total
-    of :math:`n` elements each.
-
-    The sum operation still operates over all the elements, and divides by
-    :math:`n`.
-
-    The division by :math:`n` can be avoided if one sets ``reduction`` to
-    ``sum``.
     """
 
     def __init__(self, p=2, reduction="mean", relative=False):
         """
-        :param int p: Degree of Lp norm. It specifies the type of norm to
-            be calculated. See `list of possible orders in torch linalg
-            <https://pytorch.org/docs/stable/generated/
-            torch.linalg.norm.html#torch.linalg.norm>`_ to
-            see the possible degrees. Default 2 (euclidean norm).
-        :param str reduction: Specifies the reduction to apply to the output:
-            ``none`` | ``mean`` | ``sum``. When ``none``: no reduction
-            will be applied, ``mean``: the sum of the output will be divided
-            by the number of elements in the output, ``sum``: the output will
-            be summed.
-        :param bool relative: Specifies if relative error should be computed.
+        Initialization of the :class:`PowerLoss` class.
+
+        :param int p: Degree of the Lp norm. It specifies the norm to be
+            computed. Default is ``2`` (euclidean norm).
+        :param str reduction: The reduction method for the loss.
+            Available options: ``none``, ``mean``, ``sum``.
+            If ``none``, no reduction is applied. If ``mean``, the sum of the
+            loss values is divided by the number of values. If ``sum``, the loss
+            values are summed. Default is ``mean``.
+        :param bool relative: If ``True``, the relative error is computed.
+            Default is ``False``.
         """
         super().__init__(reduction=reduction)
@@ -71,7 +62,8 @@ class PowerLoss(LossInterface):
         self.relative = relative
 
     def forward(self, input, target):
-        """Forward method for loss function.
+        """
+        Forward method of the loss function.
 
         :param torch.Tensor input: Input tensor from real data.
         :param torch.Tensor target: Model tensor output.
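
Note: per the two docstrings, the only difference between the unreduced ``PowerLoss`` and the unreduced ``LpLoss`` is the :math:`1/D` averaging over the components. A plain-torch comparison of the documented math (again a sketch, not the package's code):

import torch

x = torch.tensor([[1.0, 2.0], [3.0, 4.0]])  # N = 2, D = 2
y = torch.tensor([[1.0, 0.0], [0.0, 4.0]])

lp    = (x - y).abs().pow(2).sum(dim=-1)   # LpLoss:    tensor([4., 9.])
power = (x - y).abs().pow(2).mean(dim=-1)  # PowerLoss: tensor([2.0000, 4.5000])
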
@@ -1,20 +1,41 @@
-"""Module for Loss Interface"""
+"""Module for the Scalar Weighting."""
 
 from .weighting_interface import WeightingInterface
 from ..utils import check_consistency
 
 
 class _NoWeighting(WeightingInterface):
+    """
+    Weighting scheme that does not apply any weighting to the losses.
+    """
 
     def aggregate(self, losses):
+        """
+        Aggregate the losses.
+
+        :param dict losses: The dictionary of losses.
+        :return: The aggregated losses.
+        :rtype: torch.Tensor
+        """
         return sum(losses.values())
 
 
 class ScalarWeighting(WeightingInterface):
     """
-    TODO
+    Weighting scheme that assigns a scalar weight to each loss term.
     """
 
     def __init__(self, weights):
+        """
+        Initialization of the :class:`ScalarWeighting` class.
+
+        :param weights: The weights to be assigned to each loss term.
+            If a single scalar value is provided, it is assigned to all loss
+            terms. If a dictionary is provided, the keys are the conditions and
+            the values are the weights. If a condition is not present in the
+            dictionary, the default value is used.
+        :type weights: float | int | dict
+        """
         super().__init__()
         check_consistency([weights], (float, dict, int))
         if isinstance(weights, (float, int)):
@@ -28,8 +49,8 @@ class ScalarWeighting(WeightingInterface):
         """
         Aggregate the losses.
 
-        :param dict(torch.Tensor) losses: The dictionary of losses.
-        :return: The losses aggregation. It should be a scalar Tensor.
+        :param dict losses: The dictionary of losses.
+        :return: The aggregated losses.
         :rtype: torch.Tensor
         """
         return sum(
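
Note: the diff truncates the body of ``ScalarWeighting.aggregate`` at ``return sum(``, but the documented behavior amounts to a weighted sum over the loss dictionary. A minimal sketch of that semantics (the condition names and the default weight of ``1.0`` are illustrative assumptions):

import torch

losses = {"physics": torch.tensor(0.8), "data": torch.tensor(0.2)}
weights = {"physics": 10.0}  # "data" falls back to the default value

aggregated = sum(weights.get(name, 1.0) * loss for name, loss in losses.items())
print(aggregated)  # tensor(8.2000) = 10.0 * 0.8 + 1.0 * 0.2
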
@@ -1,14 +1,18 @@
-"""Module for Loss Interface"""
+"""Module for the Weighting Interface"""
 
 from abc import ABCMeta, abstractmethod
 
 
 class WeightingInterface(metaclass=ABCMeta):
     """
-    The ``weightingInterface`` class. TODO
+    Abstract base class for all loss weighting schemas. All weighting schemas
+    should inherit from this class.
     """
 
     def __init__(self):
+        """
+        Initialization of the :class:`WeightingInterface` class.
+        """
         self.condition_names = None
 
     @abstractmethod
@@ -16,7 +20,5 @@ class WeightingInterface(metaclass=ABCMeta):
         """
         Aggregate the losses.
 
-        :param dict(torch.Tensor) input: The dictionary of losses.
-        :return: The losses aggregation. It should be a scalar Tensor.
-        :rtype: torch.Tensor
+        :param dict losses: The dictionary of losses.
         """
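
Note: a custom scheme only has to subclass the interface and implement ``aggregate``. A hedged sketch (the import path follows the relative import shown in the Scalar Weighting hunk; the averaging rule is an illustrative choice, not part of this commit):

import torch
from pina.loss.weighting_interface import WeightingInterface

class MeanWeighting(WeightingInterface):
    """Average the loss terms instead of summing them."""

    def aggregate(self, losses):
        # Equal-weight average over the dictionary of losses.
        return sum(losses.values()) / len(losses)

w = MeanWeighting()
print(w.aggregate({"a": torch.tensor(1.0), "b": torch.tensor(3.0)}))  # tensor(2.)
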