Fix Codacy Warnings (#477)
Co-authored-by: Dario Coscia <dariocos99@gmail.com>

committed by Nicola Demo
parent e3790e049a
commit 4177bfbb50
@@ -1,3 +1,7 @@
+"""
+Module for loss functions and weighting functions.
+"""
+
 __all__ = [
     "LossInterface",
     "LpLoss",
@@ -18,8 +18,8 @@ class LossInterface(_Loss, metaclass=ABCMeta):
             will be applied, ``mean``: the sum of the output will be divided
             by the number of elements in the output, ``sum``: the output will
             be summed. Note: ``size_average`` and ``reduce`` are in the
-            process of being deprecated, and in the meantime, specifying either of
-            those two args will override ``reduction``. Default: ``mean``.
+            process of being deprecated, and in the meantime, specifying either
+            of those two args will override ``reduction``. Default: ``mean``.
         """
         super().__init__(reduction=reduction, size_average=None, reduce=None)

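The docstring being reflowed here describes the standard torch.nn reduction modes. As a quick illustration (a sketch added for this writeup, not code from the commit), the three modes relate as follows:

import torch

# Per-element losses, before any reduction is applied.
loss = torch.tensor([0.5, 1.0, 1.5])

print(loss)                       # reduction="none": returned unchanged
print(loss.sum() / loss.numel())  # reduction="mean": tensor(1.)
print(loss.sum())                 # reduction="sum":  tensor(3.)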
@@ -32,7 +32,6 @@ class LossInterface(_Loss, metaclass=ABCMeta):
         :return: Loss evaluation.
         :rtype: torch.Tensor
         """
-        pass

     def _reduction(self, loss):
         """Simple helper function to check reduction
@@ -42,8 +41,8 @@ class LossInterface(_Loss, metaclass=ABCMeta):
             will be applied, ``mean``: the sum of the output will be divided
             by the number of elements in the output, ``sum``: the output will
             be summed. Note: ``size_average`` and ``reduce`` are in the
-            process of being deprecated, and in the meantime, specifying either of
-            those two args will override ``reduction``. Default: ``mean``.
+            process of being deprecated, and in the meantime, specifying either
+            of those two args will override ``reduction``. Default: ``mean``.
         :type reduction: str
         :param loss: Loss tensor for each element.
         :type loss: torch.Tensor
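This hunk only reflows the ``_reduction`` docstring; the helper's body is not part of the diff. A standalone sketch of the documented behavior, under an assumed name ``reduce_loss``:

def reduce_loss(loss, reduction):
    """Sketch of the documented semantics (not the commit's code)."""
    if reduction == "none":
        return loss          # per-element losses, untouched
    if reduction == "mean":
        return loss.mean()   # sum divided by the number of elements
    if reduction == "sum":
        return loss.sum()    # plain sum, no division
    raise ValueError(f"unknown reduction: {reduction}")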
@@ -23,7 +23,8 @@ class LpLoss(LossInterface):

     .. math::
         \ell(x, y) = L = \{l_1,\dots,l_N\}^\top, \quad
-        l_n = \frac{ [\sum_{i=1}^{D} | x_n^i - y_n^i|^p] }{[\sum_{i=1}^{D}|y_n^i|^p]},
+        l_n = \frac{ [\sum_{i=1}^{D} | x_n^i - y_n^i|^p] }
+        {[\sum_{i=1}^{D}|y_n^i|^p]},

     where :math:`N` is the batch size. If ``reduction`` is not ``none``
     (default ``mean``), then:
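The reflowed formula is the relative Lp variant: each sample's error mass is normalized by the target's. A direct transcription of the displayed math (illustrative only; ``relative_lp`` is not the library implementation):

import torch

def relative_lp(x, y, p=2):
    # l_n = [sum_i |x_n^i - y_n^i|^p] / [sum_i |y_n^i|^p], one value per sample
    num = ((x - y).abs() ** p).sum(dim=-1)
    den = (y.abs() ** p).sum(dim=-1)
    return num / den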
@@ -38,16 +39,19 @@ class LpLoss(LossInterface):
     :math:`x` and :math:`y` are tensors of arbitrary shapes with a total
     of :math:`n` elements each.

-    The sum operation still operates over all the elements, and divides by :math:`n`.
+    The sum operation still operates over all the elements, and divides by
+    :math:`n`.

-    The division by :math:`n` can be avoided if one sets ``reduction`` to ``sum``.
+    The division by :math:`n` can be avoided if one sets ``reduction`` to
+    ``sum``.
     """

     def __init__(self, p=2, reduction="mean", relative=False):
         """
         :param int p: Degree of Lp norm. It specifies the type of norm to
-            be calculated. See `list of possible orders in torch linalg
-            <https://pytorch.org/docs/stable/generated/torch.linalg.norm.html#torch.linalg.norm>`_ to
-            see the possible degrees. Default 2 (euclidean norm).
+            be calculated. See
+            `torch.linalg.norm <https://pytorch.org/docs/stable/generated/
+            torch.linalg.norm.html#torch.linalg.norm>`_
+            for possible degrees. Default 2 (euclidean norm).
         :param str reduction: Specifies the reduction to apply to the output:
             ``none`` | ``mean`` | ``sum``. ``none``: no reduction
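For context, a minimal usage sketch matching the ``__init__`` signature shown above; the import path is an assumption and may differ between PINA versions:

import torch
from pina.loss import LpLoss  # import path assumed

x = torch.randn(16, 3)  # predictions: batch of 16 samples, D = 3
y = torch.randn(16, 3)  # targets

loss = LpLoss(p=2, reduction="mean", relative=False)
value = loss(x, y)  # scalar tensor: per-sample losses averaged over the batch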
@@ -17,13 +17,15 @@ class PowerLoss(LossInterface):

     .. math::
         \ell(x, y) = L = \{l_1,\dots,l_N\}^\top, \quad
-        l_n = \frac{1}{D}\left[\sum_{i=1}^{D} \left| x_n^i - y_n^i \right|^p \right],
+        l_n = \frac{1}{D}\left[\sum_{i=1}^{D}
+        \left| x_n^i - y_n^i \right|^p\right],

     If ``'relative'`` is set to true:

     .. math::
         \ell(x, y) = L = \{l_1,\dots,l_N\}^\top, \quad
-        l_n = \frac{ \sum_{i=1}^{D} | x_n^i - y_n^i|^p }{\sum_{i=1}^{D}|y_n^i|^p},
+        l_n = \frac{ \sum_{i=1}^{D} | x_n^i - y_n^i|^p }
+        {\sum_{i=1}^{D}|y_n^i|^p},

     where :math:`N` is the batch size. If ``reduction`` is not ``none``
     (default ``mean``), then:
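The two displayed formulas transcribe directly into torch. The sketch below is illustrative (``power_loss`` is not the library code):

import torch

def power_loss(x, y, p=2, relative=False):
    # plain:    l_n = (1/D) * sum_i |x_n^i - y_n^i|^p
    # relative: l_n = sum_i |x_n^i - y_n^i|^p / sum_i |y_n^i|^p
    diff = (x - y).abs() ** p
    if relative:
        return diff.sum(dim=-1) / (y.abs() ** p).sum(dim=-1)
    return diff.mean(dim=-1)  # mean over D carries the 1/D factor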
@@ -38,16 +40,19 @@ class PowerLoss(LossInterface):
     :math:`x` and :math:`y` are tensors of arbitrary shapes with a total
     of :math:`n` elements each.

-    The sum operation still operates over all the elements, and divides by :math:`n`.
+    The sum operation still operates over all the elements, and divides by
+    :math:`n`.

-    The division by :math:`n` can be avoided if one sets ``reduction`` to ``sum``.
+    The division by :math:`n` can be avoided if one sets ``reduction`` to
+    ``sum``.
     """

     def __init__(self, p=2, reduction="mean", relative=False):
         """
         :param int p: Degree of Lp norm. It specifies the type of norm to
             be calculated. See `list of possible orders in torch linalg
-            <https://pytorch.org/docs/stable/generated/torch.linalg.norm.html#torch.linalg.norm>`_ to
+            <https://pytorch.org/docs/stable/generated/
+            torch.linalg.norm.html#torch.linalg.norm>`_ to
             see the possible degrees. Default 2 (euclidean norm).
         :param str reduction: Specifies the reduction to apply to the output:
             ``none`` | ``mean`` | ``sum``. When ``none``: no reduction
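A small sketch of the ``sum`` versus ``mean`` note in the docstring above, assuming the same import path as before and that the reduction acts on the :math:`N` per-sample losses:

import torch
from pina.loss import PowerLoss  # import path assumed

x, y = torch.randn(16, 3), torch.randn(16, 3)

# "mean" divides the aggregated per-sample losses by their count;
# "sum" skips that division, as the docstring notes.
mean_loss = PowerLoss(p=2, reduction="mean")(x, y)
sum_loss = PowerLoss(p=2, reduction="sum")(x, y)
print(torch.allclose(sum_loss / x.shape[0], mean_loss))  # expected: True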
@@ -20,4 +20,3 @@ class WeightingInterface(metaclass=ABCMeta):
         :return: The losses aggregation. It should be a scalar Tensor.
         :rtype: torch.Tensor
         """
-        pass
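Only the tail of the aggregation method's docstring appears in this hunk, so the method name below is an assumption. A minimal concrete weighting scheme might look like:

class StaticWeighting:
    """Hypothetical sketch of a concrete weighting scheme."""

    def __init__(self, weights):
        self.weights = weights  # e.g. {"physics": 1.0, "data": 10.0}

    def aggregate(self, losses):
        # ``aggregate`` is an assumed name; the hunk shows only the end of
        # its docstring. Weighted sum of named losses, a scalar Tensor.
        return sum(w * losses[name] for name, w in self.weights.items())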