Fix Codacy Warnings (#477)

---------

Co-authored-by: Dario Coscia <dariocos99@gmail.com>
Author: Filippo Olivo
Date: 2025-03-10 15:38:45 +01:00
Committed by: Nicola Demo
Parent: e3790e049a
Commit: 4177bfbb50

157 changed files with 3473 additions and 3839 deletions


@@ -1,3 +1,7 @@
+"""
+Adaptive Activation Functions Module.
+"""
+
 __all__ = [
     "AdaptiveActivationFunctionInterface",
     "AdaptiveReLU",


@@ -15,7 +15,7 @@ class AdaptiveReLU(AdaptiveActivationFunctionInterface):
     is defined as:

     .. math::
-        \text{ReLU}_{\text{adaptive}}({x}) = \alpha\,\text{ReLU}(\beta{x}+\gamma),
+        \text{ReLU}_{\text{adaptive}}({x})=\alpha\,\text{ReLU}(\beta{x}+\gamma),

     where :math:`\alpha,\,\beta,\,\gamma` are trainable parameters, and the
     ReLU function is defined as:
@@ -50,13 +50,15 @@ class AdaptiveSigmoid(AdaptiveActivationFunctionInterface):
     r"""
     Adaptive trainable :class:`~torch.nn.Sigmoid` activation function.

-    Given the function :math:`\text{Sigmoid}:\mathbb{R}^n\rightarrow\mathbb{R}^n`,
+    Given the function
+    :math:`\text{Sigmoid}:\mathbb{R}^n\rightarrow\mathbb{R}^n`,
     the adaptive function
     :math:`\text{Sigmoid}_{\text{adaptive}}:\mathbb{R}^n\rightarrow\mathbb{R}^n`
     is defined as:

     .. math::
-        \text{Sigmoid}_{\text{adaptive}}({x}) = \alpha\,\text{Sigmoid}(\beta{x}+\gamma),
+        \text{Sigmoid}_{\text{adaptive}}({x})=
+        \alpha\,\text{Sigmoid}(\beta{x}+\gamma),

     where :math:`\alpha,\,\beta,\,\gamma` are trainable parameters, and the
     Sigmoid function is defined as:
@@ -97,7 +99,7 @@ class AdaptiveTanh(AdaptiveActivationFunctionInterface):
     is defined as:

     .. math::
-        \text{Tanh}_{\text{adaptive}}({x}) = \alpha\,\text{Tanh}(\beta{x}+\gamma),
+        \text{Tanh}_{\text{adaptive}}({x})=\alpha\,\text{Tanh}(\beta{x}+\gamma),

     where :math:`\alpha,\,\beta,\,\gamma` are trainable parameters, and the
     Tanh function is defined as:
@@ -138,7 +140,7 @@ class AdaptiveSiLU(AdaptiveActivationFunctionInterface):
     is defined as:

     .. math::
-        \text{SiLU}_{\text{adaptive}}({x}) = \alpha\,\text{SiLU}(\beta{x}+\gamma),
+        \text{SiLU}_{\text{adaptive}}({x})=\alpha\,\text{SiLU}(\beta{x}+\gamma),

     where :math:`\alpha,\,\beta,\,\gamma` are trainable parameters, and the
     SiLU function is defined as:
@@ -180,7 +182,7 @@ class AdaptiveMish(AdaptiveActivationFunctionInterface):
     is defined as:

     .. math::
-        \text{Mish}_{\text{adaptive}}({x}) = \alpha\,\text{Mish}(\beta{x}+\gamma),
+        \text{Mish}_{\text{adaptive}}({x})=\alpha\,\text{Mish}(\beta{x}+\gamma),

     where :math:`\alpha,\,\beta,\,\gamma` are trainable parameters, and the
     Mish function is defined as:
@@ -265,7 +267,7 @@ class AdaptiveCELU(AdaptiveActivationFunctionInterface):
     is defined as:

     .. math::
-        \text{CELU}_{\text{adaptive}}({x}) = \alpha\,\text{CELU}(\beta{x}+\gamma),
+        \text{CELU}_{\text{adaptive}}({x})=\alpha\,\text{CELU}(\beta{x}+\gamma),

     where :math:`\alpha,\,\beta,\,\gamma` are trainable parameters, and the
     CELU function is defined as:
@@ -306,13 +308,13 @@ class AdaptiveGELU(AdaptiveActivationFunctionInterface):
     is defined as:

     .. math::
-        \text{GELU}_{\text{adaptive}}({x}) = \alpha\,\text{GELU}(\beta{x}+\gamma),
+        \text{GELU}_{\text{adaptive}}({x})=\alpha\,\text{GELU}(\beta{x}+\gamma),

     where :math:`\alpha,\,\beta,\,\gamma` are trainable parameters, and the
     GELU function is defined as:

     .. math::
-        \text{GELU}(x) = 0.5 * x * (1 + \text{Tanh}(\sqrt{2 / \pi} * (x + 0.044715 * x^3)))
+        \text{GELU}(x)=0.5*x*(1+\text{Tanh}(\sqrt{2 / \pi}*(x+0.044715*x^3)))

     .. seealso::
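As a quick sanity check on the tanh-style GELU expression documented above (not part of the commit), the formula can be evaluated directly and compared with PyTorch's built-in tanh-approximate GELU; passing approximate="tanh" assumes PyTorch 1.12 or newer.

import math

import torch


def gelu_tanh_from_docstring(x):
    # The expression documented above:
    # 0.5 * x * (1 + Tanh(sqrt(2 / pi) * (x + 0.044715 * x^3)))
    return 0.5 * x * (1 + torch.tanh(math.sqrt(2 / math.pi) * (x + 0.044715 * x**3)))


x = torch.linspace(-3.0, 3.0, 7)
reference = torch.nn.functional.gelu(x, approximate="tanh")  # PyTorch >= 1.12
assert torch.allclose(gelu_tanh_from_docstring(x), reference, atol=1e-6)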
@@ -342,13 +344,15 @@ class AdaptiveSoftmin(AdaptiveActivationFunctionInterface):
     r"""
     Adaptive trainable :class:`~torch.nn.Softmin` activation function.

-    Given the function :math:`\text{Softmin}:\mathbb{R}^n\rightarrow\mathbb{R}^n`,
+    Given the function
+    :math:`\text{Softmin}:\mathbb{R}^n\rightarrow\mathbb{R}^n`,
     the adaptive function
     :math:`\text{Softmin}_{\text{adaptive}}:\mathbb{R}^n\rightarrow\mathbb{R}^n`
     is defined as:

     .. math::
-        \text{Softmin}_{\text{adaptive}}({x}) = \alpha\,\text{Softmin}(\beta{x}+\gamma),
+        \text{Softmin}_{\text{adaptive}}({x})=\alpha\,
+        \text{Softmin}(\beta{x}+\gamma),

     where :math:`\alpha,\,\beta,\,\gamma` are trainable parameters, and the
     Softmin function is defined as:
@@ -383,13 +387,15 @@ class AdaptiveSoftmax(AdaptiveActivationFunctionInterface):
     r"""
     Adaptive trainable :class:`~torch.nn.Softmax` activation function.

-    Given the function :math:`\text{Softmax}:\mathbb{R}^n\rightarrow\mathbb{R}^n`,
+    Given the function
+    :math:`\text{Softmax}:\mathbb{R}^n\rightarrow\mathbb{R}^n`,
     the adaptive function
     :math:`\text{Softmax}_{\text{adaptive}}:\mathbb{R}^n\rightarrow\mathbb{R}^n`
     is defined as:

     .. math::
-        \text{Softmax}_{\text{adaptive}}({x}) = \alpha\,\text{Softmax}(\beta{x}+\gamma),
+        \text{Softmax}_{\text{adaptive}}({x})=\alpha\,
+        \text{Softmax}(\beta{x}+\gamma),

     where :math:`\alpha,\,\beta,\,\gamma` are trainable parameters, and the
     Softmax function is defined as:
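All of the hunks above apply the same reformatting to one family of docstrings: every adaptive activation computes alpha * f(beta * x + gamma) with trainable alpha, beta, gamma. A minimal, hedged PyTorch sketch of that formula for the ReLU case (illustrative only, not PINA's implementation):

import torch

# Trainable coefficients named as in the docstrings above.
alpha = torch.nn.Parameter(torch.tensor(1.0))
beta = torch.nn.Parameter(torch.tensor(1.0))
gamma = torch.nn.Parameter(torch.tensor(0.0))

x = torch.linspace(-2.0, 2.0, 5)

# ReLU_adaptive(x) = alpha * ReLU(beta * x + gamma); the same pattern holds for
# the Sigmoid, Tanh, SiLU, Mish, CELU, GELU, Softmin, and Softmax variants above.
y = alpha * torch.relu(beta * x + gamma)

# The coefficients receive gradients like any other trainable parameter.
y.sum().backward()
print(alpha.grad, beta.grad, gamma.grad)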


@@ -1,15 +1,15 @@
 """Module for adaptive functions."""
-import torch
-from pina.utils import check_consistency
 from abc import ABCMeta
+import torch
+from ..utils import check_consistency, is_function


 class AdaptiveActivationFunctionInterface(torch.nn.Module, metaclass=ABCMeta):
     r"""
     The
-    :class:`~pina.adaptive_function.adaptive_func_interface.AdaptiveActivationFunctionInterface`
+    :class:`~pina.adaptive_function.adaptive_func_interface.\
+    AdaptiveActivationFunctionInterface`
     class makes a :class:`torch.nn.Module` activation function into an adaptive
     trainable activation function. If one wants to create an adaptive activation
     function, this class must be used as a base class.
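The next hunk shows the register_buffer branch the interface uses for non-trainable coefficients. Below is a hedged sketch of the trainable-vs-frozen registration idea; the `fixed` argument and the class name are hypothetical stand-ins, not the real PINA interface or its signature.

import torch


class AdaptiveCoefficientsSketch(torch.nn.Module):
    """Illustrative sketch only; not the PINA interface itself."""

    def __init__(self, func, alpha=1.0, beta=1.0, gamma=0.0, fixed=()):
        super().__init__()
        coefficients = {"alpha": alpha, "beta": beta, "gamma": gamma}
        for name, value in coefficients.items():
            tensor = torch.tensor(float(value))
            if name in fixed:
                # frozen coefficient: a buffer, excluded from gradient updates
                self.register_buffer(name, tensor)
            else:
                # trainable coefficient
                self.register_parameter(name, torch.nn.Parameter(tensor))
        self._func = func

    def forward(self, x):
        return self.alpha * self._func(self.beta * x + self.gamma)


# Example: keep gamma fixed while alpha and beta stay trainable.
layer = AdaptiveCoefficientsSketch(torch.nn.SiLU(), fixed=("gamma",))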
@@ -104,9 +104,6 @@ class AdaptiveActivationFunctionInterface(torch.nn.Module, metaclass=ABCMeta):
         else:
             self.register_buffer("gamma", gamma)

-        # storing the activation
-        self._func = None
-
     def forward(self, x):
         """
         Define the computation performed at every call.
@@ -144,3 +141,13 @@ class AdaptiveActivationFunctionInterface(torch.nn.Module, metaclass=ABCMeta):
         The callable activation function.
         """
         return self._func
+
+    @func.setter
+    def func(self, value):
+        """
+        Set the activation function.
+        """
+        if not is_function(value):
+            raise TypeError("The function must be callable.")
+        self._func = value
+        return self._func
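For context on the property/setter pair added above, here is a hedged, standalone sketch of the validation pattern in plain Python; `is_callable` is a stand-in for the `is_function` helper imported from the utils module, which, judging from the error message, checks that the value is callable.

def is_callable(value):
    # Stand-in for the utils helper `is_function`; assumed to test callability.
    return callable(value)


class FuncHolder:
    """Plain-Python sketch of the `func` property pattern; not the PINA class."""

    def __init__(self, func):
        self._func = None
        self.func = func  # routed through the setter below, so it is validated

    @property
    def func(self):
        """The callable activation function."""
        return self._func

    @func.setter
    def func(self, value):
        if not is_callable(value):
            raise TypeError("The function must be callable.")
        self._func = value


holder = FuncHolder(abs)  # fine: abs is callable
try:
    holder.func = 42      # rejected by the setter
except TypeError as err:
    print(err)            # -> The function must be callable.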