fixing adaptive functions

committed by Nicola Demo · parent 50fb3b731c · commit 4f5d9559b2
@@ -74,7 +74,27 @@ Layers
     Continuous convolution <layers/convolution.rst>
     Proper Orthogonal Decomposition <layers/pod.rst>
     Periodic Boundary Condition embeddings <layers/embedding.rst>
-    Adpative Activation Function <layers/adaptive_func.rst>
+
+Adaptive Activation Functions
+-------------------------------
+
+.. toctree::
+    :titlesonly:
+
+    Adaptive Function Interface <adaptive_functions/AdaptiveFunctionInterface.rst>
+    Adaptive ReLU <adaptive_functions/AdaptiveReLU.rst>
+    Adaptive Sigmoid <adaptive_functions/AdaptiveSigmoid.rst>
+    Adaptive Tanh <adaptive_functions/AdaptiveTanh.rst>
+    Adaptive SiLU <adaptive_functions/AdaptiveSiLU.rst>
+    Adaptive Mish <adaptive_functions/AdaptiveMish.rst>
+    Adaptive ELU <adaptive_functions/AdaptiveELU.rst>
+    Adaptive CELU <adaptive_functions/AdaptiveCELU.rst>
+    Adaptive GELU <adaptive_functions/AdaptiveGELU.rst>
+    Adaptive Softmin <adaptive_functions/AdaptiveSoftmin.rst>
+    Adaptive Softmax <adaptive_functions/AdaptiveSoftmax.rst>
+    Adaptive SIREN <adaptive_functions/AdaptiveSIREN.rst>
+    Adaptive Exp <adaptive_functions/AdaptiveExp.rst>

 Equations and Operators
 -------------------------
docs/source/_rst/adaptive_functions/AdaptiveCELU.rst (new file, 9 lines)
@@ -0,0 +1,9 @@
AdaptiveCELU
============

.. currentmodule:: pina.adaptive_functions.adaptive_func

.. autoclass:: AdaptiveCELU
    :members:
    :show-inheritance:
    :inherited-members: AdaptiveActivationFunctionInterface
docs/source/_rst/adaptive_functions/AdaptiveELU.rst (new file, 9 lines)
@@ -0,0 +1,9 @@
AdaptiveELU
===========

.. currentmodule:: pina.adaptive_functions.adaptive_func

.. autoclass:: AdaptiveELU
    :members:
    :show-inheritance:
    :inherited-members: AdaptiveActivationFunctionInterface
docs/source/_rst/adaptive_functions/AdaptiveExp.rst (new file, 9 lines)
@@ -0,0 +1,9 @@
AdaptiveExp
===========

.. currentmodule:: pina.adaptive_functions.adaptive_func

.. autoclass:: AdaptiveExp
    :members:
    :show-inheritance:
    :inherited-members: AdaptiveActivationFunctionInterface
@@ -0,0 +1,8 @@
AdaptiveActivationFunctionInterface
=======================================

.. currentmodule:: pina.adaptive_functions.adaptive_func_interface

.. automodule:: pina.adaptive_functions.adaptive_func_interface
    :members:
    :show-inheritance:
docs/source/_rst/adaptive_functions/AdaptiveGELU.rst (new file, 9 lines)
@@ -0,0 +1,9 @@
AdaptiveGELU
============

.. currentmodule:: pina.adaptive_functions.adaptive_func

.. autoclass:: AdaptiveGELU
    :members:
    :show-inheritance:
    :inherited-members: AdaptiveActivationFunctionInterface
docs/source/_rst/adaptive_functions/AdaptiveMish.rst (new file, 9 lines)
@@ -0,0 +1,9 @@
AdaptiveMish
============

.. currentmodule:: pina.adaptive_functions.adaptive_func

.. autoclass:: AdaptiveMish
    :members:
    :show-inheritance:
    :inherited-members: AdaptiveActivationFunctionInterface
docs/source/_rst/adaptive_functions/AdaptiveReLU.rst (new file, 9 lines)
@@ -0,0 +1,9 @@
AdaptiveReLU
============

.. currentmodule:: pina.adaptive_functions.adaptive_func

.. autoclass:: AdaptiveReLU
    :members:
    :show-inheritance:
    :inherited-members: AdaptiveActivationFunctionInterface
docs/source/_rst/adaptive_functions/AdaptiveSIREN.rst (new file, 9 lines)
@@ -0,0 +1,9 @@
AdaptiveSIREN
=============

.. currentmodule:: pina.adaptive_functions.adaptive_func

.. autoclass:: AdaptiveSIREN
    :members:
    :show-inheritance:
    :inherited-members: AdaptiveActivationFunctionInterface
docs/source/_rst/adaptive_functions/AdaptiveSiLU.rst (new file, 9 lines)
@@ -0,0 +1,9 @@
AdaptiveSiLU
============

.. currentmodule:: pina.adaptive_functions.adaptive_func

.. autoclass:: AdaptiveSiLU
    :members:
    :show-inheritance:
    :inherited-members: AdaptiveActivationFunctionInterface
docs/source/_rst/adaptive_functions/AdaptiveSigmoid.rst (new file, 9 lines)
@@ -0,0 +1,9 @@
AdaptiveSigmoid
===============

.. currentmodule:: pina.adaptive_functions.adaptive_func

.. autoclass:: AdaptiveSigmoid
    :members:
    :show-inheritance:
    :inherited-members: AdaptiveActivationFunctionInterface
docs/source/_rst/adaptive_functions/AdaptiveSoftmax.rst (new file, 9 lines)
@@ -0,0 +1,9 @@
AdaptiveSoftmax
===============

.. currentmodule:: pina.adaptive_functions.adaptive_func

.. autoclass:: AdaptiveSoftmax
    :members:
    :show-inheritance:
    :inherited-members: AdaptiveActivationFunctionInterface
docs/source/_rst/adaptive_functions/AdaptiveSoftmin.rst (new file, 9 lines)
@@ -0,0 +1,9 @@
AdaptiveSoftmin
===============

.. currentmodule:: pina.adaptive_functions.adaptive_func

.. autoclass:: AdaptiveSoftmin
    :members:
    :show-inheritance:
    :inherited-members: AdaptiveActivationFunctionInterface
docs/source/_rst/adaptive_functions/AdaptiveTanh.rst (new file, 9 lines)
@@ -0,0 +1,9 @@
AdaptiveTanh
============

.. currentmodule:: pina.adaptive_functions.adaptive_func

.. autoclass:: AdaptiveTanh
    :members:
    :show-inheritance:
    :inherited-members: AdaptiveActivationFunctionInterface
@@ -1,7 +0,0 @@ (file removed)
AdaptiveActivationFunction
=============================
.. currentmodule:: pina.model.layers.adaptive_func

.. autoclass:: AdaptiveActivationFunction
    :members:
    :show-inheritance:
pina/adaptive_functions/__init__.py (new file, 21 lines)
@@ -0,0 +1,21 @@
__all__ = [
    'AdaptiveActivationFunctionInterface',
    'AdaptiveReLU',
    'AdaptiveSigmoid',
    'AdaptiveTanh',
    'AdaptiveSiLU',
    'AdaptiveMish',
    'AdaptiveELU',
    'AdaptiveCELU',
    'AdaptiveGELU',
    'AdaptiveSoftmin',
    'AdaptiveSoftmax',
    'AdaptiveSIREN',
    'AdaptiveExp']

from .adaptive_func import (AdaptiveReLU, AdaptiveSigmoid, AdaptiveTanh,
                            AdaptiveSiLU, AdaptiveMish, AdaptiveELU,
                            AdaptiveCELU, AdaptiveGELU, AdaptiveSoftmin,
                            AdaptiveSoftmax, AdaptiveSIREN, AdaptiveExp)
from .adaptive_func_interface import AdaptiveActivationFunctionInterface
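With these exports in place, an adaptive activation behaves like any other torch.nn.Module. A minimal usage sketch (the network below is illustrative and not part of this commit; it assumes only the constructors defined in this package):

import torch
from pina.adaptive_functions import AdaptiveTanh

# Illustrative two-layer network: each AdaptiveTanh owns its own trainable
# alpha, beta, gamma, optimised together with the linear-layer weights.
model = torch.nn.Sequential(
    torch.nn.Linear(2, 16),
    AdaptiveTanh(),
    torch.nn.Linear(16, 1),
    AdaptiveTanh(fixed=['gamma']),   # gamma kept frozen at its default value
)

x = torch.rand(8, 2)
loss = model(x).pow(2).mean()
loss.backward()   # gradients reach both the weights and the adaptive parameters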
pina/adaptive_functions/adaptive_func.py (new file, 488 lines)
@@ -0,0 +1,488 @@
""" Module for adaptive functions. """

import torch
from ..utils import check_consistency
from .adaptive_func_interface import AdaptiveActivationFunctionInterface


class AdaptiveReLU(AdaptiveActivationFunctionInterface):
    r"""
    Adaptive trainable :class:`~torch.nn.ReLU` activation function.

    Given the function :math:`\text{ReLU}:\mathbb{R}^n\rightarrow\mathbb{R}^n`,
    the adaptive function
    :math:`\text{ReLU}_{\text{adaptive}}:\mathbb{R}^n\rightarrow\mathbb{R}^n`
    is defined as:

    .. math::
        \text{ReLU}_{\text{adaptive}}({x}) = \alpha\,\text{ReLU}(\beta{x}+\gamma),

    where :math:`\alpha,\,\beta,\,\gamma` are trainable parameters, and the
    ReLU function is defined as:

    .. math::
        \text{ReLU}(x) = \max(0, x)

    .. seealso::

        **Original reference**: Godfrey, Luke B., and Michael S. Gashler.
        *A continuum among logarithmic, linear, and exponential functions,
        and its potential to improve generalization in neural networks.*
        2015 7th international joint conference on knowledge discovery,
        knowledge engineering and knowledge management (IC3K).
        Vol. 1. IEEE, 2015. DOI: `arXiv preprint arXiv:1602.01321.
        <https://arxiv.org/abs/1602.01321>`_.

        Jagtap, Ameya D., Kenji Kawaguchi, and George Em Karniadakis. *Adaptive
        activation functions accelerate convergence in deep and
        physics-informed neural networks*. Journal of
        Computational Physics 404 (2020): 109136.
        DOI: `JCP 10.1016
        <https://doi.org/10.1016/j.jcp.2019.109136>`_.
    """

    def __init__(self, alpha=None, beta=None, gamma=None, fixed=None):
        super().__init__(alpha, beta, gamma, fixed)
        self._func = torch.nn.ReLU()

class AdaptiveSigmoid(AdaptiveActivationFunctionInterface):
    r"""
    Adaptive trainable :class:`~torch.nn.Sigmoid` activation function.

    Given the function :math:`\text{Sigmoid}:\mathbb{R}^n\rightarrow\mathbb{R}^n`,
    the adaptive function
    :math:`\text{Sigmoid}_{\text{adaptive}}:\mathbb{R}^n\rightarrow\mathbb{R}^n`
    is defined as:

    .. math::
        \text{Sigmoid}_{\text{adaptive}}({x}) = \alpha\,\text{Sigmoid}(\beta{x}+\gamma),

    where :math:`\alpha,\,\beta,\,\gamma` are trainable parameters, and the
    Sigmoid function is defined as:

    .. math::
        \text{Sigmoid}(x) = \frac{1}{1 + \exp(-x)}

    .. seealso::

        **Original reference**: Godfrey, Luke B., and Michael S. Gashler.
        *A continuum among logarithmic, linear, and exponential functions,
        and its potential to improve generalization in neural networks.*
        2015 7th international joint conference on knowledge discovery,
        knowledge engineering and knowledge management (IC3K).
        Vol. 1. IEEE, 2015. DOI: `arXiv preprint arXiv:1602.01321.
        <https://arxiv.org/abs/1602.01321>`_.

        Jagtap, Ameya D., Kenji Kawaguchi, and George Em Karniadakis. *Adaptive
        activation functions accelerate convergence in deep and
        physics-informed neural networks*. Journal of
        Computational Physics 404 (2020): 109136.
        DOI: `JCP 10.1016
        <https://doi.org/10.1016/j.jcp.2019.109136>`_.
    """

    def __init__(self, alpha=None, beta=None, gamma=None, fixed=None):
        super().__init__(alpha, beta, gamma, fixed)
        self._func = torch.nn.Sigmoid()

class AdaptiveTanh(AdaptiveActivationFunctionInterface):
    r"""
    Adaptive trainable :class:`~torch.nn.Tanh` activation function.

    Given the function :math:`\text{Tanh}:\mathbb{R}^n\rightarrow\mathbb{R}^n`,
    the adaptive function
    :math:`\text{Tanh}_{\text{adaptive}}:\mathbb{R}^n\rightarrow\mathbb{R}^n`
    is defined as:

    .. math::
        \text{Tanh}_{\text{adaptive}}({x}) = \alpha\,\text{Tanh}(\beta{x}+\gamma),

    where :math:`\alpha,\,\beta,\,\gamma` are trainable parameters, and the
    Tanh function is defined as:

    .. math::
        \text{Tanh}(x) = \frac{\exp(x) - \exp(-x)}{\exp(x) + \exp(-x)}

    .. seealso::

        **Original reference**: Godfrey, Luke B., and Michael S. Gashler.
        *A continuum among logarithmic, linear, and exponential functions,
        and its potential to improve generalization in neural networks.*
        2015 7th international joint conference on knowledge discovery,
        knowledge engineering and knowledge management (IC3K).
        Vol. 1. IEEE, 2015. DOI: `arXiv preprint arXiv:1602.01321.
        <https://arxiv.org/abs/1602.01321>`_.

        Jagtap, Ameya D., Kenji Kawaguchi, and George Em Karniadakis. *Adaptive
        activation functions accelerate convergence in deep and
        physics-informed neural networks*. Journal of
        Computational Physics 404 (2020): 109136.
        DOI: `JCP 10.1016
        <https://doi.org/10.1016/j.jcp.2019.109136>`_.
    """

    def __init__(self, alpha=None, beta=None, gamma=None, fixed=None):
        super().__init__(alpha, beta, gamma, fixed)
        self._func = torch.nn.Tanh()

class AdaptiveSiLU(AdaptiveActivationFunctionInterface):
    r"""
    Adaptive trainable :class:`~torch.nn.SiLU` activation function.

    Given the function :math:`\text{SiLU}:\mathbb{R}^n\rightarrow\mathbb{R}^n`,
    the adaptive function
    :math:`\text{SiLU}_{\text{adaptive}}:\mathbb{R}^n\rightarrow\mathbb{R}^n`
    is defined as:

    .. math::
        \text{SiLU}_{\text{adaptive}}({x}) = \alpha\,\text{SiLU}(\beta{x}+\gamma),

    where :math:`\alpha,\,\beta,\,\gamma` are trainable parameters, and the
    SiLU function is defined as:

    .. math::
        \text{SiLU}(x) = x * \sigma(x), \text{ where } \sigma(x)
        \text{ is the logistic sigmoid.}

    .. seealso::

        **Original reference**: Godfrey, Luke B., and Michael S. Gashler.
        *A continuum among logarithmic, linear, and exponential functions,
        and its potential to improve generalization in neural networks.*
        2015 7th international joint conference on knowledge discovery,
        knowledge engineering and knowledge management (IC3K).
        Vol. 1. IEEE, 2015. DOI: `arXiv preprint arXiv:1602.01321.
        <https://arxiv.org/abs/1602.01321>`_.

        Jagtap, Ameya D., Kenji Kawaguchi, and George Em Karniadakis. *Adaptive
        activation functions accelerate convergence in deep and
        physics-informed neural networks*. Journal of
        Computational Physics 404 (2020): 109136.
        DOI: `JCP 10.1016
        <https://doi.org/10.1016/j.jcp.2019.109136>`_.
    """

    def __init__(self, alpha=None, beta=None, gamma=None, fixed=None):
        super().__init__(alpha, beta, gamma, fixed)
        self._func = torch.nn.SiLU()

class AdaptiveMish(AdaptiveActivationFunctionInterface):
    r"""
    Adaptive trainable :class:`~torch.nn.Mish` activation function.

    Given the function :math:`\text{Mish}:\mathbb{R}^n\rightarrow\mathbb{R}^n`,
    the adaptive function
    :math:`\text{Mish}_{\text{adaptive}}:\mathbb{R}^n\rightarrow\mathbb{R}^n`
    is defined as:

    .. math::
        \text{Mish}_{\text{adaptive}}({x}) = \alpha\,\text{Mish}(\beta{x}+\gamma),

    where :math:`\alpha,\,\beta,\,\gamma` are trainable parameters, and the
    Mish function is defined as:

    .. math::
        \text{Mish}(x) = x * \text{Tanh}(\text{Softplus}(x))

    .. seealso::

        **Original reference**: Godfrey, Luke B., and Michael S. Gashler.
        *A continuum among logarithmic, linear, and exponential functions,
        and its potential to improve generalization in neural networks.*
        2015 7th international joint conference on knowledge discovery,
        knowledge engineering and knowledge management (IC3K).
        Vol. 1. IEEE, 2015. DOI: `arXiv preprint arXiv:1602.01321.
        <https://arxiv.org/abs/1602.01321>`_.

        Jagtap, Ameya D., Kenji Kawaguchi, and George Em Karniadakis. *Adaptive
        activation functions accelerate convergence in deep and
        physics-informed neural networks*. Journal of
        Computational Physics 404 (2020): 109136.
        DOI: `JCP 10.1016
        <https://doi.org/10.1016/j.jcp.2019.109136>`_.
    """

    def __init__(self, alpha=None, beta=None, gamma=None, fixed=None):
        super().__init__(alpha, beta, gamma, fixed)
        self._func = torch.nn.Mish()

class AdaptiveELU(AdaptiveActivationFunctionInterface):
    r"""
    Adaptive trainable :class:`~torch.nn.ELU` activation function.

    Given the function :math:`\text{ELU}:\mathbb{R}^n\rightarrow\mathbb{R}^n`,
    the adaptive function
    :math:`\text{ELU}_{\text{adaptive}}:\mathbb{R}^n\rightarrow\mathbb{R}^n`
    is defined as:

    .. math::
        \text{ELU}_{\text{adaptive}}({x}) = \alpha\,\text{ELU}(\beta{x}+\gamma),

    where :math:`\alpha,\,\beta,\,\gamma` are trainable parameters, and the
    ELU function is defined as:

    .. math::
        \text{ELU}(x) = \begin{cases}
        x, & \text{ if } x > 0\\
        \exp(x) - 1, & \text{ if } x \leq 0
        \end{cases}

    .. seealso::

        **Original reference**: Godfrey, Luke B., and Michael S. Gashler.
        *A continuum among logarithmic, linear, and exponential functions,
        and its potential to improve generalization in neural networks.*
        2015 7th international joint conference on knowledge discovery,
        knowledge engineering and knowledge management (IC3K).
        Vol. 1. IEEE, 2015. DOI: `arXiv preprint arXiv:1602.01321.
        <https://arxiv.org/abs/1602.01321>`_.

        Jagtap, Ameya D., Kenji Kawaguchi, and George Em Karniadakis. *Adaptive
        activation functions accelerate convergence in deep and
        physics-informed neural networks*. Journal of
        Computational Physics 404 (2020): 109136.
        DOI: `JCP 10.1016
        <https://doi.org/10.1016/j.jcp.2019.109136>`_.
    """

    def __init__(self, alpha=None, beta=None, gamma=None, fixed=None):
        super().__init__(alpha, beta, gamma, fixed)
        self._func = torch.nn.ELU()

class AdaptiveCELU(AdaptiveActivationFunctionInterface):
    r"""
    Adaptive trainable :class:`~torch.nn.CELU` activation function.

    Given the function :math:`\text{CELU}:\mathbb{R}^n\rightarrow\mathbb{R}^n`,
    the adaptive function
    :math:`\text{CELU}_{\text{adaptive}}:\mathbb{R}^n\rightarrow\mathbb{R}^n`
    is defined as:

    .. math::
        \text{CELU}_{\text{adaptive}}({x}) = \alpha\,\text{CELU}(\beta{x}+\gamma),

    where :math:`\alpha,\,\beta,\,\gamma` are trainable parameters, and the
    CELU function is defined as:

    .. math::
        \text{CELU}(x) = \max(0,x) + \min(0, \alpha * (\exp(x) - 1))

    .. seealso::

        **Original reference**: Godfrey, Luke B., and Michael S. Gashler.
        *A continuum among logarithmic, linear, and exponential functions,
        and its potential to improve generalization in neural networks.*
        2015 7th international joint conference on knowledge discovery,
        knowledge engineering and knowledge management (IC3K).
        Vol. 1. IEEE, 2015. DOI: `arXiv preprint arXiv:1602.01321.
        <https://arxiv.org/abs/1602.01321>`_.

        Jagtap, Ameya D., Kenji Kawaguchi, and George Em Karniadakis. *Adaptive
        activation functions accelerate convergence in deep and
        physics-informed neural networks*. Journal of
        Computational Physics 404 (2020): 109136.
        DOI: `JCP 10.1016
        <https://doi.org/10.1016/j.jcp.2019.109136>`_.
    """

    def __init__(self, alpha=None, beta=None, gamma=None, fixed=None):
        super().__init__(alpha, beta, gamma, fixed)
        self._func = torch.nn.CELU()

class AdaptiveGELU(AdaptiveActivationFunctionInterface):
    r"""
    Adaptive trainable :class:`~torch.nn.GELU` activation function.

    Given the function :math:`\text{GELU}:\mathbb{R}^n\rightarrow\mathbb{R}^n`,
    the adaptive function
    :math:`\text{GELU}_{\text{adaptive}}:\mathbb{R}^n\rightarrow\mathbb{R}^n`
    is defined as:

    .. math::
        \text{GELU}_{\text{adaptive}}({x}) = \alpha\,\text{GELU}(\beta{x}+\gamma),

    where :math:`\alpha,\,\beta,\,\gamma` are trainable parameters, and the
    GELU function is defined as:

    .. math::
        \text{GELU}(x) = 0.5 * x * (1 + \text{Tanh}(\sqrt{2 / \pi} * (x + 0.044715 * x^3)))

    .. seealso::

        **Original reference**: Godfrey, Luke B., and Michael S. Gashler.
        *A continuum among logarithmic, linear, and exponential functions,
        and its potential to improve generalization in neural networks.*
        2015 7th international joint conference on knowledge discovery,
        knowledge engineering and knowledge management (IC3K).
        Vol. 1. IEEE, 2015. DOI: `arXiv preprint arXiv:1602.01321.
        <https://arxiv.org/abs/1602.01321>`_.

        Jagtap, Ameya D., Kenji Kawaguchi, and George Em Karniadakis. *Adaptive
        activation functions accelerate convergence in deep and
        physics-informed neural networks*. Journal of
        Computational Physics 404 (2020): 109136.
        DOI: `JCP 10.1016
        <https://doi.org/10.1016/j.jcp.2019.109136>`_.
    """

    def __init__(self, alpha=None, beta=None, gamma=None, fixed=None):
        super().__init__(alpha, beta, gamma, fixed)
        self._func = torch.nn.GELU()

class AdaptiveSoftmin(AdaptiveActivationFunctionInterface):
    r"""
    Adaptive trainable :class:`~torch.nn.Softmin` activation function.

    Given the function :math:`\text{Softmin}:\mathbb{R}^n\rightarrow\mathbb{R}^n`,
    the adaptive function
    :math:`\text{Softmin}_{\text{adaptive}}:\mathbb{R}^n\rightarrow\mathbb{R}^n`
    is defined as:

    .. math::
        \text{Softmin}_{\text{adaptive}}({x}) = \alpha\,\text{Softmin}(\beta{x}+\gamma),

    where :math:`\alpha,\,\beta,\,\gamma` are trainable parameters, and the
    Softmin function is defined as:

    .. math::
        \text{Softmin}(x_{i}) = \frac{\exp(-x_i)}{\sum_j \exp(-x_j)}

    .. seealso::

        **Original reference**: Godfrey, Luke B., and Michael S. Gashler.
        *A continuum among logarithmic, linear, and exponential functions,
        and its potential to improve generalization in neural networks.*
        2015 7th international joint conference on knowledge discovery,
        knowledge engineering and knowledge management (IC3K).
        Vol. 1. IEEE, 2015. DOI: `arXiv preprint arXiv:1602.01321.
        <https://arxiv.org/abs/1602.01321>`_.

        Jagtap, Ameya D., Kenji Kawaguchi, and George Em Karniadakis. *Adaptive
        activation functions accelerate convergence in deep and
        physics-informed neural networks*. Journal of
        Computational Physics 404 (2020): 109136.
        DOI: `JCP 10.1016
        <https://doi.org/10.1016/j.jcp.2019.109136>`_.
    """

    def __init__(self, alpha=None, beta=None, gamma=None, fixed=None):
        super().__init__(alpha, beta, gamma, fixed)
        self._func = torch.nn.Softmin()

class AdaptiveSoftmax(AdaptiveActivationFunctionInterface):
    r"""
    Adaptive trainable :class:`~torch.nn.Softmax` activation function.

    Given the function :math:`\text{Softmax}:\mathbb{R}^n\rightarrow\mathbb{R}^n`,
    the adaptive function
    :math:`\text{Softmax}_{\text{adaptive}}:\mathbb{R}^n\rightarrow\mathbb{R}^n`
    is defined as:

    .. math::
        \text{Softmax}_{\text{adaptive}}({x}) = \alpha\,\text{Softmax}(\beta{x}+\gamma),

    where :math:`\alpha,\,\beta,\,\gamma` are trainable parameters, and the
    Softmax function is defined as:

    .. math::
        \text{Softmax}(x_{i}) = \frac{\exp(x_i)}{\sum_j \exp(x_j)}

    .. seealso::

        **Original reference**: Godfrey, Luke B., and Michael S. Gashler.
        *A continuum among logarithmic, linear, and exponential functions,
        and its potential to improve generalization in neural networks.*
        2015 7th international joint conference on knowledge discovery,
        knowledge engineering and knowledge management (IC3K).
        Vol. 1. IEEE, 2015. DOI: `arXiv preprint arXiv:1602.01321.
        <https://arxiv.org/abs/1602.01321>`_.

        Jagtap, Ameya D., Kenji Kawaguchi, and George Em Karniadakis. *Adaptive
        activation functions accelerate convergence in deep and
        physics-informed neural networks*. Journal of
        Computational Physics 404 (2020): 109136.
        DOI: `JCP 10.1016
        <https://doi.org/10.1016/j.jcp.2019.109136>`_.
    """

    def __init__(self, alpha=None, beta=None, gamma=None, fixed=None):
        super().__init__(alpha, beta, gamma, fixed)
        self._func = torch.nn.Softmax()

class AdaptiveSIREN(AdaptiveActivationFunctionInterface):
    r"""
    Adaptive trainable :obj:`~torch.sin` function.

    Given the function :math:`\text{sin}:\mathbb{R}^n\rightarrow\mathbb{R}^n`,
    the adaptive function
    :math:`\text{sin}_{\text{adaptive}}:\mathbb{R}^n\rightarrow\mathbb{R}^n`
    is defined as:

    .. math::
        \text{sin}_{\text{adaptive}}({x}) = \alpha\,\text{sin}(\beta{x}+\gamma),

    where :math:`\alpha,\,\beta,\,\gamma` are trainable parameters.

    .. seealso::

        **Original reference**: Godfrey, Luke B., and Michael S. Gashler.
        *A continuum among logarithmic, linear, and exponential functions,
        and its potential to improve generalization in neural networks.*
        2015 7th international joint conference on knowledge discovery,
        knowledge engineering and knowledge management (IC3K).
        Vol. 1. IEEE, 2015. DOI: `arXiv preprint arXiv:1602.01321.
        <https://arxiv.org/abs/1602.01321>`_.

        Jagtap, Ameya D., Kenji Kawaguchi, and George Em Karniadakis. *Adaptive
        activation functions accelerate convergence in deep and
        physics-informed neural networks*. Journal of
        Computational Physics 404 (2020): 109136.
        DOI: `JCP 10.1016
        <https://doi.org/10.1016/j.jcp.2019.109136>`_.
    """

    def __init__(self, alpha=None, beta=None, gamma=None, fixed=None):
        super().__init__(alpha, beta, gamma, fixed)
        self._func = torch.sin

class AdaptiveExp(AdaptiveActivationFunctionInterface):
    r"""
    Adaptive trainable :obj:`~torch.exp` function.

    Given the function :math:`\text{exp}:\mathbb{R}^n\rightarrow\mathbb{R}^n`,
    the adaptive function
    :math:`\text{exp}_{\text{adaptive}}:\mathbb{R}^n\rightarrow\mathbb{R}^n`
    is defined as:

    .. math::
        \text{exp}_{\text{adaptive}}({x}) = \alpha\,\text{exp}(\beta{x}),

    where :math:`\alpha,\,\beta` are trainable parameters.

    .. seealso::

        **Original reference**: Godfrey, Luke B., and Michael S. Gashler.
        *A continuum among logarithmic, linear, and exponential functions,
        and its potential to improve generalization in neural networks.*
        2015 7th international joint conference on knowledge discovery,
        knowledge engineering and knowledge management (IC3K).
        Vol. 1. IEEE, 2015. DOI: `arXiv preprint arXiv:1602.01321.
        <https://arxiv.org/abs/1602.01321>`_.

        Jagtap, Ameya D., Kenji Kawaguchi, and George Em Karniadakis. *Adaptive
        activation functions accelerate convergence in deep and
        physics-informed neural networks*. Journal of
        Computational Physics 404 (2020): 109136.
        DOI: `JCP 10.1016
        <https://doi.org/10.1016/j.jcp.2019.109136>`_.
    """

    def __init__(self, alpha=None, beta=None, fixed=None):

        # only alpha, and beta parameters (gamma=0 fixed)
        if fixed is None:
            fixed = ['gamma']
        else:
            check_consistency(fixed, str)
            fixed = list(fixed) + ['gamma']

        # calling super
        super().__init__(alpha, beta, 0., fixed)
        self._func = torch.exp
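All of the classes above delegate to the same forward rule, output = alpha * func(beta * x + gamma). A small sketch checking that definition numerically; it assumes only the AdaptiveReLU constructor shown above and is not part of the commit:

import torch
from pina.adaptive_functions import AdaptiveReLU

# Fix all three parameters so the output is fully determined by the formula.
af = AdaptiveReLU(alpha=2.0, beta=0.5, gamma=-1.0,
                  fixed=['alpha', 'beta', 'gamma'])

x = torch.linspace(-3, 3, 7)
expected = 2.0 * torch.relu(0.5 * x - 1.0)   # alpha * ReLU(beta * x + gamma)
assert torch.allclose(af(x), expected)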
pina/adaptive_functions/adaptive_func_interface.py (renamed from pina/model/layers/adaptive_func.py)
@@ -1,14 +1,18 @@
 """ Module for adaptive functions. """

 import torch

 from pina.utils import check_consistency
+from abc import ABCMeta


-class AdaptiveActivationFunction(torch.nn.Module):
+class AdaptiveActivationFunctionInterface(torch.nn.Module, metaclass=ABCMeta):
     r"""
-    The :class:`~pina.model.layers.adaptive_func.AdaptiveActivationFunction`
+    The
+    :class:`~pina.adaptive_functions.adaptive_func_interface.AdaptiveActivationFunctionInterface`
     class makes a :class:`torch.nn.Module` activation function into an adaptive
-    trainable activation function.
+    trainable activation function. If one wants to create an adaptive activation
+    function, this class must be used as a base class.

     Given a function :math:`f:\mathbb{R}^n\rightarrow\mathbb{R}^m`, the adaptive
     function :math:`f_{\text{adaptive}}:\mathbb{R}^n\rightarrow\mathbb{R}^m`
@@ -19,28 +23,6 @@ class AdaptiveActivationFunction(torch.nn.Module):

     where :math:`\alpha,\,\beta,\,\gamma` are trainable parameters.

-    :Example:
-        >>> import torch
-        >>> from pina.model.layers import AdaptiveActivationFunction
-        >>>
-        >>> # simple adaptive function with all trainable parameters
-        >>> AdaptiveTanh = AdaptiveActivationFunction(torch.nn.Tanh())
-        >>> AdaptiveTanh(torch.rand(3))
-        tensor([0.1084, 0.3931, 0.7294], grad_fn=<MulBackward0>)
-        >>> AdaptiveTanh.alpha
-        Parameter containing:
-        tensor(1., requires_grad=True)
-        >>>
-        >>> # simple adaptive function with trainable parameters fixed alpha
-        >>> AdaptiveTanh = AdaptiveActivationFunction(torch.nn.Tanh(),
-        ...                                           fixed=['alpha'])
-        >>> AdaptiveTanh.alpha
-        tensor(1.)
-        >>> AdaptiveTanh.beta
-        Parameter containing:
-        tensor(1., requires_grad=True)
-        >>>
-
     .. seealso::

         **Original reference**: Godfrey, Luke B., and Michael S. Gashler.
@@ -51,14 +33,18 @@ class AdaptiveActivationFunction(torch.nn.Module):
         Vol. 1. IEEE, 2015. DOI: `arXiv preprint arXiv:1602.01321.
         <https://arxiv.org/abs/1602.01321>`_.

+        Jagtap, Ameya D., Kenji Kawaguchi, and George Em Karniadakis. *Adaptive
+        activation functions accelerate convergence in deep and
+        physics-informed neural networks*. Journal of
+        Computational Physics 404 (2020): 109136.
+        DOI: `JCP 10.1016
+        <https://doi.org/10.1016/j.jcp.2019.109136>`_.
     """

-    def __init__(self, func, alpha=None, beta=None, gamma=None, fixed=None):
+    def __init__(self, alpha=None, beta=None, gamma=None, fixed=None):
         """
-        Initializes the AdaptiveActivationFunction module.
+        Initializes the Adaptive Function.

-        :param callable func: The original collable function. It could be an
-            initialized :meth:`torch.nn.Module`, or a python callable function.
         :param float | complex alpha: Scaling parameter alpha.
             Defaults to ``None``. When ``None`` is passed,
             the variable is initialized to 1.
@@ -94,8 +80,6 @@ class AdaptiveActivationFunction(torch.nn.Module):
         check_consistency(alpha, (float, complex))
         check_consistency(beta, (float, complex))
         check_consistency(gamma, (float, complex))
-        if not callable(func):
-            raise ValueError("Function must be a callable function.")

         # registering as tensors
         alpha = torch.tensor(alpha, requires_grad=False)
@@ -120,33 +104,43 @@ class AdaptiveActivationFunction(torch.nn.Module):
         else:
             self.register_buffer("gamma", gamma)

-        # registering function
-        self._func = func
+        # storing the activation
+        self._func = None

     def forward(self, x):
         """
-        Forward pass of the function.
-        Applies the function to the input elementwise.
+        Define the computation performed at every call.
+        The function is applied to the input elementwise.
+
+        :param x: The input tensor to evaluate the activation function.
+        :type x: torch.Tensor | LabelTensor
         """
         return self.alpha * (self._func(self.beta * x + self.gamma))

     @property
     def alpha(self):
         """
-        The alpha variable
+        The alpha variable.
         """
         return self._alpha

     @property
     def beta(self):
         """
-        The alpha variable
+        The beta variable.
         """
         return self._beta

     @property
     def gamma(self):
         """
-        The alpha variable
+        The gamma variable.
         """
         return self._gamma
+
+    @property
+    def func(self):
+        """
+        The callable activation function.
+        """
+        return self._func
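Since the interface now carries all parameter handling, a new adaptive activation only has to assign the wrapped callable after calling the base constructor. A hedged sketch of a hypothetical subclass (AdaptiveSoftplus is not part of this commit; it simply mirrors the pattern used by the classes in adaptive_func.py):

import torch
from pina.adaptive_functions import AdaptiveActivationFunctionInterface

class AdaptiveSoftplus(AdaptiveActivationFunctionInterface):
    """Hypothetical adaptive Softplus: the base class manages alpha, beta,
    gamma and the forward rule; the subclass only sets the wrapped callable."""

    def __init__(self, alpha=None, beta=None, gamma=None, fixed=None):
        super().__init__(alpha, beta, gamma, fixed)
        self._func = torch.nn.Softplus()

af = AdaptiveSoftplus(beta=2.0)
print(af(torch.rand(4)))   # computes alpha * Softplus(beta * x + gamma)
print(af.func)             # the wrapped torch.nn.Softplus module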
pina/model/layers/__init__.py
@@ -12,7 +12,6 @@ __all__ = [
     "PeriodicBoundaryEmbedding",
     "AVNOBlock",
     "LowRankBlock",
-    "AdaptiveActivationFunction",
 ]

 from .convolution_2d import ContinuousConvBlock
@@ -27,4 +26,3 @@ from .pod import PODBlock
 from .embedding import PeriodicBoundaryEmbedding
 from .avno_layer import AVNOBlock
 from .lowrank_layer import LowRankBlock
-from .adaptive_func import AdaptiveActivationFunction
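Because AdaptiveActivationFunction is dropped from pina.model.layers, downstream code presumably needs to switch to the new package. An illustrative migration (the old import path is the one removed above):

# before this commit (no longer available):
# from pina.model.layers import AdaptiveActivationFunction
# adaptive_tanh = AdaptiveActivationFunction(torch.nn.Tanh())

# after this commit: use the ready-made subclass instead
from pina.adaptive_functions import AdaptiveTanh
adaptive_tanh = AdaptiveTanh()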
tests/test_adaptive_functions.py (new file, 62 lines)
@@ -0,0 +1,62 @@
import torch
import pytest

from pina.adaptive_functions import (AdaptiveReLU, AdaptiveSigmoid, AdaptiveTanh,
                                     AdaptiveSiLU, AdaptiveMish, AdaptiveELU,
                                     AdaptiveCELU, AdaptiveGELU, AdaptiveSoftmin,
                                     AdaptiveSoftmax, AdaptiveSIREN, AdaptiveExp)


adaptive_functions = (AdaptiveReLU, AdaptiveSigmoid, AdaptiveTanh,
                      AdaptiveSiLU, AdaptiveMish, AdaptiveELU,
                      AdaptiveCELU, AdaptiveGELU, AdaptiveSoftmin,
                      AdaptiveSoftmax, AdaptiveSIREN, AdaptiveExp)
x = torch.rand(10, requires_grad=True)

@pytest.mark.parametrize("Func", adaptive_functions)
def test_constructor(Func):
    if Func.__name__ == 'AdaptiveExp':
        # simple
        Func()
        # setting values
        af = Func(alpha=1., beta=2.)
        assert af.alpha.requires_grad
        assert af.beta.requires_grad
        assert af.alpha == 1.
        assert af.beta == 2.
    else:
        # simple
        Func()
        # setting values
        af = Func(alpha=1., beta=2., gamma=3.)
        assert af.alpha.requires_grad
        assert af.beta.requires_grad
        assert af.gamma.requires_grad
        assert af.alpha == 1.
        assert af.beta == 2.
        assert af.gamma == 3.

    # fixed variables
    af = Func(alpha=1., beta=2., fixed=['alpha'])
    assert af.alpha.requires_grad is False
    assert af.beta.requires_grad
    assert af.alpha == 1.
    assert af.beta == 2.

    with pytest.raises(TypeError):
        Func(alpha=1., beta=2., fixed=['delta'])

    with pytest.raises(ValueError):
        Func(alpha='s')
        Func(alpha=1)

@pytest.mark.parametrize("Func", adaptive_functions)
def test_forward(Func):
    af = Func()
    af(x)

@pytest.mark.parametrize("Func", adaptive_functions)
def test_backward(Func):
    af = Func()
    y = af(x)
    y.mean().backward()
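test_backward only checks that backpropagation runs; the practical consequence is that the adaptive parameters receive gradients alongside the network weights. A small sketch of that behaviour, assuming only the default constructors above:

import torch
from pina.adaptive_functions import AdaptiveSigmoid

af = AdaptiveSigmoid()
x = torch.rand(10, requires_grad=True)
af(x).mean().backward()

# after backward, the trainable adaptive parameters hold gradients
print(af.alpha.grad, af.beta.grad, af.gamma.grad)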
@@ -1,48 +0,0 @@ (file removed)
import torch
import pytest

from pina.model.layers.adaptive_func import AdaptiveActivationFunction

x = torch.rand(5)
torchfunc = torch.nn.Tanh()

def test_constructor():
    # simple
    AdaptiveActivationFunction(torchfunc)

    # setting values
    af = AdaptiveActivationFunction(torchfunc, alpha=1., beta=2., gamma=3.)
    assert af.alpha.requires_grad
    assert af.beta.requires_grad
    assert af.gamma.requires_grad
    assert af.alpha == 1.
    assert af.beta == 2.
    assert af.gamma == 3.

    # fixed variables
    af = AdaptiveActivationFunction(torchfunc, alpha=1., beta=2.,
                                    gamma=3., fixed=['alpha'])
    assert af.alpha.requires_grad is False
    assert af.beta.requires_grad
    assert af.gamma.requires_grad
    assert af.alpha == 1.
    assert af.beta == 2.
    assert af.gamma == 3.

    with pytest.raises(TypeError):
        AdaptiveActivationFunction(torchfunc, alpha=1., beta=2.,
                                   gamma=3., fixed=['delta'])

    with pytest.raises(ValueError):
        AdaptiveActivationFunction(torchfunc, alpha='s')
        AdaptiveActivationFunction(torchfunc, alpha=1., fixed='alpha')
        AdaptiveActivationFunction(torchfunc, alpha=1)

def test_forward():
    af = AdaptiveActivationFunction(torchfunc)
    af(x)

def test_backward():
    af = AdaptiveActivationFunction(torchfunc)
    y = af(x)
    y.mean().backward()