🎨 Format Python code with psf/black

ndem0
2024-04-08 15:18:17 +00:00
committed by Nicola Demo
parent 4f5d9559b2
commit e2d000ab22
4 changed files with 72 additions and 47 deletions

View File

@@ -1,21 +1,31 @@
-__all__ = [
-    'AdaptiveActivationFunctionInterface',
-    'AdaptiveReLU',
-    'AdaptiveSigmoid',
-    'AdaptiveTanh',
-    'AdaptiveSiLU',
-    'AdaptiveMish',
-    'AdaptiveELU',
-    'AdaptiveCELU',
-    'AdaptiveGELU',
-    'AdaptiveSoftmin',
-    'AdaptiveSoftmax',
-    'AdaptiveSIREN',
-    'AdaptiveExp']
-from .adaptive_func import (AdaptiveReLU, AdaptiveSigmoid, AdaptiveTanh,
-                            AdaptiveSiLU, AdaptiveMish, AdaptiveELU,
-                            AdaptiveCELU, AdaptiveGELU, AdaptiveSoftmin,
-                            AdaptiveSoftmax, AdaptiveSIREN, AdaptiveExp)
+__all__ = [
+    "AdaptiveActivationFunctionInterface",
+    "AdaptiveReLU",
+    "AdaptiveSigmoid",
+    "AdaptiveTanh",
+    "AdaptiveSiLU",
+    "AdaptiveMish",
+    "AdaptiveELU",
+    "AdaptiveCELU",
+    "AdaptiveGELU",
+    "AdaptiveSoftmin",
+    "AdaptiveSoftmax",
+    "AdaptiveSIREN",
+    "AdaptiveExp",
+]
+from .adaptive_func import (
+    AdaptiveReLU,
+    AdaptiveSigmoid,
+    AdaptiveTanh,
+    AdaptiveSiLU,
+    AdaptiveMish,
+    AdaptiveELU,
+    AdaptiveCELU,
+    AdaptiveGELU,
+    AdaptiveSoftmin,
+    AdaptiveSoftmax,
+    AdaptiveSIREN,
+    AdaptiveExp,
+)
 from .adaptive_func_interface import AdaptiveActivationFunctionInterface

View File

@@ -19,7 +19,7 @@ class AdaptiveReLU(AdaptiveActivationFunctionInterface):
    where :math:`\alpha,\,\beta,\,\gamma` are trainable parameters, and the
    ReLU function is defined as:

    .. math::
        \text{ReLU}(x) = \max(0, x)
@@ -36,10 +36,11 @@ class AdaptiveReLU(AdaptiveActivationFunctionInterface):
        Jagtap, Ameya D., Kenji Kawaguchi, and George Em Karniadakis. *Adaptive
        activation functions accelerate convergence in deep and
        physics-informed neural networks*. Journal of
        Computational Physics 404 (2020): 109136.
        DOI: `JCP 10.1016
        <https://doi.org/10.1016/j.jcp.2019.109136>`_.
    """

    def __init__(self, alpha=None, beta=None, gamma=None, fixed=None):
        super().__init__(alpha, beta, gamma, fixed)
        self._func = torch.nn.ReLU()
@@ -59,7 +60,7 @@ class AdaptiveSigmoid(AdaptiveActivationFunctionInterface):
    where :math:`\alpha,\,\beta,\,\gamma` are trainable parameters, and the
    Sigmoid function is defined as:

    .. math::
        \text{Sigmoid}(x) = \frac{1}{1 + \exp(-x)}
@@ -76,10 +77,11 @@ class AdaptiveSigmoid(AdaptiveActivationFunctionInterface):
        Jagtap, Ameya D., Kenji Kawaguchi, and George Em Karniadakis. *Adaptive
        activation functions accelerate convergence in deep and
        physics-informed neural networks*. Journal of
        Computational Physics 404 (2020): 109136.
        DOI: `JCP 10.1016
        <https://doi.org/10.1016/j.jcp.2019.109136>`_.
    """

    def __init__(self, alpha=None, beta=None, gamma=None, fixed=None):
        super().__init__(alpha, beta, gamma, fixed)
        self._func = torch.nn.Sigmoid()
@@ -99,7 +101,7 @@ class AdaptiveTanh(AdaptiveActivationFunctionInterface):
    where :math:`\alpha,\,\beta,\,\gamma` are trainable parameters, and the
    Tanh function is defined as:

    .. math::
        \text{Tanh}(x) = \frac{\exp(x) - \exp(-x)} {\exp(x) + \exp(-x)}
@@ -116,10 +118,11 @@ class AdaptiveTanh(AdaptiveActivationFunctionInterface):
        Jagtap, Ameya D., Kenji Kawaguchi, and George Em Karniadakis. *Adaptive
        activation functions accelerate convergence in deep and
        physics-informed neural networks*. Journal of
        Computational Physics 404 (2020): 109136.
        DOI: `JCP 10.1016
        <https://doi.org/10.1016/j.jcp.2019.109136>`_.
    """

    def __init__(self, alpha=None, beta=None, gamma=None, fixed=None):
        super().__init__(alpha, beta, gamma, fixed)
        self._func = torch.nn.Tanh()
@@ -139,7 +142,7 @@ class AdaptiveSiLU(AdaptiveActivationFunctionInterface):
    where :math:`\alpha,\,\beta,\,\gamma` are trainable parameters, and the
    SiLU function is defined as:

    .. math::
        \text{SiLU}(x) = x * \sigma(x), \text{where }\sigma(x)
        \text{ is the logistic sigmoid.}
@@ -157,10 +160,11 @@ class AdaptiveSiLU(AdaptiveActivationFunctionInterface):
        Jagtap, Ameya D., Kenji Kawaguchi, and George Em Karniadakis. *Adaptive
        activation functions accelerate convergence in deep and
        physics-informed neural networks*. Journal of
        Computational Physics 404 (2020): 109136.
        DOI: `JCP 10.1016
        <https://doi.org/10.1016/j.jcp.2019.109136>`_.
    """

    def __init__(self, alpha=None, beta=None, gamma=None, fixed=None):
        super().__init__(alpha, beta, gamma, fixed)
        self._func = torch.nn.SiLU()
@@ -180,7 +184,7 @@ class AdaptiveMish(AdaptiveActivationFunctionInterface):
    where :math:`\alpha,\,\beta,\,\gamma` are trainable parameters, and the
    Mish function is defined as:

    .. math::
        \text{Mish}(x) = x * \text{Tanh}(x)
@@ -197,10 +201,11 @@ class AdaptiveMish(AdaptiveActivationFunctionInterface):
        Jagtap, Ameya D., Kenji Kawaguchi, and George Em Karniadakis. *Adaptive
        activation functions accelerate convergence in deep and
        physics-informed neural networks*. Journal of
        Computational Physics 404 (2020): 109136.
        DOI: `JCP 10.1016
        <https://doi.org/10.1016/j.jcp.2019.109136>`_.
    """

    def __init__(self, alpha=None, beta=None, gamma=None, fixed=None):
        super().__init__(alpha, beta, gamma, fixed)
        self._func = torch.nn.Mish()
@@ -244,6 +249,7 @@ class AdaptiveELU(AdaptiveActivationFunctionInterface):
        DOI: `JCP 10.1016
        <https://doi.org/10.1016/j.jcp.2019.109136>`_.
    """

    def __init__(self, alpha=None, beta=None, gamma=None, fixed=None):
        super().__init__(alpha, beta, gamma, fixed)
        self._func = torch.nn.ELU()
@@ -263,7 +269,7 @@ class AdaptiveCELU(AdaptiveActivationFunctionInterface):
    where :math:`\alpha,\,\beta,\,\gamma` are trainable parameters, and the
    CELU function is defined as:

    .. math::
        \text{CELU}(x) = \max(0,x) + \min(0, \alpha * (\exp(x) - 1))
@@ -280,14 +286,16 @@ class AdaptiveCELU(AdaptiveActivationFunctionInterface):
        Jagtap, Ameya D., Kenji Kawaguchi, and George Em Karniadakis. *Adaptive
        activation functions accelerate convergence in deep and
        physics-informed neural networks*. Journal of
        Computational Physics 404 (2020): 109136.
        DOI: `JCP 10.1016
        <https://doi.org/10.1016/j.jcp.2019.109136>`_.
    """

    def __init__(self, alpha=None, beta=None, gamma=None, fixed=None):
        super().__init__(alpha, beta, gamma, fixed)
        self._func = torch.nn.CELU()


class AdaptiveGELU(AdaptiveActivationFunctionInterface):
    r"""
    Adaptive trainable :class:`~torch.nn.GELU` activation function.
@@ -302,7 +310,7 @@ class AdaptiveGELU(AdaptiveActivationFunctionInterface):
    where :math:`\alpha,\,\beta,\,\gamma` are trainable parameters, and the
    GELU function is defined as:

    .. math::
        \text{GELU}(x) = 0.5 * x * (1 + \text{Tanh}(\sqrt{2 / \pi} * (x + 0.044715 * x^3)))
@@ -320,10 +328,11 @@ class AdaptiveGELU(AdaptiveActivationFunctionInterface):
        Jagtap, Ameya D., Kenji Kawaguchi, and George Em Karniadakis. *Adaptive
        activation functions accelerate convergence in deep and
        physics-informed neural networks*. Journal of
        Computational Physics 404 (2020): 109136.
        DOI: `JCP 10.1016
        <https://doi.org/10.1016/j.jcp.2019.109136>`_.
    """

    def __init__(self, alpha=None, beta=None, gamma=None, fixed=None):
        super().__init__(alpha, beta, gamma, fixed)
        self._func = torch.nn.GELU()
@@ -343,7 +352,7 @@ class AdaptiveSoftmin(AdaptiveActivationFunctionInterface):
    where :math:`\alpha,\,\beta,\,\gamma` are trainable parameters, and the
    Softmin function is defined as:

    .. math::
        \text{Softmin}(x_{i}) = \frac{\exp(-x_i)}{\sum_j \exp(-x_j)}
@@ -360,10 +369,11 @@ class AdaptiveSoftmin(AdaptiveActivationFunctionInterface):
        Jagtap, Ameya D., Kenji Kawaguchi, and George Em Karniadakis. *Adaptive
        activation functions accelerate convergence in deep and
        physics-informed neural networks*. Journal of
        Computational Physics 404 (2020): 109136.
        DOI: `JCP 10.1016
        <https://doi.org/10.1016/j.jcp.2019.109136>`_.
    """

    def __init__(self, alpha=None, beta=None, gamma=None, fixed=None):
        super().__init__(alpha, beta, gamma, fixed)
        self._func = torch.nn.Softmin()
@@ -383,7 +393,7 @@ class AdaptiveSoftmax(AdaptiveActivationFunctionInterface):
    where :math:`\alpha,\,\beta,\,\gamma` are trainable parameters, and the
    Softmax function is defined as:

    .. math::
        \text{Softmax}(x_{i}) = \frac{\exp(x_i)}{\sum_j \exp(x_j)}
@@ -400,14 +410,16 @@ class AdaptiveSoftmax(AdaptiveActivationFunctionInterface):
        Jagtap, Ameya D., Kenji Kawaguchi, and George Em Karniadakis. *Adaptive
        activation functions accelerate convergence in deep and
        physics-informed neural networks*. Journal of
        Computational Physics 404 (2020): 109136.
        DOI: `JCP 10.1016
        <https://doi.org/10.1016/j.jcp.2019.109136>`_.
    """

    def __init__(self, alpha=None, beta=None, gamma=None, fixed=None):
        super().__init__(alpha, beta, gamma, fixed)
        self._func = torch.nn.Softmax()


class AdaptiveSIREN(AdaptiveActivationFunctionInterface):
    r"""
    Adaptive trainable :obj:`~torch.sin` function.
@@ -435,14 +447,16 @@ class AdaptiveSIREN(AdaptiveActivationFunctionInterface):
        Jagtap, Ameya D., Kenji Kawaguchi, and George Em Karniadakis. *Adaptive
        activation functions accelerate convergence in deep and
        physics-informed neural networks*. Journal of
        Computational Physics 404 (2020): 109136.
        DOI: `JCP 10.1016
        <https://doi.org/10.1016/j.jcp.2019.109136>`_.
    """

    def __init__(self, alpha=None, beta=None, gamma=None, fixed=None):
        super().__init__(alpha, beta, gamma, fixed)
        self._func = torch.sin


class AdaptiveExp(AdaptiveActivationFunctionInterface):
    r"""
    Adaptive trainable :obj:`~torch.exp` function.
@@ -470,19 +484,20 @@ class AdaptiveExp(AdaptiveActivationFunctionInterface):
        Jagtap, Ameya D., Kenji Kawaguchi, and George Em Karniadakis. *Adaptive
        activation functions accelerate convergence in deep and
        physics-informed neural networks*. Journal of
        Computational Physics 404 (2020): 109136.
        DOI: `JCP 10.1016
        <https://doi.org/10.1016/j.jcp.2019.109136>`_.
    """

    def __init__(self, alpha=None, beta=None, fixed=None):
        # only alpha, and beta parameters (gamma=0 fixed)
        if fixed is None:
-            fixed = ['gamma']
+            fixed = ["gamma"]
        else:
            check_consistency(fixed, str)
-            fixed = list(fixed) + ['gamma']
+            fixed = list(fixed) + ["gamma"]
        # calling super
-        super().__init__(alpha, beta, 0., fixed)
+        super().__init__(alpha, beta, 0.0, fixed)
        self._func = torch.exp
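
The classes touched in this file are ordinary torch.nn.Module activations whose alpha, beta, and gamma scalars stay trainable unless named in fixed. A minimal usage sketch follows; the import path pina.adaptive_functions and the numeric values are assumptions for illustration, only the constructor signature (alpha=None, beta=None, gamma=None, fixed=None) comes from the diff above.

# Hedged sketch: "pina.adaptive_functions" is an assumed import path and the
# initial values are illustrative, not part of this commit.
import torch
from pina.adaptive_functions import AdaptiveReLU

# Trainable ReLU-style activation; freeze gamma so only alpha and beta learn,
# mirroring the fixed=["gamma"] pattern used by AdaptiveExp above.
activation = AdaptiveReLU(alpha=1.0, beta=1.0, gamma=0.0, fixed=["gamma"])

x = torch.linspace(-2.0, 2.0, steps=5)
y = activation(x)  # forward pass through the wrapped torch.nn.ReLU

# The adaptive scalars show up as ordinary parameters for an optimizer.
print([name for name, _ in activation.named_parameters()])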

View File

@@ -36,7 +36,7 @@ class AdaptiveActivationFunctionInterface(torch.nn.Module, metaclass=ABCMeta):
        Jagtap, Ameya D., Kenji Kawaguchi, and George Em Karniadakis. *Adaptive
        activation functions accelerate convergence in deep and
        physics-informed neural networks*. Journal of
        Computational Physics 404 (2020): 109136.
        DOI: `JCP 10.1016
        <https://doi.org/10.1016/j.jcp.2019.109136>`_.
    """
@@ -103,7 +103,7 @@ class AdaptiveActivationFunctionInterface(torch.nn.Module, metaclass=ABCMeta):
            self._gamma = torch.nn.Parameter(gamma, requires_grad=True)
        else:
            self.register_buffer("gamma", gamma)

        # storing the activation
        self._func = None
@@ -137,10 +137,10 @@ class AdaptiveActivationFunctionInterface(torch.nn.Module, metaclass=ABCMeta):
        The gamma variable.
        """
        return self._gamma

    @property
    def func(self):
        """
        The callable activation function.
        """
        return self._func

View File

@@ -25,4 +25,4 @@ from .fourier import FourierBlock1D, FourierBlock2D, FourierBlock3D
from .pod import PODBlock
from .embedding import PeriodicBoundaryEmbedding
from .avno_layer import AVNOBlock
from .lowrank_layer import LowRankBlock
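
Every hunk in this commit is mechanical Black output: quote normalization, trailing commas, and one-item-per-line wrapping of collections that exceed the line length. A small sketch of the same normalization through Black's Python API, assuming the black package is installed; the sample source string is made up.

# Hedged sketch: reproduce Black-style quote and wrapping normalization
# programmatically (requires the "black" package; input string is illustrative).
import black

source = "__all__ = ['AdaptiveReLU', 'AdaptiveSigmoid', 'AdaptiveTanh', 'AdaptiveSiLU', 'AdaptiveMish', 'AdaptiveELU']\n"
formatted = black.format_str(source, mode=black.FileMode())
print(formatted)
# Expected shape of the output: double quotes, one entry per line, a trailing
# comma, and the closing bracket on its own line, as in the first hunk above.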