From e2d000ab22e65f588f5a68cd83830693e7a96fe5 Mon Sep 17 00:00:00 2001 From: ndem0 Date: Mon, 8 Apr 2024 15:18:17 +0000 Subject: [PATCH] :art: Format Python code with psf/black --- pina/adaptive_functions/__init__.py | 46 ++++++++------ pina/adaptive_functions/adaptive_func.py | 63 ++++++++++++------- .../adaptive_func_interface.py | 8 +-- pina/model/layers/__init__.py | 2 +- 4 files changed, 72 insertions(+), 47 deletions(-) diff --git a/pina/adaptive_functions/__init__.py b/pina/adaptive_functions/__init__.py index 0ab6053..0fa0ecd 100644 --- a/pina/adaptive_functions/__init__.py +++ b/pina/adaptive_functions/__init__.py @@ -1,21 +1,31 @@ __all__ = [ - 'AdaptiveActivationFunctionInterface', - 'AdaptiveReLU', - 'AdaptiveSigmoid', - 'AdaptiveTanh', - 'AdaptiveSiLU', - 'AdaptiveMish', - 'AdaptiveELU', - 'AdaptiveCELU', - 'AdaptiveGELU', - 'AdaptiveSoftmin', - 'AdaptiveSoftmax', - 'AdaptiveSIREN', - 'AdaptiveExp'] + "AdaptiveActivationFunctionInterface", + "AdaptiveReLU", + "AdaptiveSigmoid", + "AdaptiveTanh", + "AdaptiveSiLU", + "AdaptiveMish", + "AdaptiveELU", + "AdaptiveCELU", + "AdaptiveGELU", + "AdaptiveSoftmin", + "AdaptiveSoftmax", + "AdaptiveSIREN", + "AdaptiveExp", +] -from .adaptive_func import (AdaptiveReLU, AdaptiveSigmoid, AdaptiveTanh, - AdaptiveSiLU, AdaptiveMish, AdaptiveELU, - AdaptiveCELU, AdaptiveGELU, AdaptiveSoftmin, - AdaptiveSoftmax, AdaptiveSIREN, AdaptiveExp) +from .adaptive_func import ( + AdaptiveReLU, + AdaptiveSigmoid, + AdaptiveTanh, + AdaptiveSiLU, + AdaptiveMish, + AdaptiveELU, + AdaptiveCELU, + AdaptiveGELU, + AdaptiveSoftmin, + AdaptiveSoftmax, + AdaptiveSIREN, + AdaptiveExp, +) from .adaptive_func_interface import AdaptiveActivationFunctionInterface - diff --git a/pina/adaptive_functions/adaptive_func.py b/pina/adaptive_functions/adaptive_func.py index 0ee22b2..30966f1 100644 --- a/pina/adaptive_functions/adaptive_func.py +++ b/pina/adaptive_functions/adaptive_func.py @@ -19,7 +19,7 @@ class AdaptiveReLU(AdaptiveActivationFunctionInterface): where :math:`\alpha,\,\beta,\,\gamma` are trainable parameters, and the ReLU function is defined as: - + .. math:: \text{ReLU}(x) = \max(0, x) @@ -36,10 +36,11 @@ class AdaptiveReLU(AdaptiveActivationFunctionInterface): Jagtap, Ameya D., Kenji Kawaguchi, and George Em Karniadakis. *Adaptive activation functions accelerate convergence in deep and physics-informed neural networks*. Journal of - Computational Physics 404 (2020): 109136. + Computational Physics 404 (2020): 109136. DOI: `JCP 10.1016 `_. """ + def __init__(self, alpha=None, beta=None, gamma=None, fixed=None): super().__init__(alpha, beta, gamma, fixed) self._func = torch.nn.ReLU() @@ -59,7 +60,7 @@ class AdaptiveSigmoid(AdaptiveActivationFunctionInterface): where :math:`\alpha,\,\beta,\,\gamma` are trainable parameters, and the Sigmoid function is defined as: - + .. math:: \text{Sigmoid}(x) = \frac{1}{1 + \exp(-x)} @@ -76,10 +77,11 @@ class AdaptiveSigmoid(AdaptiveActivationFunctionInterface): Jagtap, Ameya D., Kenji Kawaguchi, and George Em Karniadakis. *Adaptive activation functions accelerate convergence in deep and physics-informed neural networks*. Journal of - Computational Physics 404 (2020): 109136. + Computational Physics 404 (2020): 109136. DOI: `JCP 10.1016 `_. 
""" + def __init__(self, alpha=None, beta=None, gamma=None, fixed=None): super().__init__(alpha, beta, gamma, fixed) self._func = torch.nn.Sigmoid() @@ -99,7 +101,7 @@ class AdaptiveTanh(AdaptiveActivationFunctionInterface): where :math:`\alpha,\,\beta,\,\gamma` are trainable parameters, and the Tanh function is defined as: - + .. math:: \text{Tanh}(x) = \frac{\exp(x) - \exp(-x)} {\exp(x) + \exp(-x)} @@ -116,10 +118,11 @@ class AdaptiveTanh(AdaptiveActivationFunctionInterface): Jagtap, Ameya D., Kenji Kawaguchi, and George Em Karniadakis. *Adaptive activation functions accelerate convergence in deep and physics-informed neural networks*. Journal of - Computational Physics 404 (2020): 109136. + Computational Physics 404 (2020): 109136. DOI: `JCP 10.1016 `_. """ + def __init__(self, alpha=None, beta=None, gamma=None, fixed=None): super().__init__(alpha, beta, gamma, fixed) self._func = torch.nn.Tanh() @@ -139,7 +142,7 @@ class AdaptiveSiLU(AdaptiveActivationFunctionInterface): where :math:`\alpha,\,\beta,\,\gamma` are trainable parameters, and the SiLU function is defined as: - + .. math:: \text{SiLU}(x) = x * \sigma(x), \text{where }\sigma(x) \text{ is the logistic sigmoid.} @@ -157,10 +160,11 @@ class AdaptiveSiLU(AdaptiveActivationFunctionInterface): Jagtap, Ameya D., Kenji Kawaguchi, and George Em Karniadakis. *Adaptive activation functions accelerate convergence in deep and physics-informed neural networks*. Journal of - Computational Physics 404 (2020): 109136. + Computational Physics 404 (2020): 109136. DOI: `JCP 10.1016 `_. """ + def __init__(self, alpha=None, beta=None, gamma=None, fixed=None): super().__init__(alpha, beta, gamma, fixed) self._func = torch.nn.SiLU() @@ -180,7 +184,7 @@ class AdaptiveMish(AdaptiveActivationFunctionInterface): where :math:`\alpha,\,\beta,\,\gamma` are trainable parameters, and the Mish function is defined as: - + .. math:: \text{Mish}(x) = x * \text{Tanh}(x) @@ -197,10 +201,11 @@ class AdaptiveMish(AdaptiveActivationFunctionInterface): Jagtap, Ameya D., Kenji Kawaguchi, and George Em Karniadakis. *Adaptive activation functions accelerate convergence in deep and physics-informed neural networks*. Journal of - Computational Physics 404 (2020): 109136. + Computational Physics 404 (2020): 109136. DOI: `JCP 10.1016 `_. """ + def __init__(self, alpha=None, beta=None, gamma=None, fixed=None): super().__init__(alpha, beta, gamma, fixed) self._func = torch.nn.Mish() @@ -244,6 +249,7 @@ class AdaptiveELU(AdaptiveActivationFunctionInterface): DOI: `JCP 10.1016 `_. """ + def __init__(self, alpha=None, beta=None, gamma=None, fixed=None): super().__init__(alpha, beta, gamma, fixed) self._func = torch.nn.ELU() @@ -263,7 +269,7 @@ class AdaptiveCELU(AdaptiveActivationFunctionInterface): where :math:`\alpha,\,\beta,\,\gamma` are trainable parameters, and the CELU function is defined as: - + .. math:: \text{CELU}(x) = \max(0,x) + \min(0, \alpha * (\exp(x) - 1)) @@ -280,14 +286,16 @@ class AdaptiveCELU(AdaptiveActivationFunctionInterface): Jagtap, Ameya D., Kenji Kawaguchi, and George Em Karniadakis. *Adaptive activation functions accelerate convergence in deep and physics-informed neural networks*. Journal of - Computational Physics 404 (2020): 109136. + Computational Physics 404 (2020): 109136. DOI: `JCP 10.1016 `_. 
""" + def __init__(self, alpha=None, beta=None, gamma=None, fixed=None): super().__init__(alpha, beta, gamma, fixed) self._func = torch.nn.CELU() + class AdaptiveGELU(AdaptiveActivationFunctionInterface): r""" Adaptive trainable :class:`~torch.nn.GELU` activation function. @@ -302,7 +310,7 @@ class AdaptiveGELU(AdaptiveActivationFunctionInterface): where :math:`\alpha,\,\beta,\,\gamma` are trainable parameters, and the GELU function is defined as: - + .. math:: \text{GELU}(x) = 0.5 * x * (1 + \text{Tanh}(\sqrt{2 / \pi} * (x + 0.044715 * x^3))) @@ -320,10 +328,11 @@ class AdaptiveGELU(AdaptiveActivationFunctionInterface): Jagtap, Ameya D., Kenji Kawaguchi, and George Em Karniadakis. *Adaptive activation functions accelerate convergence in deep and physics-informed neural networks*. Journal of - Computational Physics 404 (2020): 109136. + Computational Physics 404 (2020): 109136. DOI: `JCP 10.1016 `_. """ + def __init__(self, alpha=None, beta=None, gamma=None, fixed=None): super().__init__(alpha, beta, gamma, fixed) self._func = torch.nn.GELU() @@ -343,7 +352,7 @@ class AdaptiveSoftmin(AdaptiveActivationFunctionInterface): where :math:`\alpha,\,\beta,\,\gamma` are trainable parameters, and the Softmin function is defined as: - + .. math:: \text{Softmin}(x_{i}) = \frac{\exp(-x_i)}{\sum_j \exp(-x_j)} @@ -360,10 +369,11 @@ class AdaptiveSoftmin(AdaptiveActivationFunctionInterface): Jagtap, Ameya D., Kenji Kawaguchi, and George Em Karniadakis. *Adaptive activation functions accelerate convergence in deep and physics-informed neural networks*. Journal of - Computational Physics 404 (2020): 109136. + Computational Physics 404 (2020): 109136. DOI: `JCP 10.1016 `_. """ + def __init__(self, alpha=None, beta=None, gamma=None, fixed=None): super().__init__(alpha, beta, gamma, fixed) self._func = torch.nn.Softmin() @@ -383,7 +393,7 @@ class AdaptiveSoftmax(AdaptiveActivationFunctionInterface): where :math:`\alpha,\,\beta,\,\gamma` are trainable parameters, and the Softmax function is defined as: - + .. math:: \text{Softmax}(x_{i}) = \frac{\exp(x_i)}{\sum_j \exp(x_j)} @@ -400,14 +410,16 @@ class AdaptiveSoftmax(AdaptiveActivationFunctionInterface): Jagtap, Ameya D., Kenji Kawaguchi, and George Em Karniadakis. *Adaptive activation functions accelerate convergence in deep and physics-informed neural networks*. Journal of - Computational Physics 404 (2020): 109136. + Computational Physics 404 (2020): 109136. DOI: `JCP 10.1016 `_. """ + def __init__(self, alpha=None, beta=None, gamma=None, fixed=None): super().__init__(alpha, beta, gamma, fixed) self._func = torch.nn.Softmax() + class AdaptiveSIREN(AdaptiveActivationFunctionInterface): r""" Adaptive trainable :obj:`~torch.sin` function. @@ -435,14 +447,16 @@ class AdaptiveSIREN(AdaptiveActivationFunctionInterface): Jagtap, Ameya D., Kenji Kawaguchi, and George Em Karniadakis. *Adaptive activation functions accelerate convergence in deep and physics-informed neural networks*. Journal of - Computational Physics 404 (2020): 109136. + Computational Physics 404 (2020): 109136. DOI: `JCP 10.1016 `_. """ + def __init__(self, alpha=None, beta=None, gamma=None, fixed=None): super().__init__(alpha, beta, gamma, fixed) self._func = torch.sin + class AdaptiveExp(AdaptiveActivationFunctionInterface): r""" Adaptive trainable :obj:`~torch.exp` function. @@ -470,19 +484,20 @@ class AdaptiveExp(AdaptiveActivationFunctionInterface): Jagtap, Ameya D., Kenji Kawaguchi, and George Em Karniadakis. 
*Adaptive activation functions accelerate convergence in deep and physics-informed neural networks*. Journal of - Computational Physics 404 (2020): 109136. + Computational Physics 404 (2020): 109136. DOI: `JCP 10.1016 `_. """ + def __init__(self, alpha=None, beta=None, fixed=None): # only alpha, and beta parameters (gamma=0 fixed) if fixed is None: - fixed = ['gamma'] + fixed = ["gamma"] else: check_consistency(fixed, str) - fixed = list(fixed) + ['gamma'] + fixed = list(fixed) + ["gamma"] # calling super - super().__init__(alpha, beta, 0., fixed) - self._func = torch.exp \ No newline at end of file + super().__init__(alpha, beta, 0.0, fixed) + self._func = torch.exp diff --git a/pina/adaptive_functions/adaptive_func_interface.py b/pina/adaptive_functions/adaptive_func_interface.py index b0522d5..a12b78b 100644 --- a/pina/adaptive_functions/adaptive_func_interface.py +++ b/pina/adaptive_functions/adaptive_func_interface.py @@ -36,7 +36,7 @@ class AdaptiveActivationFunctionInterface(torch.nn.Module, metaclass=ABCMeta): Jagtap, Ameya D., Kenji Kawaguchi, and George Em Karniadakis. *Adaptive activation functions accelerate convergence in deep and physics-informed neural networks*. Journal of - Computational Physics 404 (2020): 109136. + Computational Physics 404 (2020): 109136. DOI: `JCP 10.1016 `_. """ @@ -103,7 +103,7 @@ class AdaptiveActivationFunctionInterface(torch.nn.Module, metaclass=ABCMeta): self._gamma = torch.nn.Parameter(gamma, requires_grad=True) else: self.register_buffer("gamma", gamma) - + # storing the activation self._func = None @@ -137,10 +137,10 @@ class AdaptiveActivationFunctionInterface(torch.nn.Module, metaclass=ABCMeta): The gamma variable. """ return self._gamma - + @property def func(self): """ The callable activation function. """ - return self._func \ No newline at end of file + return self._func diff --git a/pina/model/layers/__init__.py b/pina/model/layers/__init__.py index de9686b..5d20340 100644 --- a/pina/model/layers/__init__.py +++ b/pina/model/layers/__init__.py @@ -25,4 +25,4 @@ from .fourier import FourierBlock1D, FourierBlock2D, FourierBlock3D from .pod import PODBlock from .embedding import PeriodicBoundaryEmbedding from .avno_layer import AVNOBlock -from .lowrank_layer import LowRankBlock \ No newline at end of file +from .lowrank_layer import LowRankBlock
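Since this patch only applies psf/black formatting, the public API of `pina.adaptive_functions` is unchanged. Below is a minimal usage sketch, not part of the patch: the import path, class names, and the `(alpha, beta, gamma, fixed)` constructor signature are taken from the code shown above, while the default parameter values and the exact forward rule are not visible in this diff and should be treated as assumptions.

```python
import torch

# Hypothetical usage sketch for the adaptive activation functions touched by
# this formatting patch. Import path and constructor signatures follow the
# diff above; defaults and forward semantics are assumed, not shown here.
from pina.adaptive_functions import AdaptiveTanh, AdaptiveExp

# Drop an adaptive activation into an ordinary torch model. The interface
# registers the adaptive coefficients as torch.nn.Parameter objects (unless
# they are named in `fixed`), so model.parameters() hands them to the
# optimizer together with the layer weights.
model = torch.nn.Sequential(
    torch.nn.Linear(2, 16),
    AdaptiveTanh(),
    torch.nn.Linear(16, 1),
)

out = model(torch.rand(8, 2))  # expected shape: (8, 1)

# AdaptiveExp exposes only alpha and beta; its constructor appends "gamma"
# to `fixed` and passes gamma=0.0, as the patched code shows.
exp_act = AdaptiveExp()
```

Passing `fixed` (an iterable of names such as `"gamma"`) turns the corresponding value into a non-trainable buffer instead of a `torch.nn.Parameter`, which is what `AdaptiveExp` does internally for `gamma`.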