🎨 Format Python code with psf/black

Authored by ndem0 on 2024-02-09 11:25:00 +00:00, committed by Nicola Demo
parent 591aeeb02b
commit cbb43a5392
64 changed files with 1323 additions and 955 deletions
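
Nearly every hunk below is one of two black normalizations: triple-single-quoted docstrings become triple-double-quoted, and a call whose closing parenthesis used to sit on the last argument line is split so the parenthesis gets its own line, with the trailing comment kept after it. A minimal sketch of the pattern on a stand-in class (hypothetical, not one of the 64 changed files); the commented block is the pre-black form, the live code mirrors what black writes back in this commit:

# Pre-black form, for comparison:
#
#     class Example(torch.nn.Module):
#         '''
#         Example docstring.
#         '''
#         def __init__(self):
#             super().__init__()
#             self.alpha = Parameter(
#                 torch.tensor(1.0)) # create a tensor out of alpha
#
# After running black with default settings, the same code reads:
import torch
from torch.nn.parameter import Parameter


class Example(torch.nn.Module):
    """
    Example docstring.
    """

    def __init__(self):
        super().__init__()
        self.alpha = Parameter(
            torch.tensor(1.0)
        )  # create a tensor out of alpha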

View File

@@ -3,7 +3,7 @@ from torch.nn.parameter import Parameter
 class AdaptiveCos(torch.nn.Module):
-    '''
+    """
     Implementation of soft exponential activation.
     Shape:
         - Input: (N, *) where * means, any number of additional
@@ -18,26 +18,28 @@ class AdaptiveCos(torch.nn.Module):
         >>> a1 = soft_exponential(256)
         >>> x = torch.randn(256)
         >>> x = a1(x)
-    '''
+    """
     def __init__(self, alpha=None):
-        '''
+        """
         Initialization.
         INPUT:
             - in_features: shape of the input
             - aplha: trainable parameter
              aplha is initialized with zero value by default
-        '''
+        """
         super(AdaptiveCos, self).__init__()
-        #self.in_features = in_features
+        # self.in_features = in_features
         # initialize alpha
         if alpha == None:
             self.alpha = Parameter(
-                torch.tensor(1.0))  # create a tensor out of alpha
+                torch.tensor(1.0)
+            )  # create a tensor out of alpha
         else:
             self.alpha = Parameter(
-                torch.tensor(alpha))  # create a tensor out of alpha
+                torch.tensor(alpha)
+            )  # create a tensor out of alpha
         self.alpha.requiresGrad = True  # set requiresGrad to true!
         self.scale = Parameter(torch.tensor(1.0))
@@ -47,8 +49,8 @@ class AdaptiveCos(torch.nn.Module):
         self.translate.requiresGrad = True  # set requiresGrad to true!
     def forward(self, x):
-        '''
+        """
         Forward pass of the function.
         Applies the function to the input elementwise.
-        '''
+        """
         return self.scale * (torch.cos(self.alpha * x + self.translate))
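
A side note on the docstrings left untouched by this commit: their usage example (">>> a1 = soft_exponential(256)") does not match these classes, whose __init__ takes at most an optional alpha. A hedged usage sketch for AdaptiveCos, condensed from the post-black hunks above (the initial value of translate is not visible in the diff, so it is assumed here):

import torch
from torch.nn.parameter import Parameter


class AdaptiveCosSketch(torch.nn.Module):
    """Condensed from the post-black AdaptiveCos above; translate's initial
    value is not shown in the hunks, so 0.0 is a guess."""

    def __init__(self, alpha=None):
        super().__init__()
        self.alpha = Parameter(torch.tensor(1.0 if alpha is None else alpha))
        self.scale = Parameter(torch.tensor(1.0))
        self.translate = Parameter(torch.tensor(0.0))

    def forward(self, x):
        return self.scale * torch.cos(self.alpha * x + self.translate)


act = AdaptiveCosSketch()  # takes no size argument, unlike the docstring example
y = act(torch.randn(256))  # applied elementwise, so any input shape works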

View File

@@ -3,7 +3,7 @@ from torch.nn.parameter import Parameter
 class AdaptiveExp(torch.nn.Module):
-    '''
+    """
     Implementation of soft exponential activation.
     Shape:
         - Input: (N, *) where * means, any number of additional
@@ -18,36 +18,36 @@ class AdaptiveExp(torch.nn.Module):
         >>> a1 = soft_exponential(256)
         >>> x = torch.randn(256)
         >>> x = a1(x)
-    '''
+    """
     def __init__(self):
-        '''
+        """
         Initialization.
         INPUT:
             - in_features: shape of the input
             - aplha: trainable parameter
              aplha is initialized with zero value by default
-        '''
+        """
         super(AdaptiveExp, self).__init__()
         self.scale = Parameter(
-            torch.normal(torch.tensor(1.0),
-                         torch.tensor(0.1)))  # create a tensor out of alpha
+            torch.normal(torch.tensor(1.0), torch.tensor(0.1))
+        )  # create a tensor out of alpha
         self.scale.requiresGrad = True  # set requiresGrad to true!
         self.alpha = Parameter(
-            torch.normal(torch.tensor(1.0),
-                         torch.tensor(0.1)))  # create a tensor out of alpha
+            torch.normal(torch.tensor(1.0), torch.tensor(0.1))
+        )  # create a tensor out of alpha
         self.alpha.requiresGrad = True  # set requiresGrad to true!
         self.translate = Parameter(
-            torch.normal(torch.tensor(0.0),
-                         torch.tensor(0.1)))  # create a tensor out of alpha
+            torch.normal(torch.tensor(0.0), torch.tensor(0.1))
+        )  # create a tensor out of alpha
         self.translate.requiresGrad = True  # set requiresGrad to true!
     def forward(self, x):
-        '''
+        """
         Forward pass of the function.
         Applies the function to the input elementwise.
-        '''
+        """
         return self.scale * (x + self.translate)

View File

@@ -1,10 +1,11 @@
 """ Implementation of adaptive linear layer. """
 import torch
 from torch.nn.parameter import Parameter
 class AdaptiveLinear(torch.nn.Module):
-    '''
+    """
     Implementation of soft exponential activation.
     Shape:
         - Input: (N, *) where * means, any number of additional
@@ -19,16 +20,16 @@ class AdaptiveLinear(torch.nn.Module):
         >>> a1 = soft_exponential(256)
         >>> x = torch.randn(256)
         >>> x = a1(x)
-    '''
+    """
     def __init__(self):
-        '''
+        """
         Initialization.
         INPUT:
             - in_features: shape of the input
             - aplha: trainable parameter
              aplha is initialized with zero value by default
-        '''
+        """
         super(AdaptiveLinear, self).__init__()
         self.scale = Parameter(torch.tensor(1.0))
@@ -38,8 +39,8 @@ class AdaptiveLinear(torch.nn.Module):
         self.translate.requiresGrad = True  # set requiresGrad to true!
     def forward(self, x):
-        '''
+        """
         Forward pass of the function.
         Applies the function to the input elementwise.
-        '''
+        """
         return self.scale * (x + self.translate)

View File

@@ -3,7 +3,7 @@ from torch.nn.parameter import Parameter
 class AdaptiveReLU(torch.nn.Module, Parameter):
-    '''
+    """
     Implementation of soft exponential activation.
     Shape:
         - Input: (N, *) where * means, any number of additional
@@ -18,16 +18,16 @@ class AdaptiveReLU(torch.nn.Module, Parameter):
         >>> a1 = soft_exponential(256)
         >>> x = torch.randn(256)
         >>> x = a1(x)
-    '''
+    """
     def __init__(self):
-        '''
+        """
         Initialization.
         INPUT:
             - in_features: shape of the input
             - aplha: trainable parameter
              aplha is initialized with zero value by default
-        '''
+        """
         super(AdaptiveReLU, self).__init__()
         self.scale = Parameter(torch.rand(1))
@@ -37,9 +37,9 @@ class AdaptiveReLU(torch.nn.Module, Parameter):
         self.translate.requiresGrad = True  # set requiresGrad to true!
     def forward(self, x):
-        '''
+        """
         Forward pass of the function.
         Applies the function to the input elementwise.
-        '''
-        #x += self.translate
+        """
+        # x += self.translate
         return torch.relu(x + self.translate) * self.scale

View File

@@ -3,7 +3,7 @@ from torch.nn.parameter import Parameter
 class AdaptiveSin(torch.nn.Module):
-    '''
+    """
     Implementation of soft exponential activation.
     Shape:
         - Input: (N, *) where * means, any number of additional
@@ -18,35 +18,37 @@ class AdaptiveSin(torch.nn.Module):
         >>> a1 = soft_exponential(256)
         >>> x = torch.randn(256)
         >>> x = a1(x)
-    '''
+    """
     def __init__(self, alpha=None):
-        '''
+        """
         Initialization.
         INPUT:
             - in_features: shape of the input
             - aplha: trainable parameter
              aplha is initialized with zero value by default
-        '''
+        """
         super(AdaptiveSin, self).__init__()
         # initialize alpha
         self.alpha = Parameter(
-            torch.normal(torch.tensor(1.0),
-                         torch.tensor(0.1)))  # create a tensor out of alpha
+            torch.normal(torch.tensor(1.0), torch.tensor(0.1))
+        )  # create a tensor out of alpha
         self.alpha.requiresGrad = True  # set requiresGrad to true!
         self.scale = Parameter(
-            torch.normal(torch.tensor(1.0), torch.tensor(0.1)))
+            torch.normal(torch.tensor(1.0), torch.tensor(0.1))
+        )
         self.scale.requiresGrad = True  # set requiresGrad to true!
         self.translate = Parameter(
-            torch.normal(torch.tensor(0.0), torch.tensor(0.1)))
+            torch.normal(torch.tensor(0.0), torch.tensor(0.1))
+        )
         self.translate.requiresGrad = True  # set requiresGrad to true!
     def forward(self, x):
-        '''
+        """
         Forward pass of the function.
         Applies the function to the input elementwise.
-        '''
+        """
         return self.scale * (torch.sin(self.alpha * x + self.translate))

View File

@@ -3,7 +3,7 @@ from torch.nn.parameter import Parameter
 class AdaptiveSoftplus(torch.nn.Module):
-    '''
+    """
     Implementation of soft exponential activation.
     Shape:
         - Input: (N, *) where * means, any number of additional
@@ -18,16 +18,16 @@ class AdaptiveSoftplus(torch.nn.Module):
         >>> a1 = soft_exponential(256)
         >>> x = torch.randn(256)
         >>> x = a1(x)
-    '''
+    """
     def __init__(self):
-        '''
+        """
         Initialization.
         INPUT:
             - in_features: shape of the input
             - aplha: trainable parameter
              aplha is initialized with zero value by default
-        '''
+        """
         super().__init__()
         self.soft = torch.nn.Softplus()
@@ -36,9 +36,9 @@ class AdaptiveSoftplus(torch.nn.Module):
         self.scale.requiresGrad = True  # set requiresGrad to true!
     def forward(self, x):
-        '''
+        """
         Forward pass of the function.
         Applies the function to the input elementwise.
-        '''
-        #x += self.translate
+        """
+        # x += self.translate
         return self.soft(x) * self.scale

View File

@@ -3,7 +3,7 @@ from torch.nn.parameter import Parameter
 class AdaptiveSquare(torch.nn.Module):
-    '''
+    """
     Implementation of soft exponential activation.
     Shape:
         - Input: (N, *) where * means, any number of additional
@@ -18,16 +18,16 @@ class AdaptiveSquare(torch.nn.Module):
         >>> a1 = soft_exponential(256)
         >>> x = torch.randn(256)
         >>> x = a1(x)
-    '''
+    """
     def __init__(self, alpha=None):
-        '''
+        """
         Initialization.
         INPUT:
             - in_features: shape of the input
            - aplha: trainable parameter
              aplha is initialized with zero value by default
-        '''
+        """
         super(AdaptiveSquare, self).__init__()
         self.scale = Parameter(torch.tensor(1.0))
@@ -37,8 +37,8 @@ class AdaptiveSquare(torch.nn.Module):
         self.translate.requiresGrad = True  # set requiresGrad to true!
     def forward(self, x):
-        '''
+        """
         Forward pass of the function.
         Applies the function to the input elementwise.
-        '''
-        return self.scale * (x + self.translate)**2
+        """
+        return self.scale * (x + self.translate) ** 2

View File

@@ -3,7 +3,7 @@ from torch.nn.parameter import Parameter
 class AdaptiveTanh(torch.nn.Module):
-    '''
+    """
     Implementation of soft exponential activation.
     Shape:
         - Input: (N, *) where * means, any number of additional
@@ -18,26 +18,28 @@ class AdaptiveTanh(torch.nn.Module):
         >>> a1 = soft_exponential(256)
         >>> x = torch.randn(256)
         >>> x = a1(x)
-    '''
+    """
     def __init__(self, alpha=None):
-        '''
+        """
         Initialization.
         INPUT:
             - in_features: shape of the input
            - aplha: trainable parameter
              aplha is initialized with zero value by default
-        '''
+        """
         super(AdaptiveTanh, self).__init__()
-        #self.in_features = in_features
+        # self.in_features = in_features
         # initialize alpha
         if alpha == None:
             self.alpha = Parameter(
-                torch.tensor(1.0))  # create a tensor out of alpha
+                torch.tensor(1.0)
+            )  # create a tensor out of alpha
         else:
             self.alpha = Parameter(
-                torch.tensor(alpha))  # create a tensor out of alpha
+                torch.tensor(alpha)
+            )  # create a tensor out of alpha
         self.alpha.requiresGrad = True  # set requiresGrad to true!
@@ -48,11 +50,13 @@ class AdaptiveTanh(torch.nn.Module):
         self.translate.requiresGrad = True  # set requiresGrad to true!
     def forward(self, x):
-        '''
+        """
         Forward pass of the function.
         Applies the function to the input elementwise.
-        '''
+        """
         x += self.translate
-        return self.scale * (torch.exp(self.alpha * x) - torch.exp(
-            -self.alpha * x)) / (torch.exp(self.alpha * x) +
-                                 torch.exp(-self.alpha * x))
+        return (
+            self.scale
+            * (torch.exp(self.alpha * x) - torch.exp(-self.alpha * x))
+            / (torch.exp(self.alpha * x) + torch.exp(-self.alpha * x))
+        )
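
The last hunk above is the only place where black restructures non-trivial arithmetic: the AdaptiveTanh return is split across five lines, but the value is unchanged and the expression is just the exponential expansion of tanh. A quick equivalence check, as a sketch (the parameter values below are made up for illustration, not taken from the repo):

import torch

# Hypothetical values standing in for self.scale and self.alpha; x stands in for
# the input after forward() has already applied `x += self.translate`.
scale, alpha = torch.tensor(0.7), torch.tensor(1.3)
x = torch.randn(16)

# The black-formatted return expression from AdaptiveTanh.forward ...
expanded = (
    scale
    * (torch.exp(alpha * x) - torch.exp(-alpha * x))
    / (torch.exp(alpha * x) + torch.exp(-alpha * x))
)

# ... equals scale * tanh(alpha * x), since (e^a - e^-a) / (e^a + e^-a) = tanh(a).
assert torch.allclose(expanded, scale * torch.tanh(alpha * x), atol=1e-6)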