tmp commit - toward 0.0.1
pina/adaptive_functions/__init__.py (new file)

from .adaptive_tanh import AdaptiveTanh
from .adaptive_sin import AdaptiveSin
from .adaptive_cos import AdaptiveCos
from .adaptive_linear import AdaptiveLinear
from .adaptive_square import AdaptiveSquare
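The re-exports above make the adaptive activations directly importable from the subpackage. A minimal usage sketch (assuming the package is importable as pina; the network layout and layer sizes are illustrative only):

import torch
from pina.adaptive_functions import AdaptiveTanh, AdaptiveSin

# adaptive activations are drop-in replacements for fixed ones; each instance
# adds its own trainable parameters (scale, translate, and alpha where present)
model = torch.nn.Sequential(
    torch.nn.Linear(2, 16),
    AdaptiveTanh(),
    torch.nn.Linear(16, 16),
    AdaptiveSin(),
    torch.nn.Linear(16, 1),
)

x = torch.rand(8, 2)
print(model(x).shape)  # torch.Size([8, 1])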
pina/adaptive_functions/adaptive_cos.py (new file)

import torch
from torch.nn.parameter import Parameter


class AdaptiveCos(torch.nn.Module):
    '''
    Implementation of the adaptive cosine activation,
    scale * cos(alpha * x + translate), with trainable scale, alpha and translate.

    Shape:
        - Input: (N, *) where * means any number of additional dimensions
        - Output: (N, *), same shape as the input

    Parameters:
        - alpha: trainable frequency parameter
        - scale: trainable amplitude parameter
        - translate: trainable phase parameter

    References:
        - Related work on trainable activation functions:
          https://arxiv.org/pdf/1602.01321.pdf

    Examples:
        >>> a1 = AdaptiveCos()
        >>> x = torch.randn(256)
        >>> x = a1(x)
    '''
    def __init__(self, alpha=None):
        '''
        Initialization.

        INPUT:
            - alpha: initial value of the frequency parameter;
              defaults to 1.0 when not given.
        '''
        super(AdaptiveCos, self).__init__()

        # initialize alpha; Parameters are trainable (requires_grad=True) by default
        if alpha is None:
            self.alpha = Parameter(torch.tensor(1.0))
        else:
            self.alpha = Parameter(torch.tensor(alpha))

        self.scale = Parameter(torch.tensor(1.0))
        self.translate = Parameter(torch.tensor(0.0))

    def forward(self, x):
        '''
        Forward pass of the function.
        Applies the function to the input elementwise.
        '''
        return self.scale * torch.cos(self.alpha * x + self.translate)
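Since alpha, scale and translate are registered as Parameters, they are returned by parameters() and updated by any standard optimizer. A short sketch of a single gradient step (the target function and learning rate are arbitrary, purely for illustration):

import torch
from pina.adaptive_functions import AdaptiveCos

act = AdaptiveCos()
opt = torch.optim.SGD(act.parameters(), lr=0.1)

x = torch.linspace(0.0, 1.0, 32)
loss = (act(x) - torch.sin(x)).pow(2).mean()  # arbitrary regression target
loss.backward()
opt.step()

print(act.alpha)  # updated away from its initial value of 1.0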
pina/adaptive_functions/adaptive_exp.py (new file)

import torch
from torch.nn.parameter import Parameter


class AdaptiveExp(torch.nn.Module):
    '''
    Implementation of the adaptive exponential activation, with trainable
    scale, alpha and translate.

    Shape:
        - Input: (N, *) where * means any number of additional dimensions
        - Output: (N, *), same shape as the input

    Parameters:
        - alpha: trainable rate parameter
        - scale: trainable amplitude parameter
        - translate: trainable offset parameter

    References:
        - Related work on trainable activation functions:
          https://arxiv.org/pdf/1602.01321.pdf

    Examples:
        >>> a1 = AdaptiveExp()
        >>> x = torch.randn(256)
        >>> x = a1(x)
    '''
    def __init__(self):
        '''
        Initialization.
        All parameters are sampled from a narrow normal distribution.
        '''
        super(AdaptiveExp, self).__init__()

        # Parameters are trainable (requires_grad=True) by default
        self.scale = Parameter(torch.normal(torch.tensor(1.0), torch.tensor(0.1)))
        self.alpha = Parameter(torch.normal(torch.tensor(1.0), torch.tensor(0.1)))
        self.translate = Parameter(torch.normal(torch.tensor(0.0), torch.tensor(0.1)))

    def forward(self, x):
        '''
        Forward pass of the function.
        Applies the function to the input elementwise.
        '''
        # scaled and shifted exponential (form inferred from the class name and docstring)
        return self.scale * torch.exp(self.alpha * x + self.translate)
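The exponential form of the forward pass above is inferred from the class name and docstring rather than stated explicitly in this temporary commit; under that assumption, a quick check that the output keeps the input shape and stays positive for a positive scale:

import torch
from pina.adaptive_functions.adaptive_exp import AdaptiveExp  # note: not re-exported in __init__.py

act = AdaptiveExp()
x = torch.randn(4, 3)
y = act(x)
print(y.shape)               # torch.Size([4, 3]), same shape as the input
print(bool((y > 0).all()))   # True whenever scale > 0, since exp(.) > 0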
pina/adaptive_functions/adaptive_linear.py (new file)

import torch
from torch.nn.parameter import Parameter


class AdaptiveLinear(torch.nn.Module):
    '''
    Implementation of the adaptive linear activation,
    scale * (x + translate), with trainable scale and translate.

    Shape:
        - Input: (N, *) where * means any number of additional dimensions
        - Output: (N, *), same shape as the input

    Parameters:
        - scale: trainable slope parameter
        - translate: trainable offset parameter

    References:
        - Related work on trainable activation functions:
          https://arxiv.org/pdf/1602.01321.pdf

    Examples:
        >>> a1 = AdaptiveLinear()
        >>> x = torch.randn(256)
        >>> x = a1(x)
    '''
    def __init__(self):
        '''
        Initialization.
        scale is initialized to 1.0 and translate to 0.0.
        '''
        super(AdaptiveLinear, self).__init__()

        # Parameters are trainable (requires_grad=True) by default
        self.scale = Parameter(torch.tensor(1.0))
        self.translate = Parameter(torch.tensor(0.0))

    def forward(self, x):
        '''
        Forward pass of the function.
        Applies the function to the input elementwise.
        '''
        return self.scale * (x + self.translate)
pina/adaptive_functions/adaptive_relu.py (new file)

import torch
from torch.nn.parameter import Parameter


class AdaptiveReLU(torch.nn.Module):
    '''
    Implementation of the adaptive ReLU activation,
    scale * relu(x + translate), with trainable scale and translate.

    Shape:
        - Input: (N, *) where * means any number of additional dimensions
        - Output: (N, *), same shape as the input

    Parameters:
        - scale: trainable amplitude parameter
        - translate: trainable offset parameter

    References:
        - Related work on trainable activation functions:
          https://arxiv.org/pdf/1602.01321.pdf

    Examples:
        >>> a1 = AdaptiveReLU()
        >>> x = torch.randn(256)
        >>> x = a1(x)
    '''
    def __init__(self):
        '''
        Initialization.
        scale and translate are initialized uniformly at random in [0, 1).
        '''
        super(AdaptiveReLU, self).__init__()

        # Parameters are trainable (requires_grad=True) by default
        self.scale = Parameter(torch.rand(1))
        self.translate = Parameter(torch.rand(1))

    def forward(self, x):
        '''
        Forward pass of the function.
        Applies the function to the input elementwise.
        '''
        return torch.relu(x + self.translate) * self.scale
pina/adaptive_functions/adaptive_sin.py (new file)

import torch
from torch.nn.parameter import Parameter


class AdaptiveSin(torch.nn.Module):
    '''
    Implementation of the adaptive sine activation,
    scale * sin(alpha * x + translate), with trainable scale, alpha and translate.

    Shape:
        - Input: (N, *) where * means any number of additional dimensions
        - Output: (N, *), same shape as the input

    Parameters:
        - alpha: trainable frequency parameter
        - scale: trainable amplitude parameter
        - translate: trainable phase parameter

    References:
        - Related work on trainable activation functions:
          https://arxiv.org/pdf/1602.01321.pdf

    Examples:
        >>> a1 = AdaptiveSin()
        >>> x = torch.randn(256)
        >>> x = a1(x)
    '''
    def __init__(self, alpha=None):
        '''
        Initialization.

        INPUT:
            - alpha: initial value of the frequency parameter; when not given,
              it is sampled from a normal distribution centered at 1.0.
        '''
        super(AdaptiveSin, self).__init__()

        # initialize alpha; Parameters are trainable (requires_grad=True) by default
        if alpha is None:
            self.alpha = Parameter(torch.normal(torch.tensor(1.0), torch.tensor(0.1)))
        else:
            self.alpha = Parameter(torch.tensor(alpha))

        self.scale = Parameter(torch.normal(torch.tensor(1.0), torch.tensor(0.1)))
        self.translate = Parameter(torch.normal(torch.tensor(0.0), torch.tensor(0.1)))

    def forward(self, x):
        '''
        Forward pass of the function.
        Applies the function to the input elementwise.
        '''
        return self.scale * torch.sin(self.alpha * x + self.translate)
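Unlike AdaptiveCos and AdaptiveTanh, the default parameters here are sampled from a narrow normal distribution at construction, so two freshly built instances start from slightly different values; a small illustration:

import torch
from pina.adaptive_functions import AdaptiveSin

torch.manual_seed(0)
a, b = AdaptiveSin(), AdaptiveSin()
print(float(a.alpha), float(b.alpha))  # both close to 1.0, but not identical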
pina/adaptive_functions/adaptive_softplus.py (new file)

import torch
from torch.nn.parameter import Parameter


class AdaptiveSoftplus(torch.nn.Module):
    '''
    Implementation of the adaptive softplus activation,
    scale * softplus(x), with a trainable scale.

    Shape:
        - Input: (N, *) where * means any number of additional dimensions
        - Output: (N, *), same shape as the input

    Parameters:
        - scale: trainable amplitude parameter

    References:
        - Related work on trainable activation functions:
          https://arxiv.org/pdf/1602.01321.pdf

    Examples:
        >>> a1 = AdaptiveSoftplus()
        >>> x = torch.randn(256)
        >>> x = a1(x)
    '''
    def __init__(self):
        '''
        Initialization.
        scale is initialized uniformly at random in [0, 1).
        '''
        super().__init__()

        self.soft = torch.nn.Softplus()

        # Parameters are trainable (requires_grad=True) by default
        self.scale = Parameter(torch.rand(1))

    def forward(self, x):
        '''
        Forward pass of the function.
        Applies the function to the input elementwise.
        '''
        return self.soft(x) * self.scale
pina/adaptive_functions/adaptive_square.py (new file)

import torch
from torch.nn.parameter import Parameter


class AdaptiveSquare(torch.nn.Module):
    '''
    Implementation of the adaptive square activation,
    scale * (x + translate)**2, with trainable scale and translate.

    Shape:
        - Input: (N, *) where * means any number of additional dimensions
        - Output: (N, *), same shape as the input

    Parameters:
        - scale: trainable amplitude parameter
        - translate: trainable offset parameter

    References:
        - Related work on trainable activation functions:
          https://arxiv.org/pdf/1602.01321.pdf

    Examples:
        >>> a1 = AdaptiveSquare()
        >>> x = torch.randn(256)
        >>> x = a1(x)
    '''
    def __init__(self, alpha=None):
        '''
        Initialization.

        INPUT:
            - alpha: accepted for interface consistency with the other
              adaptive functions, but currently unused.
        '''
        super(AdaptiveSquare, self).__init__()

        # Parameters are trainable (requires_grad=True) by default
        self.scale = Parameter(torch.tensor(1.0))
        self.translate = Parameter(torch.tensor(0.0))

    def forward(self, x):
        '''
        Forward pass of the function.
        Applies the function to the input elementwise.
        '''
        return self.scale * (x + self.translate)**2
pina/adaptive_functions/adaptive_tanh.py (new file)

import torch
from torch.nn.parameter import Parameter


class AdaptiveTanh(torch.nn.Module):
    '''
    Implementation of the adaptive hyperbolic tangent activation,
    scale * tanh(alpha * (x + translate)), with trainable scale, alpha and translate.

    Shape:
        - Input: (N, *) where * means any number of additional dimensions
        - Output: (N, *), same shape as the input

    Parameters:
        - alpha: trainable steepness parameter
        - scale: trainable amplitude parameter
        - translate: trainable offset parameter

    References:
        - Related work on trainable activation functions:
          https://arxiv.org/pdf/1602.01321.pdf

    Examples:
        >>> a1 = AdaptiveTanh()
        >>> x = torch.randn(256)
        >>> x = a1(x)
    '''
    def __init__(self, alpha=None):
        '''
        Initialization.

        INPUT:
            - alpha: initial value of the steepness parameter;
              defaults to 1.0 when not given.
        '''
        super(AdaptiveTanh, self).__init__()

        # initialize alpha; Parameters are trainable (requires_grad=True) by default
        if alpha is None:
            self.alpha = Parameter(torch.tensor(1.0))
        else:
            self.alpha = Parameter(torch.tensor(alpha))

        self.scale = Parameter(torch.tensor(1.0))
        self.translate = Parameter(torch.tensor(0.0))

    def forward(self, x):
        '''
        Forward pass of the function.
        Applies the function to the input elementwise.
        '''
        # tanh(z) = (exp(z) - exp(-z)) / (exp(z) + exp(-z)); torch.tanh avoids
        # overflow for large |z| and does not modify the input in place
        return self.scale * torch.tanh(self.alpha * (x + self.translate))
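For reference, torch.tanh in the forward above is algebraically the same as the explicit exponential expression (e^z - e^-z) / (e^z + e^-z); a quick numerical check of the equivalence on moderate inputs:

import torch

alpha, scale, translate = 1.3, 0.7, 0.2
x = torch.linspace(-2.0, 2.0, 11)
z = alpha * (x + translate)

via_tanh = scale * torch.tanh(z)
via_exp = scale * (torch.exp(z) - torch.exp(-z)) / (torch.exp(z) + torch.exp(-z))

print(torch.allclose(via_tanh, via_exp, atol=1e-6))  # True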