Documentation for v0.1 version (#199)
* Adding Equations, fixing typos
* improve _code.rst
* add the team rst and restructure index.rst
* fixing errors

Co-authored-by: Dario Coscia <dariocoscia@dhcp-015.eduroam.sissa.it>
committed by Nicola Demo
parent 3f9305d475
commit 8b7b61b3bd
@@ -1,4 +1,3 @@
from .adaptive_tanh import AdaptiveTanh
from .adaptive_sin import AdaptiveSin
from .adaptive_cos import AdaptiveCos
@@ -1,6 +1,7 @@
import torch
from torch.nn.parameter import Parameter

+
class AdaptiveCos(torch.nn.Module):
    '''
    Implementation of soft exponential activation.

@@ -18,7 +19,8 @@ class AdaptiveCos(torch.nn.Module):
    >>> x = torch.randn(256)
    >>> x = a1(x)
    '''
-    def __init__(self, alpha = None):
+
+    def __init__(self, alpha=None):
        '''
        Initialization.
        INPUT:

@@ -31,16 +33,18 @@ class AdaptiveCos(torch.nn.Module):

        # initialize alpha
        if alpha == None:
-            self.alpha = Parameter(torch.tensor(1.0)) # create a tensor out of alpha
+            self.alpha = Parameter(
+                torch.tensor(1.0)) # create a tensor out of alpha
        else:
-            self.alpha = Parameter(torch.tensor(alpha)) # create a tensor out of alpha
-            self.alpha.requiresGrad = True # set requiresGrad to true!
+            self.alpha = Parameter(
+                torch.tensor(alpha)) # create a tensor out of alpha
+            self.alpha.requiresGrad = True # set requiresGrad to true!

        self.scale = Parameter(torch.tensor(1.0))
-        self.scale.requiresGrad = True # set requiresGrad to true!
+        self.scale.requiresGrad = True # set requiresGrad to true!

        self.translate = Parameter(torch.tensor(0.0))
-        self.translate.requiresGrad = True # set requiresGrad to true!
+        self.translate.requiresGrad = True # set requiresGrad to true!

    def forward(self, x):
        '''
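A minimal usage sketch of the adaptive activations shown above, mirroring the a1(x) pattern from the class docstring; it is an editorial aside, not part of the commit, and the import path below is hypothetical because this page does not show the package that re-exports AdaptiveCos.

    import torch
    from adaptive_functions import AdaptiveCos  # hypothetical import path, for illustration only

    a1 = AdaptiveCos()       # alpha defaults to a learnable scalar initialized to 1.0
    x = torch.randn(256)
    y = a1(x)                # y has the same shape as x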
@@ -1,6 +1,7 @@
import torch
from torch.nn.parameter import Parameter

+
class AdaptiveExp(torch.nn.Module):
    '''
    Implementation of soft exponential activation.

@@ -18,6 +19,7 @@ class AdaptiveExp(torch.nn.Module):
    >>> x = torch.randn(256)
    >>> x = a1(x)
    '''
+
    def __init__(self):
        '''
        Initialization.

@@ -28,14 +30,20 @@ class AdaptiveExp(torch.nn.Module):
        '''
        super(AdaptiveExp, self).__init__()

-        self.scale = Parameter(torch.normal(torch.tensor(1.0), torch.tensor(0.1))) # create a tensor out of alpha
-        self.scale.requiresGrad = True # set requiresGrad to true!
+        self.scale = Parameter(
+            torch.normal(torch.tensor(1.0),
+                         torch.tensor(0.1))) # create a tensor out of alpha
+        self.scale.requiresGrad = True # set requiresGrad to true!

-        self.alpha = Parameter(torch.normal(torch.tensor(1.0), torch.tensor(0.1))) # create a tensor out of alpha
-        self.alpha.requiresGrad = True # set requiresGrad to true!
+        self.alpha = Parameter(
+            torch.normal(torch.tensor(1.0),
+                         torch.tensor(0.1))) # create a tensor out of alpha
+        self.alpha.requiresGrad = True # set requiresGrad to true!

-        self.translate = Parameter(torch.normal(torch.tensor(0.0), torch.tensor(0.1))) # create a tensor out of alpha
-        self.translate.requiresGrad = True # set requiresGrad to true!
+        self.translate = Parameter(
+            torch.normal(torch.tensor(0.0),
+                         torch.tensor(0.1))) # create a tensor out of alpha
+        self.translate.requiresGrad = True # set requiresGrad to true!

    def forward(self, x):
        '''
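As a side note (not part of the commit): torch.nn.parameter.Parameter already registers its tensor with requires_grad=True, and requires_grad, with the underscore, is the attribute autograd reads, so parameters built as in the hunks above are trainable by default. A minimal sketch:

    import torch
    from torch.nn.parameter import Parameter

    scale = Parameter(torch.normal(torch.tensor(1.0), torch.tensor(0.1)))
    print(scale.requires_grad)  # True, no extra flag needed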
@@ -2,6 +2,7 @@
import torch
from torch.nn.parameter import Parameter

+
class AdaptiveLinear(torch.nn.Module):
    '''
    Implementation of soft exponential activation.

@@ -19,6 +20,7 @@ class AdaptiveLinear(torch.nn.Module):
    >>> x = torch.randn(256)
    >>> x = a1(x)
    '''
+
    def __init__(self):
        '''
        Initialization.

@@ -30,10 +32,10 @@
        super(AdaptiveLinear, self).__init__()

        self.scale = Parameter(torch.tensor(1.0))
-        self.scale.requiresGrad = True # set requiresGrad to true!
+        self.scale.requiresGrad = True # set requiresGrad to true!

        self.translate = Parameter(torch.tensor(0.0))
-        self.translate.requiresGrad = True # set requiresGrad to true!
+        self.translate.requiresGrad = True # set requiresGrad to true!

    def forward(self, x):
        '''
@@ -1,6 +1,7 @@
import torch
from torch.nn.parameter import Parameter

+
class AdaptiveReLU(torch.nn.Module, Parameter):
    '''
    Implementation of soft exponential activation.

@@ -18,6 +19,7 @@ class AdaptiveReLU(torch.nn.Module, Parameter):
    >>> x = torch.randn(256)
    >>> x = a1(x)
    '''
+
    def __init__(self):
        '''
        Initialization.

@@ -29,10 +31,10 @@ class AdaptiveReLU(torch.nn.Module, Parameter):
        super(AdaptiveReLU, self).__init__()

        self.scale = Parameter(torch.rand(1))
-        self.scale.requiresGrad = True # set requiresGrad to true!
+        self.scale.requiresGrad = True # set requiresGrad to true!

        self.translate = Parameter(torch.rand(1))
-        self.translate.requiresGrad = True # set requiresGrad to true!
+        self.translate.requiresGrad = True # set requiresGrad to true!

    def forward(self, x):
        '''

@@ -40,4 +42,4 @@ class AdaptiveReLU(torch.nn.Module, Parameter):
        Applies the function to the input elementwise.
        '''
        #x += self.translate
-        return torch.relu(x+self.translate)*self.scale
+        return torch.relu(x + self.translate) * self.scale
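The forward reformatted in the last hunk above composes a learnable shift, a ReLU, and a learnable gain. A functional sketch of the same expression (an editorial illustration, using plain tensors in place of the module's Parameters):

    import torch

    def adaptive_relu(x, scale, translate):
        # shift the input, zero out negatives, then rescale
        return torch.relu(x + translate) * scale

    x = torch.randn(5)
    y = adaptive_relu(x, scale=torch.tensor(2.0), translate=torch.tensor(0.5))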
@@ -1,6 +1,7 @@
import torch
from torch.nn.parameter import Parameter

+
class AdaptiveSin(torch.nn.Module):
    '''
    Implementation of soft exponential activation.

@@ -18,7 +19,8 @@ class AdaptiveSin(torch.nn.Module):
    >>> x = torch.randn(256)
    >>> x = a1(x)
    '''
-    def __init__(self, alpha = None):
+
+    def __init__(self, alpha=None):
        '''
        Initialization.
        INPUT:

@@ -29,14 +31,18 @@ class AdaptiveSin(torch.nn.Module):
        super(AdaptiveSin, self).__init__()

        # initialize alpha
-        self.alpha = Parameter(torch.normal(torch.tensor(1.0), torch.tensor(0.1))) # create a tensor out of alpha
-        self.alpha.requiresGrad = True # set requiresGrad to true!
+        self.alpha = Parameter(
+            torch.normal(torch.tensor(1.0),
+                         torch.tensor(0.1))) # create a tensor out of alpha
+        self.alpha.requiresGrad = True # set requiresGrad to true!

-        self.scale = Parameter(torch.normal(torch.tensor(1.0), torch.tensor(0.1)))
-        self.scale.requiresGrad = True # set requiresGrad to true!
+        self.scale = Parameter(
+            torch.normal(torch.tensor(1.0), torch.tensor(0.1)))
+        self.scale.requiresGrad = True # set requiresGrad to true!

-        self.translate = Parameter(torch.normal(torch.tensor(0.0), torch.tensor(0.1)))
-        self.translate.requiresGrad = True # set requiresGrad to true!
+        self.translate = Parameter(
+            torch.normal(torch.tensor(0.0), torch.tensor(0.1)))
+        self.translate.requiresGrad = True # set requiresGrad to true!

    def forward(self, x):
        '''
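The AdaptiveSin hunk above draws its parameters from a normal distribution; with scalar tensors as mean and std, torch.normal returns a single 0-dim sample, so alpha and scale start near 1.0 and translate near 0.0. A quick illustrative check (not part of the commit):

    import torch

    torch.manual_seed(0)  # only to make the illustration reproducible
    sample = torch.normal(torch.tensor(1.0), torch.tensor(0.1))
    print(sample.shape)   # torch.Size([]), a scalar tensor
    print(sample)         # a value close to 1.0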
@@ -1,6 +1,7 @@
import torch
from torch.nn.parameter import Parameter

+
class AdaptiveSoftplus(torch.nn.Module):
    '''
    Implementation of soft exponential activation.

@@ -18,6 +19,7 @@ class AdaptiveSoftplus(torch.nn.Module):
    >>> x = torch.randn(256)
    >>> x = a1(x)
    '''
+
    def __init__(self):
        '''
        Initialization.

@@ -31,7 +33,7 @@ class AdaptiveSoftplus(torch.nn.Module):
        self.soft = torch.nn.Softplus()

        self.scale = Parameter(torch.rand(1))
-        self.scale.requiresGrad = True # set requiresGrad to true!
+        self.scale.requiresGrad = True # set requiresGrad to true!

    def forward(self, x):
        '''

@@ -39,4 +41,4 @@ class AdaptiveSoftplus(torch.nn.Module):
        Applies the function to the input elementwise.
        '''
        #x += self.translate
-        return self.soft(x)*self.scale
+        return self.soft(x) * self.scale
@@ -1,6 +1,7 @@
import torch
from torch.nn.parameter import Parameter

+
class AdaptiveSquare(torch.nn.Module):
    '''
    Implementation of soft exponential activation.

@@ -18,7 +19,8 @@ class AdaptiveSquare(torch.nn.Module):
    >>> x = torch.randn(256)
    >>> x = a1(x)
    '''
-    def __init__(self, alpha = None):
+
+    def __init__(self, alpha=None):
        '''
        Initialization.
        INPUT:

@@ -29,10 +31,10 @@ class AdaptiveSquare(torch.nn.Module):
        super(AdaptiveSquare, self).__init__()

        self.scale = Parameter(torch.tensor(1.0))
-        self.scale.requiresGrad = True # set requiresGrad to true!
+        self.scale.requiresGrad = True # set requiresGrad to true!

        self.translate = Parameter(torch.tensor(0.0))
-        self.translate.requiresGrad = True # set requiresGrad to true!
+        self.translate.requiresGrad = True # set requiresGrad to true!

    def forward(self, x):
        '''
@@ -1,6 +1,7 @@
import torch
from torch.nn.parameter import Parameter

+
class AdaptiveTanh(torch.nn.Module):
    '''
    Implementation of soft exponential activation.

@@ -18,7 +19,8 @@ class AdaptiveTanh(torch.nn.Module):
    >>> x = torch.randn(256)
    >>> x = a1(x)
    '''
-    def __init__(self, alpha = None):
+
+    def __init__(self, alpha=None):
        '''
        Initialization.
        INPUT:

@@ -31,17 +33,19 @@ class AdaptiveTanh(torch.nn.Module):

        # initialize alpha
        if alpha == None:
-            self.alpha = Parameter(torch.tensor(1.0)) # create a tensor out of alpha
+            self.alpha = Parameter(
+                torch.tensor(1.0)) # create a tensor out of alpha
        else:
-            self.alpha = Parameter(torch.tensor(alpha)) # create a tensor out of alpha
-
-        self.alpha.requiresGrad = True # set requiresGrad to true!
+            self.alpha = Parameter(
+                torch.tensor(alpha)) # create a tensor out of alpha
+
+        self.alpha.requiresGrad = True # set requiresGrad to true!

        self.scale = Parameter(torch.tensor(1.0))
-        self.scale.requiresGrad = True # set requiresGrad to true!
+        self.scale.requiresGrad = True # set requiresGrad to true!

        self.translate = Parameter(torch.tensor(0.0))
-        self.translate.requiresGrad = True # set requiresGrad to true!
+        self.translate.requiresGrad = True # set requiresGrad to true!

    def forward(self, x):
        '''

@@ -49,4 +53,6 @@ class AdaptiveTanh(torch.nn.Module):
        Applies the function to the input elementwise.
        '''
        x += self.translate
-        return self.scale * (torch.exp(self.alpha * x) - torch.exp(-self.alpha * x))/(torch.exp(self.alpha * x) + torch.exp(-self.alpha * x))
+        return self.scale * (torch.exp(self.alpha * x) - torch.exp(
+            -self.alpha * x)) / (torch.exp(self.alpha * x) +
+            torch.exp(-self.alpha * x))
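The quotient of exponentials in the last hunk, (exp(a*x) - exp(-a*x)) / (exp(a*x) + exp(-a*x)), is exactly tanh(a*x), so after the in-place shift the forward is equivalent to scale * torch.tanh(alpha * (x + translate)). A short sketch checking the identity (an editorial aside, not part of the commit):

    import torch

    x = torch.randn(8)
    alpha, scale, translate = torch.tensor(2.0), torch.tensor(1.5), torch.tensor(0.3)

    shifted = x + translate
    explicit = scale * (torch.exp(alpha * shifted) - torch.exp(-alpha * shifted)) / (
        torch.exp(alpha * shifted) + torch.exp(-alpha * shifted))
    compact = scale * torch.tanh(alpha * shifted)
    print(torch.allclose(explicit, compact))  # True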