fixing adaptive functions
committed by Nicola Demo
parent 50fb3b731c
commit 4f5d9559b2
tests/test_adaptive_functions.py (new file, 62 lines)
@@ -0,0 +1,62 @@
import torch
import pytest

from pina.adaptive_functions import (AdaptiveReLU, AdaptiveSigmoid, AdaptiveTanh,
                                     AdaptiveSiLU, AdaptiveMish, AdaptiveELU,
                                     AdaptiveCELU, AdaptiveGELU, AdaptiveSoftmin,
                                     AdaptiveSoftmax, AdaptiveSIREN, AdaptiveExp)


adaptive_functions = (AdaptiveReLU, AdaptiveSigmoid, AdaptiveTanh,
                      AdaptiveSiLU, AdaptiveMish, AdaptiveELU,
                      AdaptiveCELU, AdaptiveGELU, AdaptiveSoftmin,
                      AdaptiveSoftmax, AdaptiveSIREN, AdaptiveExp)

x = torch.rand(10, requires_grad=True)


@pytest.mark.parametrize("Func", adaptive_functions)
def test_constructor(Func):
    if Func.__name__ == 'AdaptiveExp':
        # simple
        Func()
        # setting values
        af = Func(alpha=1., beta=2.)
        assert af.alpha.requires_grad
        assert af.beta.requires_grad
        assert af.alpha == 1.
        assert af.beta == 2.
    else:
        # simple
        Func()
        # setting values
        af = Func(alpha=1., beta=2., gamma=3.)
        assert af.alpha.requires_grad
        assert af.beta.requires_grad
        assert af.gamma.requires_grad
        assert af.alpha == 1.
        assert af.beta == 2.
        assert af.gamma == 3.

    # fixed variables
    af = Func(alpha=1., beta=2., fixed=['alpha'])
    assert af.alpha.requires_grad is False
    assert af.beta.requires_grad
    assert af.alpha == 1.
    assert af.beta == 2.

    with pytest.raises(TypeError):
        Func(alpha=1., beta=2., fixed=['delta'])

    with pytest.raises(ValueError):
        Func(alpha='s')
        Func(alpha=1)


@pytest.mark.parametrize("Func", adaptive_functions)
def test_forward(Func):
    af = Func()
    af(x)


@pytest.mark.parametrize("Func", adaptive_functions)
def test_backward(Func):
    af = Func()
    y = af(x)
    y.mean().backward()
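The tests above pin down the constructor contract without showing an implementation. For orientation, here is a minimal sketch of the interface they assume, for one function only. The class name AdaptiveTanhSketch is hypothetical, and the functional form alpha * tanh(beta * x + gamma) is an assumption; it is at least consistent with AdaptiveExp taking only alpha and beta, since a shift inside an exponential is absorbed by the scale.

import torch


class AdaptiveTanhSketch(torch.nn.Module):
    # Hypothetical adaptive tanh: alpha * tanh(beta * x + gamma),
    # each parameter trainable unless named in `fixed`.
    # This mirrors what test_constructor checks; it is not PINA's code.

    def __init__(self, alpha=1., beta=1., gamma=0., fixed=None):
        super().__init__()
        fixed = fixed if fixed is not None else []
        # unknown names in `fixed` -> TypeError, as the tests expect
        if any(name not in ('alpha', 'beta', 'gamma') for name in fixed):
            raise TypeError('fixed may only contain alpha, beta, gamma')
        for name, value in (('alpha', alpha), ('beta', beta), ('gamma', gamma)):
            # non-float values -> ValueError; the tests also place
            # Func(alpha=1) under pytest.raises(ValueError), so plain
            # ints are rejected here as well (an assumption)
            if not isinstance(value, float):
                raise ValueError(f'{name} must be a float')
            setattr(self, name,
                    torch.nn.Parameter(torch.tensor(value),
                                       requires_grad=name not in fixed))

    def forward(self, x):
        return self.alpha * torch.tanh(self.beta * x + self.gamma)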
@@ -1,48 +0,0 @@
import torch
import pytest

from pina.model.layers.adaptive_func import AdaptiveActivationFunction

x = torch.rand(5)
torchfunc = torch.nn.Tanh()


def test_constructor():
    # simple
    AdaptiveActivationFunction(torchfunc)

    # setting values
    af = AdaptiveActivationFunction(torchfunc, alpha=1., beta=2., gamma=3.)
    assert af.alpha.requires_grad
    assert af.beta.requires_grad
    assert af.gamma.requires_grad
    assert af.alpha == 1.
    assert af.beta == 2.
    assert af.gamma == 3.

    # fixed variables
    af = AdaptiveActivationFunction(torchfunc, alpha=1., beta=2.,
                                    gamma=3., fixed=['alpha'])
    assert af.alpha.requires_grad is False
    assert af.beta.requires_grad
    assert af.gamma.requires_grad
    assert af.alpha == 1.
    assert af.beta == 2.
    assert af.gamma == 3.

    with pytest.raises(TypeError):
        AdaptiveActivationFunction(torchfunc, alpha=1., beta=2.,
                                   gamma=3., fixed=['delta'])

    with pytest.raises(ValueError):
        AdaptiveActivationFunction(torchfunc, alpha='s')
        AdaptiveActivationFunction(torchfunc, alpha=1., fixed='alpha')
        AdaptiveActivationFunction(torchfunc, alpha=1)


def test_forward():
    af = AdaptiveActivationFunction(torchfunc)
    af(x)


def test_backward():
    af = AdaptiveActivationFunction(torchfunc)
    y = af(x)
    y.mean().backward()
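For context on how the replacement API differs from the removed wrapper, a hypothetical usage sketch follows. It assumes the new classes are torch.nn.Module subclasses that can sit inside a Sequential; the tests only confirm they are callable and differentiable.

import torch
from pina.adaptive_functions import AdaptiveTanh

# old style (removed): wrap a torch activation
#   af = AdaptiveActivationFunction(torch.nn.Tanh())
# new style: use the dedicated class directly
model = torch.nn.Sequential(
    torch.nn.Linear(2, 10),
    AdaptiveTanh(),              # trainable adaptive parameters
    torch.nn.Linear(10, 1),
)
out = model(torch.rand(4, 2))
out.mean().backward()            # gradients reach the adaptive parameters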