Fix Codacy Warnings (#477)

---------

Co-authored-by: Dario Coscia <dariocos99@gmail.com>
commit 4177bfbb50
parent e3790e049a
Author: Filippo Olivo
Date: 2025-03-10 15:38:45 +01:00
Committed by: Nicola Demo
157 changed files with 3473 additions and 3839 deletions

[File: tests for ContinuousConvBlock (path not shown in this view)]

@@ -18,8 +18,8 @@ def make_grid(x):
     # initializing transfomed image
     coordinates = torch.zeros(
-        [channels, prod(dimension),
-         len(dimension) + 1]).to(image.device)
+        [channels, prod(dimension), len(dimension) + 1]
+    ).to(image.device)
     # creating the n dimensional mesh grid
     values_mesh = [

@@ -43,9 +43,13 @@ class MLP(torch.nn.Module):
     def __init__(self) -> None:
         super().__init__()
-        self.model = torch.nn.Sequential(torch.nn.Linear(2, 8), torch.nn.ReLU(),
-                                         torch.nn.Linear(8, 8), torch.nn.ReLU(),
-                                         torch.nn.Linear(8, 1))
+        self.model = torch.nn.Sequential(
+            torch.nn.Linear(2, 8),
+            torch.nn.ReLU(),
+            torch.nn.Linear(8, 8),
+            torch.nn.ReLU(),
+            torch.nn.Linear(8, 1),
+        )

     def forward(self, x):
         return self.model(x)

@@ -61,7 +65,7 @@ stride = {
     "domain": [10, 10],
     "start": [0, 0],
     "jumps": [3, 3],
-    "direction": [1, 1.]
+    "direction": [1, 1.0],
 }
 dim_filter = len(dim)
 dim_input = (batch, channel_input, 10, dim_filter)

@@ -73,53 +77,42 @@ x = make_grid(x)

 def test_constructor():
     model = MLP
-    conv = ContinuousConvBlock(channel_input,
-                               channel_output,
-                               dim,
-                               stride,
-                               model=model)
-    conv = ContinuousConvBlock(channel_input,
-                               channel_output,
-                               dim,
-                               stride,
-                               model=None)
+    conv = ContinuousConvBlock(
+        channel_input, channel_output, dim, stride, model=model
+    )
+    conv = ContinuousConvBlock(
+        channel_input, channel_output, dim, stride, model=None
+    )


 def test_forward():
     model = MLP

     # simple forward
-    conv = ContinuousConvBlock(channel_input,
-                               channel_output,
-                               dim,
-                               stride,
-                               model=model)
+    conv = ContinuousConvBlock(
+        channel_input, channel_output, dim, stride, model=model
+    )
     conv(x)

     # simple forward with optimization
-    conv = ContinuousConvBlock(channel_input,
-                               channel_output,
-                               dim,
-                               stride,
-                               model=model,
-                               optimize=True)
+    conv = ContinuousConvBlock(
+        channel_input, channel_output, dim, stride, model=model, optimize=True
+    )
     conv(x)


 def test_backward():
     model = MLP
     x = torch.rand(dim_input)
     x = make_grid(x)
     x.requires_grad = True

     # simple backward
-    conv = ContinuousConvBlock(channel_input,
-                               channel_output,
-                               dim,
-                               stride,
-                               model=model)
+    conv = ContinuousConvBlock(
+        channel_input, channel_output, dim, stride, model=model
+    )
     conv(x)
-    l=torch.mean(conv(x))
+    l = torch.mean(conv(x))
     l.backward()
     assert x._grad.shape == torch.Size([2, 2, 20, 3])

     x = torch.rand(dim_input)

@@ -127,14 +120,11 @@ def test_backward():
     x.requires_grad = True

     # simple backward with optimization
-    conv = ContinuousConvBlock(channel_input,
-                               channel_output,
-                               dim,
-                               stride,
-                               model=model,
-                               optimize=True)
+    conv = ContinuousConvBlock(
+        channel_input, channel_output, dim, stride, model=model, optimize=True
+    )
     conv(x)
-    l=torch.mean(conv(x))
+    l = torch.mean(conv(x))
     l.backward()
     assert x._grad.shape == torch.Size([2, 2, 20, 3])

@@ -143,17 +133,13 @@ def test_transpose():
     model = MLP

     # simple transpose
-    conv = ContinuousConvBlock(channel_input,
-                               channel_output,
-                               dim,
-                               stride,
-                               model=model)
+    conv = ContinuousConvBlock(
+        channel_input, channel_output, dim, stride, model=model
+    )
-    conv2 = ContinuousConvBlock(channel_output,
-                                channel_input,
-                                dim,
-                                stride,
-                                model=model)
+    conv2 = ContinuousConvBlock(
+        channel_output, channel_input, dim, stride, model=model
+    )
     integrals = conv(x)
     conv2.transpose(integrals[..., -1], x)
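For reference, the reformatted MLP kernel network from this file reads as a self-contained script; the import and the smoke test at the end are added here and are not part of the diff:

import torch


class MLP(torch.nn.Module):
    def __init__(self) -> None:
        super().__init__()
        # Black's "magic trailing comma" keeps one layer per line
        self.model = torch.nn.Sequential(
            torch.nn.Linear(2, 8),
            torch.nn.ReLU(),
            torch.nn.Linear(8, 8),
            torch.nn.ReLU(),
            torch.nn.Linear(8, 1),
        )

    def forward(self, x):
        return self.model(x)


# map a batch of 2D points to a scalar value, as the tests above do
y = MLP()(torch.rand(4, 2))
assert y.shape == (4, 1)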

[File: tests for PeriodicBoundaryEmbedding and FourierFeatureEmbedding (path not shown in this view)]

@@ -6,55 +6,66 @@ from pina.model.block import PeriodicBoundaryEmbedding, FourierFeatureEmbedding

 # test tolerance
 tol = 1e-6


 def check_same_columns(tensor):
     # Get the first column and compute residual
     residual = tensor - tensor[0]
     zeros = torch.zeros_like(residual)
     # Compare each column with the first column
-    all_same = torch.allclose(input=residual,other=zeros,atol=tol)
+    all_same = torch.allclose(input=residual, other=zeros, atol=tol)
     return all_same


 def grad(u, x):
     """
     Compute the first derivative of u with respect to x.
     """
-    return torch.autograd.grad(u, x, grad_outputs=torch.ones_like(u),
-                               create_graph=True, allow_unused=True,
-                               retain_graph=True)[0]
+    return torch.autograd.grad(
+        u,
+        x,
+        grad_outputs=torch.ones_like(u),
+        create_graph=True,
+        allow_unused=True,
+        retain_graph=True,
+    )[0]


 def test_constructor_PeriodicBoundaryEmbedding():
     PeriodicBoundaryEmbedding(input_dimension=1, periods=2)
-    PeriodicBoundaryEmbedding(input_dimension=1, periods={'x': 3, 'y' : 4})
-    PeriodicBoundaryEmbedding(input_dimension=1, periods={0: 3, 1 : 4})
+    PeriodicBoundaryEmbedding(input_dimension=1, periods={"x": 3, "y": 4})
+    PeriodicBoundaryEmbedding(input_dimension=1, periods={0: 3, 1: 4})
     PeriodicBoundaryEmbedding(input_dimension=1, periods=2, output_dimension=10)
     with pytest.raises(TypeError):
         PeriodicBoundaryEmbedding()
     with pytest.raises(ValueError):
-        PeriodicBoundaryEmbedding(input_dimension=1., periods=1)
-        PeriodicBoundaryEmbedding(input_dimension=1, periods=1,
-                                  output_dimension=1.)
-        PeriodicBoundaryEmbedding(input_dimension=1, periods={'x':'x'})
-        PeriodicBoundaryEmbedding(input_dimension=1, periods={0:'x'})
+        PeriodicBoundaryEmbedding(input_dimension=1.0, periods=1)
+        PeriodicBoundaryEmbedding(
+            input_dimension=1, periods=1, output_dimension=1.0
+        )
+        PeriodicBoundaryEmbedding(input_dimension=1, periods={"x": "x"})
+        PeriodicBoundaryEmbedding(input_dimension=1, periods={0: "x"})


 @pytest.mark.parametrize("period", [1, 4, 10])
 @pytest.mark.parametrize("input_dimension", [1, 2, 3])
-def test_forward_backward_same_period_PeriodicBoundaryEmbedding(input_dimension,
-                                                                period):
+def test_forward_backward_same_period_PeriodicBoundaryEmbedding(
+    input_dimension, period
+):
     func = torch.nn.Sequential(
-        PeriodicBoundaryEmbedding(input_dimension=input_dimension,
-                                  output_dimension=60, periods=period),
+        PeriodicBoundaryEmbedding(
+            input_dimension=input_dimension, output_dimension=60, periods=period
+        ),
         torch.nn.Tanh(),
         torch.nn.Linear(60, 60),
         torch.nn.Tanh(),
-        torch.nn.Linear(60, 1)
+        torch.nn.Linear(60, 1),
     )
     # coordinates
-    x = period * torch.tensor([[0.],[1.]])
+    x = period * torch.tensor([[0.0], [1.0]])
     if input_dimension == 2:
-        x = torch.cartesian_prod(x.flatten(),x.flatten())
+        x = torch.cartesian_prod(x.flatten(), x.flatten())
     elif input_dimension == 3:
-        x = torch.cartesian_prod(x.flatten(),x.flatten(),x.flatten())
+        x = torch.cartesian_prod(x.flatten(), x.flatten(), x.flatten())
     x.requires_grad = True
     # output
     f = func(x)

@@ -63,29 +74,32 @@ def test_forward_backward_same_period_PeriodicBoundaryEmbedding(input_dimension,
     loss = f.mean()
     loss.backward()


 def test_constructor_FourierFeatureEmbedding():
-    FourierFeatureEmbedding(input_dimension=1, output_dimension=20,
-                            sigma=1)
-    with pytest.raises(TypeError):
+    FourierFeatureEmbedding(input_dimension=1, output_dimension=20, sigma=1)
+    with pytest.raises(TypeError):
         FourierFeatureEmbedding()
-    with pytest.raises(RuntimeError):
+    with pytest.raises(RuntimeError):
         FourierFeatureEmbedding(input_dimension=1, output_dimension=3, sigma=1)
     with pytest.raises(ValueError):
-        FourierFeatureEmbedding(input_dimension='x', output_dimension=20,
-                                sigma=1)
-        FourierFeatureEmbedding(input_dimension=1, output_dimension='x',
-                                sigma=1)
-        FourierFeatureEmbedding(input_dimension=1, output_dimension=20,
-                                sigma='x')
+        FourierFeatureEmbedding(
+            input_dimension="x", output_dimension=20, sigma=1
+        )
+        FourierFeatureEmbedding(
+            input_dimension=1, output_dimension="x", sigma=1
+        )
+        FourierFeatureEmbedding(
+            input_dimension=1, output_dimension=20, sigma="x"
+        )


 @pytest.mark.parametrize("output_dimension", [2, 4, 6])
 @pytest.mark.parametrize("input_dimension", [1, 2, 3])
 @pytest.mark.parametrize("sigma", [10, 1, 0.1])
-def test_forward_backward_FourierFeatureEmbedding(input_dimension,
-                                                  output_dimension,
-                                                  sigma):
-    func = FourierFeatureEmbedding(input_dimension, output_dimension,
-                                   sigma)
+def test_forward_backward_FourierFeatureEmbedding(
+    input_dimension, output_dimension, sigma
+):
+    func = FourierFeatureEmbedding(input_dimension, output_dimension, sigma)
     # coordinates
     x = torch.rand((10, input_dimension), requires_grad=True)
     # output

@@ -93,4 +107,4 @@ def test_forward_backward_FourierFeatureEmbedding(input_dimension,
     assert f.shape[-1] == output_dimension
     # compute backward
     loss = f.mean()
-    loss.backward()
\ No newline at end of file
+    loss.backward()
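A minimal usage sketch distilled from the tests above; the import path is the one used at the top of this file, and the concrete numbers are illustrative:

import torch
from pina.model.block import FourierFeatureEmbedding

# the constructor tests above expect a RuntimeError for output_dimension=3,
# so an even output dimension is used here
embedding = FourierFeatureEmbedding(input_dimension=2, output_dimension=20, sigma=1)
x = torch.rand((10, 2), requires_grad=True)
f = embedding(x)
assert f.shape[-1] == 20
f.mean().backward()  # gradients flow back to the input coordinates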

[File: tests for FourierBlock1D/2D/3D (path not shown in this view)]

@@ -7,23 +7,29 @@ batch = 5


 def test_constructor_1d():
-    FourierBlock1D(input_numb_fields=input_numb_fields,
-                   output_numb_fields=output_numb_fields,
-                   n_modes=5)
+    FourierBlock1D(
+        input_numb_fields=input_numb_fields,
+        output_numb_fields=output_numb_fields,
+        n_modes=5,
+    )


 def test_forward_1d():
-    sconv = FourierBlock1D(input_numb_fields=input_numb_fields,
-                           output_numb_fields=output_numb_fields,
-                           n_modes=4)
+    sconv = FourierBlock1D(
+        input_numb_fields=input_numb_fields,
+        output_numb_fields=output_numb_fields,
+        n_modes=4,
+    )
     x = torch.rand(batch, input_numb_fields, 10)
     sconv(x)


 def test_backward_1d():
-    sconv = FourierBlock1D(input_numb_fields=input_numb_fields,
-                           output_numb_fields=output_numb_fields,
-                           n_modes=4)
+    sconv = FourierBlock1D(
+        input_numb_fields=input_numb_fields,
+        output_numb_fields=output_numb_fields,
+        n_modes=4,
+    )
     x = torch.rand(batch, input_numb_fields, 10)
     x.requires_grad = True
     sconv(x)

@@ -33,23 +39,29 @@ def test_backward_1d():


 def test_constructor_2d():
-    FourierBlock2D(input_numb_fields=input_numb_fields,
-                   output_numb_fields=output_numb_fields,
-                   n_modes=[5, 4])
+    FourierBlock2D(
+        input_numb_fields=input_numb_fields,
+        output_numb_fields=output_numb_fields,
+        n_modes=[5, 4],
+    )


 def test_forward_2d():
-    sconv = FourierBlock2D(input_numb_fields=input_numb_fields,
-                           output_numb_fields=output_numb_fields,
-                           n_modes=[5, 4])
+    sconv = FourierBlock2D(
+        input_numb_fields=input_numb_fields,
+        output_numb_fields=output_numb_fields,
+        n_modes=[5, 4],
+    )
     x = torch.rand(batch, input_numb_fields, 10, 10)
     sconv(x)


 def test_backward_2d():
-    sconv = FourierBlock2D(input_numb_fields=input_numb_fields,
-                           output_numb_fields=output_numb_fields,
-                           n_modes=[5, 4])
+    sconv = FourierBlock2D(
+        input_numb_fields=input_numb_fields,
+        output_numb_fields=output_numb_fields,
+        n_modes=[5, 4],
+    )
     x = torch.rand(batch, input_numb_fields, 10, 10)
     x.requires_grad = True
     sconv(x)

@@ -59,23 +71,29 @@ def test_backward_2d():


 def test_constructor_3d():
-    FourierBlock3D(input_numb_fields=input_numb_fields,
-                   output_numb_fields=output_numb_fields,
-                   n_modes=[5, 4, 4])
+    FourierBlock3D(
+        input_numb_fields=input_numb_fields,
+        output_numb_fields=output_numb_fields,
+        n_modes=[5, 4, 4],
+    )


 def test_forward_3d():
-    sconv = FourierBlock3D(input_numb_fields=input_numb_fields,
-                           output_numb_fields=output_numb_fields,
-                           n_modes=[5, 4, 4])
+    sconv = FourierBlock3D(
+        input_numb_fields=input_numb_fields,
+        output_numb_fields=output_numb_fields,
+        n_modes=[5, 4, 4],
+    )
     x = torch.rand(batch, input_numb_fields, 10, 10, 10)
     sconv(x)


 def test_backward_3d():
-    sconv = FourierBlock3D(input_numb_fields=input_numb_fields,
-                           output_numb_fields=output_numb_fields,
-                           n_modes=[5, 4, 4])
+    sconv = FourierBlock3D(
+        input_numb_fields=input_numb_fields,
+        output_numb_fields=output_numb_fields,
+        n_modes=[5, 4, 4],
+    )
     x = torch.rand(batch, input_numb_fields, 10, 10, 10)
     x.requires_grad = True
     sconv(x)
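A sketch of the 2D case exercised above, under the assumption that the Fourier blocks are importable from pina.model.block like the other blocks in this commit; the field counts are illustrative, since this excerpt does not show the module-level constants:

import torch
from pina.model.block import FourierBlock2D  # import path assumed

# keep the 5 and 4 lowest Fourier modes along the two spatial axes
block = FourierBlock2D(input_numb_fields=3, output_numb_fields=3, n_modes=[5, 4])
x = torch.rand(5, 3, 10, 10)  # (batch, fields, height, width), as in the tests
out = block(x)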

[File: tests for LowRankBlock (path not shown in this view)]

@@ -5,54 +5,66 @@ from pina.model.block import LowRankBlock
 from pina import LabelTensor

-input_dimensions=2
-embedding_dimenion=1
-rank=4
-inner_size=20
-n_layers=2
-func=torch.nn.Tanh
-bias=True
+input_dimensions = 2
+embedding_dimenion = 1
+rank = 4
+inner_size = 20
+n_layers = 2
+func = torch.nn.Tanh
+bias = True


 def test_constructor():
-    LowRankBlock(input_dimensions=input_dimensions,
-                 embedding_dimenion=embedding_dimenion,
-                 rank=rank,
-                 inner_size=inner_size,
-                 n_layers=n_layers,
-                 func=func,
-                 bias=bias)
+    LowRankBlock(
+        input_dimensions=input_dimensions,
+        embedding_dimenion=embedding_dimenion,
+        rank=rank,
+        inner_size=inner_size,
+        n_layers=n_layers,
+        func=func,
+        bias=bias,
+    )


 def test_constructor_wrong():
     with pytest.raises(ValueError):
-        LowRankBlock(input_dimensions=input_dimensions,
-                     embedding_dimenion=embedding_dimenion,
-                     rank=0.5,
-                     inner_size=inner_size,
-                     n_layers=n_layers,
-                     func=func,
-                     bias=bias)
+        LowRankBlock(
+            input_dimensions=input_dimensions,
+            embedding_dimenion=embedding_dimenion,
+            rank=0.5,
+            inner_size=inner_size,
+            n_layers=n_layers,
+            func=func,
+            bias=bias,
+        )


 def test_forward():
-    block = LowRankBlock(input_dimensions=input_dimensions,
-                         embedding_dimenion=embedding_dimenion,
-                         rank=rank,
-                         inner_size=inner_size,
-                         n_layers=n_layers,
-                         func=func,
-                         bias=bias)
-    data = LabelTensor(torch.rand(10, 30, 3), labels=['x', 'y', 'u'])
-    block(data.extract('u'), data.extract(['x', 'y']))
+    block = LowRankBlock(
+        input_dimensions=input_dimensions,
+        embedding_dimenion=embedding_dimenion,
+        rank=rank,
+        inner_size=inner_size,
+        n_layers=n_layers,
+        func=func,
+        bias=bias,
+    )
+    data = LabelTensor(torch.rand(10, 30, 3), labels=["x", "y", "u"])
+    block(data.extract("u"), data.extract(["x", "y"]))


 def test_backward():
-    block = LowRankBlock(input_dimensions=input_dimensions,
-                         embedding_dimenion=embedding_dimenion,
-                         rank=rank,
-                         inner_size=inner_size,
-                         n_layers=n_layers,
-                         func=func,
-                         bias=bias)
-    data = LabelTensor(torch.rand(10, 30, 3), labels=['x', 'y', 'u'])
+    block = LowRankBlock(
+        input_dimensions=input_dimensions,
+        embedding_dimenion=embedding_dimenion,
+        rank=rank,
+        inner_size=inner_size,
+        n_layers=n_layers,
+        func=func,
+        bias=bias,
+    )
+    data = LabelTensor(torch.rand(10, 30, 3), labels=["x", "y", "u"])
     data.requires_grad_(True)
-    out = block(data.extract('u'), data.extract(['x', 'y']))
+    out = block(data.extract("u"), data.extract(["x", "y"]))
     loss = out.mean()
-    loss.backward()
\ No newline at end of file
+    loss.backward()
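Put together, the fixtures above give this usage sketch; all names and values come from this file (note that the keyword really is spelled embedding_dimenion in this API):

import torch
from pina import LabelTensor
from pina.model.block import LowRankBlock

block = LowRankBlock(
    input_dimensions=2,
    embedding_dimenion=1,  # sic, spelling as in the API
    rank=4,
    inner_size=20,
    n_layers=2,
    func=torch.nn.Tanh,
    bias=True,
)
# 10 batches of 30 points carrying coordinates "x", "y" and a field "u"
data = LabelTensor(torch.rand(10, 30, 3), labels=["x", "y", "u"])
out = block(data.extract("u"), data.extract(["x", "y"]))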

[File: tests for OrthogonalBlock (path not shown in this view)]

@@ -8,10 +8,11 @@ list_matrices = [
     torch.randn(10, 3),
     torch.rand(100, 5),
     torch.randn(5, 5),
-]
+]
+
 list_prohibited_matrices_dim0 = list_matrices[:-1]


 @pytest.mark.parametrize("dim", [-1, 0, 1, None])
 @pytest.mark.parametrize("requires_grad", [True, False, None])
 def test_constructor(dim, requires_grad):

@@ -29,11 +30,13 @@ def test_constructor(dim, requires_grad):
     if requires_grad is not None:
         assert block.requires_grad == requires_grad


 def test_wrong_constructor():
     with pytest.raises(IndexError):
-        OrthogonalBlock(2)
+        OrthogonalBlock(2)
     with pytest.raises(ValueError):
-        OrthogonalBlock('a')
+        OrthogonalBlock("a")

@@ -42,7 +45,10 @@ def test_forward(V):
     V_orth = orth(V)
     V_orth_row = orth_row(V.T)
     assert torch.allclose(V_orth.T @ V_orth, torch.eye(V.shape[1]), atol=1e-6)
-    assert torch.allclose(V_orth_row @ V_orth_row.T, torch.eye(V.shape[1]), atol=1e-6)
+    assert torch.allclose(
+        V_orth_row @ V_orth_row.T, torch.eye(V.shape[1]), atol=1e-6
+    )

@@ -51,6 +57,7 @@ def test_backward(V):
     loss = V_orth.mean()
     loss.backward()

+
 @pytest.mark.parametrize("V", list_matrices)
 def test_wrong_backward(V):
     orth = OrthogonalBlock(requires_grad=False)

@@ -59,10 +66,10 @@ def test_wrong_backward(V):
     with pytest.raises(RuntimeError):
         loss.backward()


 @pytest.mark.parametrize("V", list_prohibited_matrices_dim0)
 def test_forward_prohibited(V):
     orth = OrthogonalBlock(0)
     with pytest.raises(Warning):
         V_orth = orth(V)
     assert V.shape[0] > V.shape[1]
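The forward test above boils down to the following sketch. The import path and the meaning of the dim argument are assumptions here (dim=-1, one of the values the constructor test parametrizes over, is taken to select the dimension that gets orthonormalized, matching the column assertion in test_forward):

import torch
from pina.model.block import OrthogonalBlock  # import path assumed

V = torch.randn(10, 3)
orth = OrthogonalBlock(-1)  # dim assumed to orthonormalize columns
V_orth = orth(V)
# columns are orthonormal, as asserted in test_forward
assert torch.allclose(V_orth.T @ V_orth, torch.eye(3), atol=1e-6)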

[File: tests for PODBlock (path not shown in this view)]

@@ -4,7 +4,10 @@ import pytest
 from pina.model.block.pod_block import PODBlock

 x = torch.linspace(-1, 1, 100)
-toy_snapshots = torch.vstack([torch.exp(-x**2)*c for c in torch.linspace(0, 1, 10)])
+toy_snapshots = torch.vstack(
+    [torch.exp(-(x**2)) * c for c in torch.linspace(0, 1, 10)]
+)


 def test_constructor():
     pod = PODBlock(2)

@@ -23,6 +26,7 @@ def test_fit(rank, scale):
     assert pod.rank == rank
     assert pod.scale_coefficients == scale

+
 @pytest.mark.parametrize("scale", [True, False])
 @pytest.mark.parametrize("rank", [1, 2, 10])
 @pytest.mark.parametrize("randomized", [True, False])

@@ -34,15 +38,16 @@ def test_fit(rank, scale, randomized):
     assert pod.basis.shape == (rank, dof)
     assert pod._basis.shape == (n_snap, dof)
     if scale is True:
-        assert pod._scaler['mean'].shape == (n_snap,)
-        assert pod._scaler['std'].shape == (n_snap,)
-        assert pod.scaler['mean'].shape == (rank,)
-        assert pod.scaler['std'].shape == (rank,)
-        assert pod.scaler['mean'].shape[0] == pod.basis.shape[0]
+        assert pod._scaler["mean"].shape == (n_snap,)
+        assert pod._scaler["std"].shape == (n_snap,)
+        assert pod.scaler["mean"].shape == (rank,)
+        assert pod.scaler["std"].shape == (rank,)
+        assert pod.scaler["mean"].shape[0] == pod.basis.shape[0]
     else:
         assert pod._scaler == None
         assert pod.scaler == None

+
 def test_forward():
     pod = PODBlock(1)
     pod.fit(toy_snapshots)

@@ -64,6 +69,7 @@ def test_forward():
     torch.testing.assert_close(c.mean(dim=0), torch.zeros(pod.rank))
     torch.testing.assert_close(c.std(dim=0), torch.ones(pod.rank))

+
 @pytest.mark.parametrize("scale", [True, False])
 @pytest.mark.parametrize("rank", [1, 2, 10])
 @pytest.mark.parametrize("randomized", [True, False])

@@ -74,6 +80,7 @@ def test_expand(rank, scale, randomized):
     torch.testing.assert_close(pod.expand(c), toy_snapshots)
     torch.testing.assert_close(pod.expand(c[0]), toy_snapshots[0].unsqueeze(0))

+
 @pytest.mark.parametrize("scale", [True, False])
 @pytest.mark.parametrize("rank", [1, 2, 10])
 @pytest.mark.parametrize("randomized", [True, False])

@@ -81,9 +88,9 @@ def test_reduce_expand(rank, scale, randomized):
     pod = PODBlock(rank, scale)
     pod.fit(toy_snapshots, randomized)
     torch.testing.assert_close(
-        pod.expand(pod.reduce(toy_snapshots)),
-        toy_snapshots)
+        pod.expand(pod.reduce(toy_snapshots)), toy_snapshots
+    )
     torch.testing.assert_close(
-        pod.expand(pod.reduce(toy_snapshots[0])),
-        toy_snapshots[0].unsqueeze(0))
-    # torch.testing.assert_close(pod.expand(pod.reduce(c[0])), c[0])
\ No newline at end of file
+        pod.expand(pod.reduce(toy_snapshots[0])), toy_snapshots[0].unsqueeze(0)
+    )
+    # torch.testing.assert_close(pod.expand(pod.reduce(c[0])), c[0])
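The round trip checked in test_reduce_expand can be reproduced standalone; everything below comes from this file (the toy snapshots span a single mode, so a rank-2 basis reconstructs them exactly):

import torch
from pina.model.block.pod_block import PODBlock

x = torch.linspace(-1, 1, 100)
toy_snapshots = torch.vstack(
    [torch.exp(-(x**2)) * c for c in torch.linspace(0, 1, 10)]
)

pod = PODBlock(2)  # rank-2 POD basis
pod.fit(toy_snapshots)
coeffs = pod.reduce(toy_snapshots)  # one coefficient vector per snapshot
torch.testing.assert_close(pod.expand(coeffs), toy_snapshots)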

[File: tests for RBFBlock (path not shown in this view)]

@@ -6,26 +6,42 @@ from pina.model.block.rbf_block import RBFBlock

 x = torch.linspace(-1, 1, 100)
 toy_params = torch.linspace(0, 1, 10).unsqueeze(1)
-toy_snapshots = torch.vstack([torch.exp(-x**2)*c for c in toy_params])
+toy_snapshots = torch.vstack([torch.exp(-(x**2)) * c for c in toy_params])

 toy_params_test = torch.linspace(0, 1, 3).unsqueeze(1)
-toy_snapshots_test = torch.vstack([torch.exp(-x**2)*c for c in toy_params_test])
+toy_snapshots_test = torch.vstack(
+    [torch.exp(-(x**2)) * c for c in toy_params_test]
+)

-kernels = ["linear", "thin_plate_spline", "cubic", "quintic",
-           "multiquadric", "inverse_multiquadric", "inverse_quadratic", "gaussian"]
+kernels = [
+    "linear",
+    "thin_plate_spline",
+    "cubic",
+    "quintic",
+    "multiquadric",
+    "inverse_multiquadric",
+    "inverse_quadratic",
+    "gaussian",
+]

-noscale_invariant_kernels = ["multiquadric", "inverse_multiquadric",
-                             "inverse_quadratic", "gaussian"]
+noscale_invariant_kernels = [
+    "multiquadric",
+    "inverse_multiquadric",
+    "inverse_quadratic",
+    "gaussian",
+]
 scale_invariant_kernels = ["linear", "thin_plate_spline", "cubic", "quintic"]


 def test_constructor_default():
     rbf = RBFBlock()
     assert rbf.kernel == "thin_plate_spline"
     assert rbf.epsilon == 1
-    assert rbf.smoothing == 0.
+    assert rbf.smoothing == 0.0


 @pytest.mark.parametrize("kernel", kernels)
-@pytest.mark.parametrize("epsilon", [0.1, 1., 10.])
+@pytest.mark.parametrize("epsilon", [0.1, 1.0, 10.0])
 def test_constructor_epsilon(kernel, epsilon):
     if kernel in scale_invariant_kernels:
         rbf = RBFBlock(kernel=kernel)

@@ -38,15 +54,17 @@ def test_constructor_epsilon(kernel, epsilon):
     assert rbf.kernel == kernel
     assert rbf.epsilon == epsilon
-    assert rbf.smoothing == 0.
+    assert rbf.smoothing == 0.0


 @pytest.mark.parametrize("kernel", kernels)
-@pytest.mark.parametrize("epsilon", [0.1, 1., 10.])
+@pytest.mark.parametrize("epsilon", [0.1, 1.0, 10.0])
 @pytest.mark.parametrize("degree", [2, 3, 4])
 @pytest.mark.parametrize("smoothing", [1e-5, 1e-3, 1e-1])
 def test_constructor_all(kernel, epsilon, degree, smoothing):
-    rbf = RBFBlock(kernel=kernel, epsilon=epsilon, degree=degree,
-                   smoothing=smoothing)
+    rbf = RBFBlock(
+        kernel=kernel, epsilon=epsilon, degree=degree, smoothing=smoothing
+    )
     assert rbf.kernel == kernel
     assert rbf.epsilon == epsilon
     assert rbf.degree == degree

@@ -58,16 +76,21 @@ def test_constructor_all(kernel, epsilon, degree, smoothing):
     assert rbf._scale == None
     assert rbf._coeffs == None

+
 def test_fit():
     rbf = RBFBlock()
     rbf.fit(toy_params, toy_snapshots)
     ndim = toy_params.shape[1]
     torch.testing.assert_close(rbf.y, toy_params)
     torch.testing.assert_close(rbf.d, toy_snapshots)
-    assert rbf.powers.shape == (math.comb(rbf.degree+ndim, ndim), ndim)
+    assert rbf.powers.shape == (math.comb(rbf.degree + ndim, ndim), ndim)
     assert rbf._shift.shape == (ndim,)
     assert rbf._scale.shape == (ndim,)
-    assert rbf._coeffs.shape == (rbf.powers.shape[0]+toy_snapshots.shape[0], toy_snapshots.shape[1])
+    assert rbf._coeffs.shape == (
+        rbf.powers.shape[0] + toy_snapshots.shape[0],
+        toy_snapshots.shape[1],
+    )

+
 def test_forward():
     rbf = RBFBlock()

@@ -76,10 +99,10 @@ def test_forward():
     assert c.shape == toy_snapshots.shape
     torch.testing.assert_close(c, toy_snapshots)


 def test_forward_unseen_parameters():
     rbf = RBFBlock()
     rbf.fit(toy_params, toy_snapshots)
     c = rbf(toy_params_test)
     assert c.shape == toy_snapshots_test.shape
     torch.testing.assert_close(c, toy_snapshots_test)
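Following test_forward_unseen_parameters above, the block interpolates snapshots at parameters it was not fitted on; this sketch uses only names and defaults that appear in this file:

import torch
from pina.model.block.rbf_block import RBFBlock

x = torch.linspace(-1, 1, 100)
toy_params = torch.linspace(0, 1, 10).unsqueeze(1)
toy_snapshots = torch.vstack([torch.exp(-(x**2)) * c for c in toy_params])

rbf = RBFBlock()  # defaults: thin_plate_spline kernel, epsilon=1, smoothing=0.0
rbf.fit(toy_params, toy_snapshots)
c = rbf(torch.linspace(0, 1, 3).unsqueeze(1))  # three unseen parameters
assert c.shape == (3, 100)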

[File: tests for ResidualBlock and EnhancedLinear (path not shown in this view)]

@@ -7,10 +7,9 @@ def test_constructor_residual_block():
     res_block = ResidualBlock(input_dim=10, output_dim=3, hidden_dim=4)
-    res_block = ResidualBlock(input_dim=10,
-                              output_dim=3,
-                              hidden_dim=4,
-                              spectral_norm=True)
+    res_block = ResidualBlock(
+        input_dim=10, output_dim=3, hidden_dim=4, spectral_norm=True
+    )


 def test_forward_residual_block():

@@ -22,8 +21,9 @@ def test_forward_residual_block():
     assert y.shape[1] == 3
     assert y.shape[0] == x.shape[0]

+
 def test_backward_residual_block():
     res_block = ResidualBlock(input_dim=10, output_dim=3, hidden_dim=4)
     x = torch.rand(size=(80, 10))

@@ -31,27 +31,37 @@ def test_backward_residual_block():
     y = res_block(x)
     l = torch.mean(y)
     l.backward()
-    assert x._grad.shape == torch.Size([80,10])
+    assert x._grad.shape == torch.Size([80, 10])


 def test_constructor_no_activation_no_dropout():
     linear_layer = nn.Linear(10, 20)
     enhanced_linear = EnhancedLinear(linear_layer)
-    assert len(list(enhanced_linear.parameters())) == len(list(linear_layer.parameters()))
+    assert len(list(enhanced_linear.parameters())) == len(
+        list(linear_layer.parameters())
+    )


 def test_constructor_with_activation_no_dropout():
     linear_layer = nn.Linear(10, 20)
     activation = nn.ReLU()
     enhanced_linear = EnhancedLinear(linear_layer, activation)
-    assert len(list(enhanced_linear.parameters())) == len(list(linear_layer.parameters())) + len(list(activation.parameters()))
+    assert len(list(enhanced_linear.parameters())) == len(
+        list(linear_layer.parameters())
+    ) + len(list(activation.parameters()))


 def test_constructor_no_activation_with_dropout():
     linear_layer = nn.Linear(10, 20)
     dropout_prob = 0.5
     enhanced_linear = EnhancedLinear(linear_layer, dropout=dropout_prob)
-    assert len(list(enhanced_linear.parameters())) == len(list(linear_layer.parameters()))
+    assert len(list(enhanced_linear.parameters())) == len(
+        list(linear_layer.parameters())
+    )


 def test_constructor_with_activation_with_dropout():
     linear_layer = nn.Linear(10, 20)

@@ -59,7 +69,10 @@ def test_constructor_with_activation_with_dropout():
     dropout_prob = 0.5
     enhanced_linear = EnhancedLinear(linear_layer, activation, dropout_prob)
-    assert len(list(enhanced_linear.parameters())) == len(list(linear_layer.parameters())) + len(list(activation.parameters()))
+    assert len(list(enhanced_linear.parameters())) == len(
+        list(linear_layer.parameters())
+    ) + len(list(activation.parameters()))


 def test_forward_enhanced_linear_no_dropout():

@@ -70,8 +83,9 @@ def test_forward_enhanced_linear_no_dropout():
     assert y.shape[1] == 3
     assert y.shape[0] == x.shape[0]

+
 def test_backward_enhanced_linear_no_dropout():
     enhanced_linear = EnhancedLinear(nn.Linear(10, 3))
     x = torch.rand(size=(80, 10))

@@ -81,6 +95,7 @@ def test_backward_enhanced_linear_no_dropout():
     l.backward()
     assert x._grad.shape == torch.Size([80, 10])

+
 def test_forward_enhanced_linear_dropout():
     enhanced_linear = EnhancedLinear(nn.Linear(10, 3), dropout=0.5)

@@ -90,8 +105,9 @@ def test_forward_enhanced_linear_dropout():
     assert y.shape[1] == 3
     assert y.shape[0] == x.shape[0]

+
 def test_backward_enhanced_linear_dropout():
     enhanced_linear = EnhancedLinear(nn.Linear(10, 3), dropout=0.5)
     x = torch.rand(size=(80, 10))
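The constructor tests above all reduce to one parameter-count invariant. A compact sketch (the import path for EnhancedLinear is assumed; ReLU and dropout contribute no trainable parameters, which is why the counts match):

import torch.nn as nn
from pina.model.block import EnhancedLinear  # import path assumed

linear_layer = nn.Linear(10, 20)
enhanced_linear = EnhancedLinear(linear_layer, nn.ReLU(), dropout=0.5)
# wrapping adds activation and dropout but no trainable parameters
assert len(list(enhanced_linear.parameters())) == len(
    list(linear_layer.parameters())
)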

[File: tests for SpectralConvBlock1D/2D/3D (path not shown in this view)]

@@ -1,4 +1,8 @@
-from pina.model.block import SpectralConvBlock1D, SpectralConvBlock2D, SpectralConvBlock3D
+from pina.model.block import (
+    SpectralConvBlock1D,
+    SpectralConvBlock2D,
+    SpectralConvBlock3D,
+)
 import torch

 input_numb_fields = 3

@@ -7,78 +11,96 @@ batch = 5


 def test_constructor_1d():
-    SpectralConvBlock1D(input_numb_fields=input_numb_fields,
-                        output_numb_fields=output_numb_fields,
-                        n_modes=5)
+    SpectralConvBlock1D(
+        input_numb_fields=input_numb_fields,
+        output_numb_fields=output_numb_fields,
+        n_modes=5,
+    )


 def test_forward_1d():
-    sconv = SpectralConvBlock1D(input_numb_fields=input_numb_fields,
-                                output_numb_fields=output_numb_fields,
-                                n_modes=4)
+    sconv = SpectralConvBlock1D(
+        input_numb_fields=input_numb_fields,
+        output_numb_fields=output_numb_fields,
+        n_modes=4,
+    )
     x = torch.rand(batch, input_numb_fields, 10)
     sconv(x)


 def test_backward_1d():
-    sconv = SpectralConvBlock1D(input_numb_fields=input_numb_fields,
-                                output_numb_fields=output_numb_fields,
-                                n_modes=4)
+    sconv = SpectralConvBlock1D(
+        input_numb_fields=input_numb_fields,
+        output_numb_fields=output_numb_fields,
+        n_modes=4,
+    )
     x = torch.rand(batch, input_numb_fields, 10)
     x.requires_grad = True
     sconv(x)
-    l=torch.mean(sconv(x))
+    l = torch.mean(sconv(x))
     l.backward()
-    assert x._grad.shape == torch.Size([5,3,10])
+    assert x._grad.shape == torch.Size([5, 3, 10])


 def test_constructor_2d():
-    SpectralConvBlock2D(input_numb_fields=input_numb_fields,
-                        output_numb_fields=output_numb_fields,
-                        n_modes=[5, 4])
+    SpectralConvBlock2D(
+        input_numb_fields=input_numb_fields,
+        output_numb_fields=output_numb_fields,
+        n_modes=[5, 4],
+    )


 def test_forward_2d():
-    sconv = SpectralConvBlock2D(input_numb_fields=input_numb_fields,
-                                output_numb_fields=output_numb_fields,
-                                n_modes=[5, 4])
+    sconv = SpectralConvBlock2D(
+        input_numb_fields=input_numb_fields,
+        output_numb_fields=output_numb_fields,
+        n_modes=[5, 4],
+    )
     x = torch.rand(batch, input_numb_fields, 10, 10)
     sconv(x)


 def test_backward_2d():
-    sconv = SpectralConvBlock2D(input_numb_fields=input_numb_fields,
-                                output_numb_fields=output_numb_fields,
-                                n_modes=[5, 4])
+    sconv = SpectralConvBlock2D(
+        input_numb_fields=input_numb_fields,
+        output_numb_fields=output_numb_fields,
+        n_modes=[5, 4],
+    )
     x = torch.rand(batch, input_numb_fields, 10, 10)
     x.requires_grad = True
     sconv(x)
-    l=torch.mean(sconv(x))
+    l = torch.mean(sconv(x))
     l.backward()
-    assert x._grad.shape == torch.Size([5,3,10,10])
+    assert x._grad.shape == torch.Size([5, 3, 10, 10])


 def test_constructor_3d():
-    SpectralConvBlock3D(input_numb_fields=input_numb_fields,
-                        output_numb_fields=output_numb_fields,
-                        n_modes=[5, 4, 4])
+    SpectralConvBlock3D(
+        input_numb_fields=input_numb_fields,
+        output_numb_fields=output_numb_fields,
+        n_modes=[5, 4, 4],
+    )


 def test_forward_3d():
-    sconv = SpectralConvBlock3D(input_numb_fields=input_numb_fields,
-                                output_numb_fields=output_numb_fields,
-                                n_modes=[5, 4, 4])
+    sconv = SpectralConvBlock3D(
+        input_numb_fields=input_numb_fields,
+        output_numb_fields=output_numb_fields,
+        n_modes=[5, 4, 4],
+    )
     x = torch.rand(batch, input_numb_fields, 10, 10, 10)
     sconv(x)


 def test_backward_3d():
-    sconv = SpectralConvBlock3D(input_numb_fields=input_numb_fields,
-                                output_numb_fields=output_numb_fields,
-                                n_modes=[5, 4, 4])
+    sconv = SpectralConvBlock3D(
+        input_numb_fields=input_numb_fields,
+        output_numb_fields=output_numb_fields,
+        n_modes=[5, 4, 4],
+    )
     x = torch.rand(batch, input_numb_fields, 10, 10, 10)
     x.requires_grad = True
     sconv(x)
-    l=torch.mean(sconv(x))
+    l = torch.mean(sconv(x))
     l.backward()
-    assert x._grad.shape == torch.Size([5,3,10,10,10])
+    assert x._grad.shape == torch.Size([5, 3, 10, 10, 10])
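An end-to-end sketch of the 1D case above, reusing this file's constants (batch = 5, input_numb_fields = 3; the output field count is illustrative, since its module-level value is not shown in this excerpt):

import torch
from pina.model.block import SpectralConvBlock1D

sconv = SpectralConvBlock1D(input_numb_fields=3, output_numb_fields=3, n_modes=4)
x = torch.rand(5, 3, 10, requires_grad=True)
l = torch.mean(sconv(x))
l.backward()
# the input gradient keeps the input shape, as asserted in test_backward_1d
assert x.grad.shape == torch.Size([5, 3, 10])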