Remove backward-compatibility files for version 0.2

giovanni
2025-09-05 11:12:50 +02:00
committed by Giovanni Canali
parent ef3542486c
commit 684d691b78
27 changed files with 0 additions and 120 deletions

View File

@@ -1,162 +0,0 @@
from pina.model.block import ContinuousConvBlock
import torch
def prod(iterable):
p = 1
for n in iterable:
p *= n
return p
def make_grid(x):
    """Convert a batch of images into point-cloud format: for each channel,
    stack the pixel coordinates and the pixel value as (points, dim + 1)."""
def _transform_image(image):
# extracting image info
channels, dimension = image.size()[0], image.size()[1:]
        # initializing the transformed image
coordinates = torch.zeros(
[channels, prod(dimension), len(dimension) + 1]
).to(image.device)
# creating the n dimensional mesh grid
values_mesh = [
torch.arange(0, dim).float().to(image.device) for dim in dimension
]
        mesh = torch.meshgrid(values_mesh, indexing="ij")
        coordinates_mesh = [x.reshape(-1, 1) for x in mesh]
        coordinates_mesh.append(0)  # placeholder, replaced per channel below
for count, channel in enumerate(image):
coordinates_mesh[-1] = channel.reshape(-1, 1)
coordinates[count] = torch.cat(coordinates_mesh, dim=1)
return coordinates
output = [_transform_image(current_image) for current_image in x]
return torch.stack(output).to(x.device)
class MLP(torch.nn.Module):
    """Simple 2 -> 1 feed-forward network used as the continuous filter."""

    def __init__(self) -> None:
        super().__init__()
self.model = torch.nn.Sequential(
torch.nn.Linear(2, 8),
torch.nn.ReLU(),
torch.nn.Linear(8, 8),
torch.nn.ReLU(),
torch.nn.Linear(8, 1),
)
def forward(self, x):
return self.model(x)
# INPUTS
channel_input = 2
channel_output = 6
batch = 2
N = 10
dim = [3, 3]
stride = {
"domain": [10, 10],
"start": [0, 0],
"jumps": [3, 3],
"direction": [1, 1.0],
}
dim_filter = len(dim)
dim_input = (batch, channel_input, 10, dim_filter)
dim_output = (batch, channel_output, 4, dim_filter)
x = torch.rand(dim_input)
x = make_grid(x)
def test_constructor():
    # the filter model is passed as a class, not an instance
    model = MLP
conv = ContinuousConvBlock(
channel_input, channel_output, dim, stride, model=model
)
conv = ContinuousConvBlock(
channel_input, channel_output, dim, stride, model=None
)
def test_forward():
model = MLP
# simple forward
conv = ContinuousConvBlock(
channel_input, channel_output, dim, stride, model=model
)
conv(x)
# simple forward with optimization
conv = ContinuousConvBlock(
channel_input, channel_output, dim, stride, model=model, optimize=True
)
conv(x)
def test_backward():
model = MLP
x = torch.rand(dim_input)
x = make_grid(x)
x.requires_grad = True
# simple backward
conv = ContinuousConvBlock(
channel_input, channel_output, dim, stride, model=model
)
conv(x)
    loss = torch.mean(conv(x))
    loss.backward()
    assert x.grad.shape == torch.Size([2, 2, 20, 3])
x = torch.rand(dim_input)
x = make_grid(x)
x.requires_grad = True
# simple backward with optimization
conv = ContinuousConvBlock(
channel_input, channel_output, dim, stride, model=model, optimize=True
)
conv(x)
    loss = torch.mean(conv(x))
    loss.backward()
    assert x.grad.shape == torch.Size([2, 2, 20, 3])
def test_transpose():
model = MLP
# simple transpose
conv = ContinuousConvBlock(
channel_input, channel_output, dim, stride, model=model
)
conv2 = ContinuousConvBlock(
channel_output, channel_input, dim, stride, model=model
)
integrals = conv(x)
conv2.transpose(integrals[..., -1], x)
# stride_no_overlap = {"domain": [10, 10],
# "start": [0, 0],
# "jumps": dim,
# "direction": [1, 1.]}
## simple transpose with optimization
# conv = ContinuousConvBlock(channel_input,
# channel_output,
# dim,
# stride_no_overlap,
# model=model,
# optimize=True,
# no_overlap=True)
# integrals = conv(x)
# conv.transpose(integrals[..., -1], x)
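
For reference, a minimal usage sketch of ContinuousConvBlock as exercised above (it reuses the MLP, stride, and make_grid definitions from this file; the expected gradient shape is the one asserted in test_backward):

import torch
from pina.model.block import ContinuousConvBlock

# sketch: one forward/backward pass through a continuous convolution
conv = ContinuousConvBlock(2, 6, [3, 3], stride, model=MLP)
grid = make_grid(torch.rand(2, 2, 10, 2))  # (batch, channels, points, coords + value)
grid.requires_grad = True
torch.mean(conv(grid)).backward()
print(grid.grad.shape)  # torch.Size([2, 2, 20, 3])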

View File

@@ -1,110 +0,0 @@
import torch
import pytest
from pina.model.block import PeriodicBoundaryEmbedding, FourierFeatureEmbedding
# test tolerance
tol = 1e-6
def check_same_columns(tensor):
    # residual of every row against the first one
    residual = tensor - tensor[0]
    zeros = torch.zeros_like(residual)
    # all rows must coincide with the first row (up to tolerance)
    all_same = torch.allclose(input=residual, other=zeros, atol=tol)
    return all_same
def grad(u, x):
"""
Compute the first derivative of u with respect to x.
"""
return torch.autograd.grad(
u,
x,
grad_outputs=torch.ones_like(u),
create_graph=True,
allow_unused=True,
retain_graph=True,
)[0]
def test_constructor_PeriodicBoundaryEmbedding():
PeriodicBoundaryEmbedding(input_dimension=1, periods=2)
PeriodicBoundaryEmbedding(input_dimension=1, periods={"x": 3, "y": 4})
PeriodicBoundaryEmbedding(input_dimension=1, periods={0: 3, 1: 4})
PeriodicBoundaryEmbedding(input_dimension=1, periods=2, output_dimension=10)
with pytest.raises(TypeError):
PeriodicBoundaryEmbedding()
    with pytest.raises(ValueError):
        PeriodicBoundaryEmbedding(input_dimension=1.0, periods=1)
    with pytest.raises(ValueError):
        PeriodicBoundaryEmbedding(
            input_dimension=1, periods=1, output_dimension=1.0
        )
    with pytest.raises(ValueError):
        PeriodicBoundaryEmbedding(input_dimension=1, periods={"x": "x"})
    with pytest.raises(ValueError):
        PeriodicBoundaryEmbedding(input_dimension=1, periods={0: "x"})
@pytest.mark.parametrize("period", [1, 4, 10])
@pytest.mark.parametrize("input_dimension", [1, 2, 3])
def test_forward_backward_same_period_PeriodicBoundaryEmbedding(
input_dimension, period
):
func = torch.nn.Sequential(
PeriodicBoundaryEmbedding(
input_dimension=input_dimension, output_dimension=60, periods=period
),
torch.nn.Tanh(),
torch.nn.Linear(60, 60),
torch.nn.Tanh(),
torch.nn.Linear(60, 1),
)
# coordinates
x = period * torch.tensor([[0.0], [1.0]])
if input_dimension == 2:
x = torch.cartesian_prod(x.flatten(), x.flatten())
elif input_dimension == 3:
x = torch.cartesian_prod(x.flatten(), x.flatten(), x.flatten())
x.requires_grad = True
# output
f = func(x)
assert check_same_columns(f)
# compute backward
loss = f.mean()
loss.backward()
def test_constructor_FourierFeatureEmbedding():
FourierFeatureEmbedding(input_dimension=1, output_dimension=20, sigma=1)
with pytest.raises(TypeError):
FourierFeatureEmbedding()
    with pytest.raises(RuntimeError):
        FourierFeatureEmbedding(input_dimension=1, output_dimension=3, sigma=1)
    with pytest.raises(ValueError):
        FourierFeatureEmbedding(
            input_dimension="x", output_dimension=20, sigma=1
        )
    with pytest.raises(ValueError):
        FourierFeatureEmbedding(
            input_dimension=1, output_dimension="x", sigma=1
        )
    with pytest.raises(ValueError):
        FourierFeatureEmbedding(
            input_dimension=1, output_dimension=20, sigma="x"
        )
@pytest.mark.parametrize("output_dimension", [2, 4, 6])
@pytest.mark.parametrize("input_dimension", [1, 2, 3])
@pytest.mark.parametrize("sigma", [10, 1, 0.1])
def test_forward_backward_FourierFeatureEmbedding(
input_dimension, output_dimension, sigma
):
func = FourierFeatureEmbedding(input_dimension, output_dimension, sigma)
# coordinates
x = torch.rand((10, input_dimension), requires_grad=True)
# output
f = func(x)
assert f.shape[-1] == output_dimension
# compute backward
loss = f.mean()
loss.backward()
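
For reference, a minimal sketch of the two embeddings in isolation, using the same constructor arguments as the tests above:

import torch
from pina.model.block import PeriodicBoundaryEmbedding, FourierFeatureEmbedding

# a network made 1-periodic in its single coordinate by the embedding
net = torch.nn.Sequential(
    PeriodicBoundaryEmbedding(input_dimension=1, output_dimension=60, periods=1),
    torch.nn.Tanh(),
    torch.nn.Linear(60, 1),
)
x = torch.tensor([[0.0], [1.0]])  # two points exactly one period apart
assert torch.allclose(net(x[:1]), net(x[1:]), atol=1e-6)

# random Fourier features: output_dimension must be even, else RuntimeError
emb = FourierFeatureEmbedding(input_dimension=3, output_dimension=20, sigma=1)
assert emb(torch.rand(10, 3)).shape[-1] == 20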

View File

@@ -1,102 +0,0 @@
from pina.model.block import FourierBlock1D, FourierBlock2D, FourierBlock3D
import torch
input_numb_fields = 3
output_numb_fields = 4
batch = 5
def test_constructor_1d():
FourierBlock1D(
input_numb_fields=input_numb_fields,
output_numb_fields=output_numb_fields,
n_modes=5,
)
def test_forward_1d():
sconv = FourierBlock1D(
input_numb_fields=input_numb_fields,
output_numb_fields=output_numb_fields,
n_modes=4,
)
x = torch.rand(batch, input_numb_fields, 10)
sconv(x)
def test_backward_1d():
sconv = FourierBlock1D(
input_numb_fields=input_numb_fields,
output_numb_fields=output_numb_fields,
n_modes=4,
)
x = torch.rand(batch, input_numb_fields, 10)
x.requires_grad = True
sconv(x)
    loss = torch.mean(sconv(x))
    loss.backward()
    assert x.grad.shape == torch.Size([5, 3, 10])
def test_constructor_2d():
FourierBlock2D(
input_numb_fields=input_numb_fields,
output_numb_fields=output_numb_fields,
n_modes=[5, 4],
)
def test_forward_2d():
sconv = FourierBlock2D(
input_numb_fields=input_numb_fields,
output_numb_fields=output_numb_fields,
n_modes=[5, 4],
)
x = torch.rand(batch, input_numb_fields, 10, 10)
sconv(x)
def test_backward_2d():
sconv = FourierBlock2D(
input_numb_fields=input_numb_fields,
output_numb_fields=output_numb_fields,
n_modes=[5, 4],
)
x = torch.rand(batch, input_numb_fields, 10, 10)
x.requires_grad = True
sconv(x)
    loss = torch.mean(sconv(x))
    loss.backward()
    assert x.grad.shape == torch.Size([5, 3, 10, 10])
def test_constructor_3d():
FourierBlock3D(
input_numb_fields=input_numb_fields,
output_numb_fields=output_numb_fields,
n_modes=[5, 4, 4],
)
def test_forward_3d():
sconv = FourierBlock3D(
input_numb_fields=input_numb_fields,
output_numb_fields=output_numb_fields,
n_modes=[5, 4, 4],
)
x = torch.rand(batch, input_numb_fields, 10, 10, 10)
sconv(x)
def test_backward_3d():
sconv = FourierBlock3D(
input_numb_fields=input_numb_fields,
output_numb_fields=output_numb_fields,
n_modes=[5, 4, 4],
)
x = torch.rand(batch, input_numb_fields, 10, 10, 10)
x.requires_grad = True
sconv(x)
    loss = torch.mean(sconv(x))
    loss.backward()
    assert x.grad.shape == torch.Size([5, 3, 10, 10, 10])
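
For reference, a forward-only sketch of the 2D variant (the channel count changes from input_numb_fields to output_numb_fields; the spatial grid is assumed to be preserved, as is standard for Fourier layers):

import torch
from pina.model.block import FourierBlock2D

block = FourierBlock2D(
    input_numb_fields=3, output_numb_fields=4, n_modes=[5, 4]
)
y = block(torch.rand(5, 3, 10, 10))  # (batch, fields, H, W)
print(y.shape)  # expected: torch.Size([5, 4, 10, 10])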

View File

@@ -1,70 +0,0 @@
import torch
import pytest
from pina.model.block import LowRankBlock
from pina import LabelTensor
input_dimensions = 2
embedding_dimenion = 1
rank = 4
inner_size = 20
n_layers = 2
func = torch.nn.Tanh
bias = True
def test_constructor():
LowRankBlock(
input_dimensions=input_dimensions,
embedding_dimenion=embedding_dimenion,
rank=rank,
inner_size=inner_size,
n_layers=n_layers,
func=func,
bias=bias,
)
def test_constructor_wrong():
with pytest.raises(ValueError):
LowRankBlock(
input_dimensions=input_dimensions,
embedding_dimenion=embedding_dimenion,
rank=0.5,
inner_size=inner_size,
n_layers=n_layers,
func=func,
bias=bias,
)
def test_forward():
block = LowRankBlock(
input_dimensions=input_dimensions,
embedding_dimenion=embedding_dimenion,
rank=rank,
inner_size=inner_size,
n_layers=n_layers,
func=func,
bias=bias,
)
data = LabelTensor(torch.rand(10, 30, 3), labels=["x", "y", "u"])
block(data.extract("u"), data.extract(["x", "y"]))
def test_backward():
block = LowRankBlock(
input_dimensions=input_dimensions,
embedding_dimenion=embedding_dimenion,
rank=rank,
inner_size=inner_size,
n_layers=n_layers,
func=func,
bias=bias,
)
data = LabelTensor(torch.rand(10, 30, 3), labels=["x", "y", "u"])
data.requires_grad_(True)
out = block(data.extract("u"), data.extract(["x", "y"]))
loss = out.mean()
loss.backward()
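
For reference, a minimal standalone sketch of the call signature used above; note that embedding_dimenion is the library's own spelling of the parameter:

import torch
from pina import LabelTensor
from pina.model.block import LowRankBlock

block = LowRankBlock(
    input_dimensions=2, embedding_dimenion=1, rank=4,
    inner_size=20, n_layers=2, func=torch.nn.Tanh, bias=True,
)
data = LabelTensor(torch.rand(10, 30, 3), labels=["x", "y", "u"])
# forward takes the field values first, then the coordinates
out = block(data.extract("u"), data.extract(["x", "y"]))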

View File

@@ -1,75 +0,0 @@
import torch
import pytest
from pina.model.block import OrthogonalBlock
torch.manual_seed(111)
list_matrices = [
torch.randn(10, 3),
torch.rand(100, 5),
torch.randn(5, 5),
]
list_prohibited_matrices_dim0 = list_matrices[:-1]
@pytest.mark.parametrize("dim", [-1, 0, 1, None])
@pytest.mark.parametrize("requires_grad", [True, False, None])
def test_constructor(dim, requires_grad):
if dim is None and requires_grad is None:
block = OrthogonalBlock()
elif dim is None:
block = OrthogonalBlock(requires_grad=requires_grad)
elif requires_grad is None:
block = OrthogonalBlock(dim=dim)
else:
block = OrthogonalBlock(dim=dim, requires_grad=requires_grad)
if dim is not None:
assert block.dim == dim
if requires_grad is not None:
assert block.requires_grad == requires_grad
def test_wrong_constructor():
with pytest.raises(IndexError):
OrthogonalBlock(2)
with pytest.raises(ValueError):
OrthogonalBlock("a")
@pytest.mark.parametrize("V", list_matrices)
def test_forward(V):
orth = OrthogonalBlock()
orth_row = OrthogonalBlock(0)
V_orth = orth(V)
V_orth_row = orth_row(V.T)
assert torch.allclose(V_orth.T @ V_orth, torch.eye(V.shape[1]), atol=1e-6)
assert torch.allclose(
V_orth_row @ V_orth_row.T, torch.eye(V.shape[1]), atol=1e-6
)
@pytest.mark.parametrize("V", list_matrices)
def test_backward(V):
orth = OrthogonalBlock(requires_grad=True)
V_orth = orth(V)
loss = V_orth.mean()
loss.backward()
@pytest.mark.parametrize("V", list_matrices)
def test_wrong_backward(V):
orth = OrthogonalBlock(requires_grad=False)
V_orth = orth(V)
loss = V_orth.mean()
with pytest.raises(RuntimeError):
loss.backward()
@pytest.mark.parametrize("V", list_prohibited_matrices_dim0)
def test_forward_prohibited(V):
    # sanity check: tall matrices cannot have orthonormal rows
    assert V.shape[0] > V.shape[1]
    orth = OrthogonalBlock(0)
    with pytest.raises(Warning):
        V_orth = orth(V)
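
For reference, a minimal sketch of the orthonormalization these tests verify (dim=-1, the default, orthonormalizes columns; dim=0 works on rows and is rejected for tall matrices, where orthonormal rows cannot exist):

import torch
from pina.model.block import OrthogonalBlock

V = torch.randn(10, 3)
orth = OrthogonalBlock()  # dim=-1 by default
Q = orth(V)
assert torch.allclose(Q.T @ Q, torch.eye(3), atol=1e-6)  # orthonormal columns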

View File

@@ -1,53 +0,0 @@
import torch
import pytest
from pina.model.block import PirateNetBlock
data = torch.rand((20, 3))
@pytest.mark.parametrize("inner_size", [10, 20])
def test_constructor(inner_size):
PirateNetBlock(inner_size=inner_size, activation=torch.nn.Tanh)
# Should fail if inner_size is negative
with pytest.raises(AssertionError):
PirateNetBlock(inner_size=-1, activation=torch.nn.Tanh)
@pytest.mark.parametrize("inner_size", [10, 20])
def test_forward(inner_size):
model = PirateNetBlock(inner_size=inner_size, activation=torch.nn.Tanh)
# Create dummy embedding
dummy_embedding = torch.nn.Linear(data.shape[1], inner_size)
x = dummy_embedding(data)
# Create dummy U and V tensors
U = torch.rand((data.shape[0], inner_size))
V = torch.rand((data.shape[0], inner_size))
output_ = model(x, U, V)
assert output_.shape == (data.shape[0], inner_size)
@pytest.mark.parametrize("inner_size", [10, 20])
def test_backward(inner_size):
model = PirateNetBlock(inner_size=inner_size, activation=torch.nn.Tanh)
data.requires_grad_()
# Create dummy embedding
dummy_embedding = torch.nn.Linear(data.shape[1], inner_size)
x = dummy_embedding(data)
# Create dummy U and V tensors
U = torch.rand((data.shape[0], inner_size))
V = torch.rand((data.shape[0], inner_size))
output_ = model(x, U, V)
loss = torch.mean(output_)
loss.backward()
assert data.grad.shape == data.shape
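
For reference, a minimal forward sketch matching the tests above (x is an embedded input; U and V are the two gating tensors of the PirateNet architecture, all sharing the inner size):

import torch
from pina.model.block import PirateNetBlock

inner_size = 10
block = PirateNetBlock(inner_size=inner_size, activation=torch.nn.Tanh)
x = torch.nn.Linear(3, inner_size)(torch.rand(20, 3))  # dummy embedding
U = torch.rand(20, inner_size)
V = torch.rand(20, inner_size)
assert block(x, U, V).shape == (20, inner_size)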

View File

@@ -1,100 +0,0 @@
import torch
import pytest
from pina.model.block.pod_block import PODBlock
x = torch.linspace(-1, 1, 100)
toy_snapshots = torch.vstack(
[torch.exp(-(x**2)) * c for c in torch.linspace(0, 1, 10)]
)
def test_constructor():
pod = PODBlock(2)
pod = PODBlock(2, True)
pod = PODBlock(2, False)
with pytest.raises(TypeError):
pod = PODBlock()
@pytest.mark.parametrize("rank", [1, 2, 10])
def test_fit(rank, scale):
pod = PODBlock(rank, scale)
assert pod._basis == None
assert pod.basis == None
assert pod._scaler == None
assert pod._singular_values == None
assert pod.singular_values == None
assert pod.rank == rank
assert pod.scale_coefficients == scale
@pytest.mark.parametrize("scale", [True, False])
@pytest.mark.parametrize("rank", [1, 2, 10])
@pytest.mark.parametrize("randomized", [True, False])
def test_fit(rank, scale, randomized):
pod = PODBlock(rank, scale)
pod.fit(toy_snapshots, randomized)
n_snap = toy_snapshots.shape[0]
dof = toy_snapshots.shape[1]
assert pod.basis.shape == (rank, dof)
assert pod._basis.shape == (n_snap, dof)
assert pod.singular_values.shape == (rank,)
assert pod._singular_values.shape == (n_snap,)
if scale is True:
assert pod._scaler["mean"].shape == (n_snap,)
assert pod._scaler["std"].shape == (n_snap,)
assert pod.scaler["mean"].shape == (rank,)
assert pod.scaler["std"].shape == (rank,)
assert pod.scaler["mean"].shape[0] == pod.basis.shape[0]
else:
        assert pod._scaler is None
        assert pod.scaler is None
def test_forward():
pod = PODBlock(1)
pod.fit(toy_snapshots)
c = pod(toy_snapshots)
assert c.shape[0] == toy_snapshots.shape[0]
assert c.shape[1] == pod.rank
torch.testing.assert_close(c.mean(dim=0), torch.zeros(pod.rank))
torch.testing.assert_close(c.std(dim=0), torch.ones(pod.rank))
c = pod(toy_snapshots[0])
assert c.shape[1] == pod.rank
assert c.shape[0] == 1
pod = PODBlock(2, False)
pod.fit(toy_snapshots)
c = pod(toy_snapshots)
torch.testing.assert_close(c, (pod.basis @ toy_snapshots.T).T)
    with pytest.raises(AssertionError):
        torch.testing.assert_close(c.mean(dim=0), torch.zeros(pod.rank))
    with pytest.raises(AssertionError):
        torch.testing.assert_close(c.std(dim=0), torch.ones(pod.rank))
@pytest.mark.parametrize("scale", [True, False])
@pytest.mark.parametrize("rank", [1, 2, 10])
@pytest.mark.parametrize("randomized", [True, False])
def test_expand(rank, scale, randomized):
pod = PODBlock(rank, scale)
pod.fit(toy_snapshots, randomized)
c = pod(toy_snapshots)
torch.testing.assert_close(pod.expand(c), toy_snapshots)
torch.testing.assert_close(pod.expand(c[0]), toy_snapshots[0].unsqueeze(0))
@pytest.mark.parametrize("scale", [True, False])
@pytest.mark.parametrize("rank", [1, 2, 10])
@pytest.mark.parametrize("randomized", [True, False])
def test_reduce_expand(rank, scale, randomized):
pod = PODBlock(rank, scale)
pod.fit(toy_snapshots, randomized)
torch.testing.assert_close(
pod.expand(pod.reduce(toy_snapshots)), toy_snapshots
)
torch.testing.assert_close(
pod.expand(pod.reduce(toy_snapshots[0])), toy_snapshots[0].unsqueeze(0)
)
# torch.testing.assert_close(pod.expand(pod.reduce(c[0])), c[0])
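
For reference, a minimal round-trip sketch of the reduce/expand API exercised above (the toy snapshots span a one-dimensional subspace, so the reconstruction is exact up to numerics):

import torch
from pina.model.block.pod_block import PODBlock

x = torch.linspace(-1, 1, 100)
snapshots = torch.vstack(
    [torch.exp(-(x**2)) * c for c in torch.linspace(0, 1, 10)]
)
pod = PODBlock(2)           # rank-2 basis, coefficient scaling on by default
pod.fit(snapshots)
coeffs = pod(snapshots)     # (10, 2) scaled POD coefficients
recon = pod.expand(coeffs)  # back to (10, 100)
torch.testing.assert_close(recon, snapshots)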

View File

@@ -1,108 +0,0 @@
import torch
import pytest
import math
from pina.model.block.rbf_block import RBFBlock
x = torch.linspace(-1, 1, 100)
toy_params = torch.linspace(0, 1, 10).unsqueeze(1)
toy_snapshots = torch.vstack([torch.exp(-(x**2)) * c for c in toy_params])
toy_params_test = torch.linspace(0, 1, 3).unsqueeze(1)
toy_snapshots_test = torch.vstack(
[torch.exp(-(x**2)) * c for c in toy_params_test]
)
kernels = [
"linear",
"thin_plate_spline",
"cubic",
"quintic",
"multiquadric",
"inverse_multiquadric",
"inverse_quadratic",
"gaussian",
]
noscale_invariant_kernels = [
"multiquadric",
"inverse_multiquadric",
"inverse_quadratic",
"gaussian",
]
scale_invariant_kernels = ["linear", "thin_plate_spline", "cubic", "quintic"]
def test_constructor_default():
rbf = RBFBlock()
assert rbf.kernel == "thin_plate_spline"
assert rbf.epsilon == 1
assert rbf.smoothing == 0.0
@pytest.mark.parametrize("kernel", kernels)
@pytest.mark.parametrize("epsilon", [0.1, 1.0, 10.0])
def test_constructor_epsilon(kernel, epsilon):
if kernel in scale_invariant_kernels:
rbf = RBFBlock(kernel=kernel)
assert rbf.kernel == kernel
assert rbf.epsilon == 1
elif kernel in noscale_invariant_kernels:
with pytest.raises(ValueError):
rbf = RBFBlock(kernel=kernel)
rbf = RBFBlock(kernel=kernel, epsilon=epsilon)
assert rbf.kernel == kernel
assert rbf.epsilon == epsilon
assert rbf.smoothing == 0.0
@pytest.mark.parametrize("kernel", kernels)
@pytest.mark.parametrize("epsilon", [0.1, 1.0, 10.0])
@pytest.mark.parametrize("degree", [2, 3, 4])
@pytest.mark.parametrize("smoothing", [1e-5, 1e-3, 1e-1])
def test_constructor_all(kernel, epsilon, degree, smoothing):
rbf = RBFBlock(
kernel=kernel, epsilon=epsilon, degree=degree, smoothing=smoothing
)
assert rbf.kernel == kernel
assert rbf.epsilon == epsilon
assert rbf.degree == degree
assert rbf.smoothing == smoothing
    assert rbf.y is None
    assert rbf.d is None
    assert rbf.powers is None
    assert rbf._shift is None
    assert rbf._scale is None
    assert rbf._coeffs is None
def test_fit():
rbf = RBFBlock()
rbf.fit(toy_params, toy_snapshots)
ndim = toy_params.shape[1]
torch.testing.assert_close(rbf.y, toy_params)
torch.testing.assert_close(rbf.d, toy_snapshots)
assert rbf.powers.shape == (math.comb(rbf.degree + ndim, ndim), ndim)
assert rbf._shift.shape == (ndim,)
assert rbf._scale.shape == (ndim,)
assert rbf._coeffs.shape == (
rbf.powers.shape[0] + toy_snapshots.shape[0],
toy_snapshots.shape[1],
)
def test_forward():
rbf = RBFBlock()
rbf.fit(toy_params, toy_snapshots)
c = rbf(toy_params)
assert c.shape == toy_snapshots.shape
torch.testing.assert_close(c, toy_snapshots)
def test_forward_unseen_parameters():
rbf = RBFBlock()
rbf.fit(toy_params, toy_snapshots)
c = rbf(toy_params_test)
assert c.shape == toy_snapshots_test.shape
torch.testing.assert_close(c, toy_snapshots_test)
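
For reference, a minimal interpolation sketch mirroring the fixtures above: fit on (parameter, snapshot) pairs, then evaluate at unseen parameters:

import torch
from pina.model.block.rbf_block import RBFBlock

x = torch.linspace(-1, 1, 100)
params = torch.linspace(0, 1, 10).unsqueeze(1)
snapshots = torch.vstack([torch.exp(-(x**2)) * c for c in params])

rbf = RBFBlock()  # thin_plate_spline kernel by default
rbf.fit(params, snapshots)
new_params = torch.linspace(0, 1, 3).unsqueeze(1)
assert rbf(new_params).shape == (3, 100)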

View File

@@ -0,0 +1,118 @@
from pina.model.block import ResidualBlock, EnhancedLinear
import torch
import torch.nn as nn
def test_constructor_residual_block():
res_block = ResidualBlock(input_dim=10, output_dim=3, hidden_dim=4)
res_block = ResidualBlock(
input_dim=10, output_dim=3, hidden_dim=4, spectral_norm=True
)
def test_forward_residual_block():
res_block = ResidualBlock(input_dim=10, output_dim=3, hidden_dim=4)
x = torch.rand(size=(80, 10))
y = res_block(x)
assert y.shape[1] == 3
assert y.shape[0] == x.shape[0]
def test_backward_residual_block():
res_block = ResidualBlock(input_dim=10, output_dim=3, hidden_dim=4)
x = torch.rand(size=(80, 10))
x.requires_grad = True
y = res_block(x)
    loss = torch.mean(y)
    loss.backward()
    assert x.grad.shape == torch.Size([80, 10])
def test_constructor_no_activation_no_dropout():
linear_layer = nn.Linear(10, 20)
enhanced_linear = EnhancedLinear(linear_layer)
assert len(list(enhanced_linear.parameters())) == len(
list(linear_layer.parameters())
)
def test_constructor_with_activation_no_dropout():
linear_layer = nn.Linear(10, 20)
activation = nn.ReLU()
enhanced_linear = EnhancedLinear(linear_layer, activation)
assert len(list(enhanced_linear.parameters())) == len(
list(linear_layer.parameters())
) + len(list(activation.parameters()))
def test_constructor_no_activation_with_dropout():
linear_layer = nn.Linear(10, 20)
dropout_prob = 0.5
enhanced_linear = EnhancedLinear(linear_layer, dropout=dropout_prob)
assert len(list(enhanced_linear.parameters())) == len(
list(linear_layer.parameters())
)
def test_constructor_with_activation_with_dropout():
linear_layer = nn.Linear(10, 20)
activation = nn.ReLU()
dropout_prob = 0.5
enhanced_linear = EnhancedLinear(linear_layer, activation, dropout_prob)
assert len(list(enhanced_linear.parameters())) == len(
list(linear_layer.parameters())
) + len(list(activation.parameters()))
def test_forward_enhanced_linear_no_dropout():
enhanced_linear = EnhancedLinear(nn.Linear(10, 3))
x = torch.rand(size=(80, 10))
y = enhanced_linear(x)
assert y.shape[1] == 3
assert y.shape[0] == x.shape[0]
def test_backward_enhanced_linear_no_dropout():
enhanced_linear = EnhancedLinear(nn.Linear(10, 3))
x = torch.rand(size=(80, 10))
x.requires_grad = True
y = enhanced_linear(x)
    loss = torch.mean(y)
    loss.backward()
    assert x.grad.shape == torch.Size([80, 10])
def test_forward_enhanced_linear_dropout():
enhanced_linear = EnhancedLinear(nn.Linear(10, 3), dropout=0.5)
x = torch.rand(size=(80, 10))
y = enhanced_linear(x)
assert y.shape[1] == 3
assert y.shape[0] == x.shape[0]
def test_backward_enhanced_linear_dropout():
enhanced_linear = EnhancedLinear(nn.Linear(10, 3), dropout=0.5)
x = torch.rand(size=(80, 10))
x.requires_grad = True
y = enhanced_linear(x)
    loss = torch.mean(y)
    loss.backward()
    assert x.grad.shape == torch.Size([80, 10])
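
For reference, both blocks map (batch, 10) to (batch, 3) in the tests above; a compact sketch:

import torch
import torch.nn as nn
from pina.model.block import ResidualBlock, EnhancedLinear

res = ResidualBlock(input_dim=10, output_dim=3, hidden_dim=4, spectral_norm=True)
lin = EnhancedLinear(nn.Linear(10, 3), nn.ReLU(), 0.5)  # layer, activation, dropout
x = torch.rand(80, 10)
assert res(x).shape == lin(x).shape == torch.Size([80, 3])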

View File

@@ -0,0 +1,106 @@
from pina.model.block import (
SpectralConvBlock1D,
SpectralConvBlock2D,
SpectralConvBlock3D,
)
import torch
input_numb_fields = 3
output_numb_fields = 4
batch = 5
def test_constructor_1d():
SpectralConvBlock1D(
input_numb_fields=input_numb_fields,
output_numb_fields=output_numb_fields,
n_modes=5,
)
def test_forward_1d():
sconv = SpectralConvBlock1D(
input_numb_fields=input_numb_fields,
output_numb_fields=output_numb_fields,
n_modes=4,
)
x = torch.rand(batch, input_numb_fields, 10)
sconv(x)
def test_backward_1d():
sconv = SpectralConvBlock1D(
input_numb_fields=input_numb_fields,
output_numb_fields=output_numb_fields,
n_modes=4,
)
x = torch.rand(batch, input_numb_fields, 10)
x.requires_grad = True
sconv(x)
    loss = torch.mean(sconv(x))
    loss.backward()
    assert x.grad.shape == torch.Size([5, 3, 10])
def test_constructor_2d():
SpectralConvBlock2D(
input_numb_fields=input_numb_fields,
output_numb_fields=output_numb_fields,
n_modes=[5, 4],
)
def test_forward_2d():
sconv = SpectralConvBlock2D(
input_numb_fields=input_numb_fields,
output_numb_fields=output_numb_fields,
n_modes=[5, 4],
)
x = torch.rand(batch, input_numb_fields, 10, 10)
sconv(x)
def test_backward_2d():
sconv = SpectralConvBlock2D(
input_numb_fields=input_numb_fields,
output_numb_fields=output_numb_fields,
n_modes=[5, 4],
)
x = torch.rand(batch, input_numb_fields, 10, 10)
x.requires_grad = True
sconv(x)
    loss = torch.mean(sconv(x))
    loss.backward()
    assert x.grad.shape == torch.Size([5, 3, 10, 10])
def test_constructor_3d():
SpectralConvBlock3D(
input_numb_fields=input_numb_fields,
output_numb_fields=output_numb_fields,
n_modes=[5, 4, 4],
)
def test_forward_3d():
sconv = SpectralConvBlock3D(
input_numb_fields=input_numb_fields,
output_numb_fields=output_numb_fields,
n_modes=[5, 4, 4],
)
x = torch.rand(batch, input_numb_fields, 10, 10, 10)
sconv(x)
def test_backward_3d():
sconv = SpectralConvBlock3D(
input_numb_fields=input_numb_fields,
output_numb_fields=output_numb_fields,
n_modes=[5, 4, 4],
)
x = torch.rand(batch, input_numb_fields, 10, 10, 10)
x.requires_grad = True
sconv(x)
    loss = torch.mean(sconv(x))
    loss.backward()
    assert x.grad.shape == torch.Size([5, 3, 10, 10, 10])
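
For reference, a forward-only sketch of the 3D variant (the field count changes, and the spatial grid is assumed to be preserved, as is standard for spectral convolutions):

import torch
from pina.model.block import SpectralConvBlock3D

block = SpectralConvBlock3D(
    input_numb_fields=3, output_numb_fields=4, n_modes=[5, 4, 4]
)
y = block(torch.rand(5, 3, 10, 10, 10))
print(y.shape)  # expected: torch.Size([5, 4, 10, 10, 10])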