Fix Codacy Warnings (#477)

---------

Co-authored-by: Dario Coscia <dariocos99@gmail.com>
Authored by Filippo Olivo, 2025-03-10 15:38:45 +01:00
Committed by Nicola Demo
parent e3790e049a
commit 4177bfbb50
157 changed files with 3473 additions and 3839 deletions
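Most hunks below apply the same style changes: string literals switch from single to double quotes, long call signatures are wrapped one argument per line with a trailing comma, and multi-line asserts are re-indented. The pattern is consistent with an auto-formatter such as Black in its default configuration; the commit message only mentions Codacy warnings, so the exact tool and settings are an assumption. A minimal sketch of checking a snippet against that style (assuming the black package is installed):

import black  # assumed tooling; the project may use different formatter settings

snippet = 'coordinates_indices = ["p"]\nfield_indices = ["v"]\n'
# black.format_str returns the formatted source; comparing it with the input
# tells whether the snippet already complies with the default style.
print(black.format_str(snippet, mode=black.Mode()) == snippet)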

View File

@@ -8,139 +8,166 @@ batch_size = 15
n_layers = 4
embedding_dim = 24
func = torch.nn.Tanh
coordinates_indices = ['p']
field_indices = ['v']
coordinates_indices = ["p"]
field_indices = ["v"]
def test_constructor():
# working constructor
lifting_net = torch.nn.Linear(len(coordinates_indices) + len(field_indices),
embedding_dim)
projecting_net = torch.nn.Linear(embedding_dim + len(field_indices),
len(field_indices))
lifting_net = torch.nn.Linear(
len(coordinates_indices) + len(field_indices), embedding_dim
)
projecting_net = torch.nn.Linear(
embedding_dim + len(field_indices), len(field_indices)
)
AveragingNeuralOperator(
lifting_net=lifting_net,
projecting_net=projecting_net,
coordinates_indices=coordinates_indices,
field_indices=field_indices,
n_layers=n_layers,
func=func)
func=func,
)
# not working constructor
with pytest.raises(ValueError):
AveragingNeuralOperator(
lifting_net=lifting_net,
projecting_net=projecting_net,
coordinates_indices=coordinates_indices,
field_indices=field_indices,
n_layers=3.2, # wrong
func=func)
lifting_net=lifting_net,
projecting_net=projecting_net,
coordinates_indices=coordinates_indices,
field_indices=field_indices,
n_layers=3.2, # wrong
func=func,
)
AveragingNeuralOperator(
lifting_net=lifting_net,
projecting_net=projecting_net,
coordinates_indices=coordinates_indices,
field_indices=field_indices,
n_layers=n_layers,
func=1) # wrong
AveragingNeuralOperator(
lifting_net=[0], # wrong
projecting_net=projecting_net,
coordinates_indices=coordinates_indices,
field_indices=field_indices,
n_layers=n_layers,
func=func)
AveragingNeuralOperator(
lifting_net=lifting_net,
projecting_net=[0], # wrong
coordinates_indices=coordinates_indices,
field_indices=field_indices,
n_layers=n_layers,
func=func)
AveragingNeuralOperator(
lifting_net=lifting_net,
projecting_net=projecting_net,
coordinates_indices=[0], #wrong
field_indices=field_indices,
n_layers=n_layers,
func=func)
AveragingNeuralOperator(
lifting_net=lifting_net,
projecting_net=projecting_net,
coordinates_indices=coordinates_indices,
field_indices=[0], #wrong
n_layers=n_layers,
func=func)
lifting_net = torch.nn.Linear(len(coordinates_indices),
embedding_dim)
AveragingNeuralOperator(
lifting_net=lifting_net,
projecting_net=projecting_net,
coordinates_indices=coordinates_indices,
field_indices=field_indices,
n_layers=n_layers,
func=func)
lifting_net = torch.nn.Linear(len(coordinates_indices) + len(field_indices),
embedding_dim)
projecting_net = torch.nn.Linear(embedding_dim,
len(field_indices))
func=1,
) # wrong
AveragingNeuralOperator(
lifting_net=[0], # wrong
projecting_net=projecting_net,
coordinates_indices=coordinates_indices,
field_indices=field_indices,
n_layers=n_layers,
func=func,
)
AveragingNeuralOperator(
lifting_net=lifting_net,
projecting_net=[0],  # wrong
coordinates_indices=coordinates_indices,
field_indices=field_indices,
n_layers=n_layers,
func=func,
)
AveragingNeuralOperator(
lifting_net=lifting_net,
projecting_net=projecting_net,
coordinates_indices=[0], # wrong
field_indices=field_indices,
n_layers=n_layers,
func=func,
)
AveragingNeuralOperator(
lifting_net=lifting_net,
projecting_net=projecting_net,
coordinates_indices=coordinates_indices,
field_indices=[0], # wrong
n_layers=n_layers,
func=func,
)
lifting_net = torch.nn.Linear(len(coordinates_indices), embedding_dim)
AveragingNeuralOperator(
lifting_net=lifting_net,
projecting_net=projecting_net,
coordinates_indices=coordinates_indices,
field_indices=field_indices,
n_layers=n_layers,
func=func)
func=func,
)
lifting_net = torch.nn.Linear(
len(coordinates_indices) + len(field_indices), embedding_dim
)
projecting_net = torch.nn.Linear(embedding_dim, len(field_indices))
AveragingNeuralOperator(
lifting_net=lifting_net,
projecting_net=projecting_net,
coordinates_indices=coordinates_indices,
field_indices=field_indices,
n_layers=n_layers,
func=func,
)
def test_forward():
lifting_net = torch.nn.Linear(len(coordinates_indices) + len(field_indices),
embedding_dim)
projecting_net = torch.nn.Linear(embedding_dim + len(field_indices),
len(field_indices))
avno=AveragingNeuralOperator(
lifting_net = torch.nn.Linear(
len(coordinates_indices) + len(field_indices), embedding_dim
)
projecting_net = torch.nn.Linear(
embedding_dim + len(field_indices), len(field_indices)
)
avno = AveragingNeuralOperator(
lifting_net=lifting_net,
projecting_net=projecting_net,
coordinates_indices=coordinates_indices,
field_indices=field_indices,
n_layers=n_layers,
func=func)
func=func,
)
input_ = LabelTensor(
torch.rand(batch_size, 100,
len(coordinates_indices) + len(field_indices)), ['p', 'v'])
torch.rand(
batch_size, 100, len(coordinates_indices) + len(field_indices)
),
["p", "v"],
)
out = avno(input_)
assert out.shape == torch.Size(
[batch_size, input_.shape[1], len(field_indices)])
[batch_size, input_.shape[1], len(field_indices)]
)
def test_backward():
lifting_net = torch.nn.Linear(len(coordinates_indices) + len(field_indices),
embedding_dim)
projecting_net = torch.nn.Linear(embedding_dim + len(field_indices),
len(field_indices))
avno=AveragingNeuralOperator(
lifting_net = torch.nn.Linear(
len(coordinates_indices) + len(field_indices), embedding_dim
)
projecting_net = torch.nn.Linear(
embedding_dim + len(field_indices), len(field_indices)
)
avno = AveragingNeuralOperator(
lifting_net=lifting_net,
projecting_net=projecting_net,
coordinates_indices=coordinates_indices,
field_indices=field_indices,
n_layers=n_layers,
func=func)
func=func,
)
input_ = LabelTensor(
torch.rand(batch_size, 100,
len(coordinates_indices) + len(field_indices)), ['p', 'v'])
torch.rand(
batch_size, 100, len(coordinates_indices) + len(field_indices)
),
["p", "v"],
)
input_ = input_.requires_grad_()
out = avno(input_)
tmp = torch.linalg.norm(out)
tmp.backward()
grad = input_.grad
assert grad.shape == torch.Size(
[batch_size, input_.shape[1],
len(coordinates_indices) + len(field_indices)])
[
batch_size,
input_.shape[1],
len(coordinates_indices) + len(field_indices),
]
)

View File

@@ -7,42 +7,50 @@ from pina.model import DeepONet
from pina.model import FeedForward
data = torch.rand((20, 3))
input_vars = ['a', 'b', 'c']
input_vars = ["a", "b", "c"]
input_ = LabelTensor(data, input_vars)
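# Reduction symbols provided by DeepONet._symbol_functions; each one is fed to
# the reduction argument by test_forward_symbol_funcs below.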
symbol_funcs_red = DeepONet._symbol_functions(dim=-1)
output_dims = [1, 5, 10, 20]
def test_constructor():
branch_net = FeedForward(input_dimensions=1, output_dimensions=10)
trunk_net = FeedForward(input_dimensions=2, output_dimensions=10)
DeepONet(branch_net=branch_net,
trunk_net=trunk_net,
input_indeces_branch_net=['a'],
input_indeces_trunk_net=['b', 'c'],
reduction='+',
aggregator='*')
DeepONet(
branch_net=branch_net,
trunk_net=trunk_net,
input_indeces_branch_net=["a"],
input_indeces_trunk_net=["b", "c"],
reduction="+",
aggregator="*",
)
def test_constructor_fails_when_invalid_inner_layer_size():
branch_net = FeedForward(input_dimensions=1, output_dimensions=10)
trunk_net = FeedForward(input_dimensions=2, output_dimensions=8)
with pytest.raises(ValueError):
DeepONet(branch_net=branch_net,
trunk_net=trunk_net,
input_indeces_branch_net=['a'],
input_indeces_trunk_net=['b', 'c'],
reduction='+',
aggregator='*')
DeepONet(
branch_net=branch_net,
trunk_net=trunk_net,
input_indeces_branch_net=["a"],
input_indeces_trunk_net=["b", "c"],
reduction="+",
aggregator="*",
)
def test_forward_extract_str():
branch_net = FeedForward(input_dimensions=1, output_dimensions=10)
trunk_net = FeedForward(input_dimensions=2, output_dimensions=10)
model = DeepONet(branch_net=branch_net,
trunk_net=trunk_net,
input_indeces_branch_net=['a'],
input_indeces_trunk_net=['b', 'c'],
reduction='+',
aggregator='*')
model = DeepONet(
branch_net=branch_net,
trunk_net=trunk_net,
input_indeces_branch_net=["a"],
input_indeces_trunk_net=["b", "c"],
reduction="+",
aggregator="*",
)
model(input_)
assert model(input_).shape[-1] == 1
@@ -50,82 +58,99 @@ def test_forward_extract_str():
def test_forward_extract_int():
branch_net = FeedForward(input_dimensions=1, output_dimensions=10)
trunk_net = FeedForward(input_dimensions=2, output_dimensions=10)
model = DeepONet(branch_net=branch_net,
trunk_net=trunk_net,
input_indeces_branch_net=[0],
input_indeces_trunk_net=[1, 2],
reduction='+',
aggregator='*')
model = DeepONet(
branch_net=branch_net,
trunk_net=trunk_net,
input_indeces_branch_net=[0],
input_indeces_trunk_net=[1, 2],
reduction="+",
aggregator="*",
)
model(data)
def test_backward_extract_int():
data = torch.rand((20, 3))
branch_net = FeedForward(input_dimensions=1, output_dimensions=10)
trunk_net = FeedForward(input_dimensions=2, output_dimensions=10)
model = DeepONet(branch_net=branch_net,
trunk_net=trunk_net,
input_indeces_branch_net=[0],
input_indeces_trunk_net=[1, 2],
reduction='+',
aggregator='*')
model = DeepONet(
branch_net=branch_net,
trunk_net=trunk_net,
input_indeces_branch_net=[0],
input_indeces_trunk_net=[1, 2],
reduction="+",
aggregator="*",
)
data.requires_grad = True
model(data)
l=torch.mean(model(data))
l = torch.mean(model(data))
l.backward()
assert data._grad.shape == torch.Size([20,3])
assert data._grad.shape == torch.Size([20, 3])
def test_forward_extract_str_wrong():
branch_net = FeedForward(input_dimensions=1, output_dimensions=10)
trunk_net = FeedForward(input_dimensions=2, output_dimensions=10)
model = DeepONet(branch_net=branch_net,
trunk_net=trunk_net,
input_indeces_branch_net=['a'],
input_indeces_trunk_net=['b', 'c'],
reduction='+',
aggregator='*')
model = DeepONet(
branch_net=branch_net,
trunk_net=trunk_net,
input_indeces_branch_net=["a"],
input_indeces_trunk_net=["b", "c"],
reduction="+",
aggregator="*",
)
with pytest.raises(RuntimeError):
model(data)
def test_backward_extract_str_wrong():
data = torch.rand((20, 3))
branch_net = FeedForward(input_dimensions=1, output_dimensions=10)
trunk_net = FeedForward(input_dimensions=2, output_dimensions=10)
model = DeepONet(branch_net=branch_net,
trunk_net=trunk_net,
input_indeces_branch_net=['a'],
input_indeces_trunk_net=['b', 'c'],
reduction='+',
aggregator='*')
model = DeepONet(
branch_net=branch_net,
trunk_net=trunk_net,
input_indeces_branch_net=["a"],
input_indeces_trunk_net=["b", "c"],
reduction="+",
aggregator="*",
)
data.requires_grad = True
with pytest.raises(RuntimeError):
model(data)
l=torch.mean(model(data))
l = torch.mean(model(data))
l.backward()
assert data._grad.shape == torch.Size([20,3])
assert data._grad.shape == torch.Size([20, 3])
@pytest.mark.parametrize('red', symbol_funcs_red)
@pytest.mark.parametrize("red", symbol_funcs_red)
def test_forward_symbol_funcs(red):
branch_net = FeedForward(input_dimensions=1, output_dimensions=10)
trunk_net = FeedForward(input_dimensions=2, output_dimensions=10)
model = DeepONet(branch_net=branch_net,
trunk_net=trunk_net,
input_indeces_branch_net=['a'],
input_indeces_trunk_net=['b', 'c'],
reduction=red,
aggregator='*')
model = DeepONet(
branch_net=branch_net,
trunk_net=trunk_net,
input_indeces_branch_net=["a"],
input_indeces_trunk_net=["b", "c"],
reduction=red,
aggregator="*",
)
model(input_)
assert model(input_).shape[-1] == 1
@pytest.mark.parametrize('out_dim', output_dims)
@pytest.mark.parametrize("out_dim", output_dims)
def test_forward_callable_reduction(out_dim):
branch_net = FeedForward(input_dimensions=1, output_dimensions=10)
trunk_net = FeedForward(input_dimensions=2, output_dimensions=10)
reduction_layer = Linear(10, out_dim)
model = DeepONet(branch_net=branch_net,
trunk_net=trunk_net,
input_indeces_branch_net=['a'],
input_indeces_trunk_net=['b', 'c'],
reduction=reduction_layer,
aggregator='*')
model = DeepONet(
branch_net=branch_net,
trunk_net=trunk_net,
input_indeces_branch_net=["a"],
input_indeces_trunk_net=["b", "c"],
reduction=reduction_layer,
aggregator="*",
)
model(input_)
assert model(input_).shape[-1] == out_dim

View File

@@ -12,22 +12,25 @@ def test_constructor():
FeedForward(input_vars, output_vars)
FeedForward(input_vars, output_vars, inner_size=10, n_layers=20)
FeedForward(input_vars, output_vars, layers=[10, 20, 5, 2])
FeedForward(input_vars,
output_vars,
layers=[10, 20, 5, 2],
func=torch.nn.ReLU)
FeedForward(input_vars,
output_vars,
layers=[10, 20, 5, 2],
func=[torch.nn.ReLU, torch.nn.ReLU, None, torch.nn.Tanh])
FeedForward(
input_vars, output_vars, layers=[10, 20, 5, 2], func=torch.nn.ReLU
)
FeedForward(
input_vars,
output_vars,
layers=[10, 20, 5, 2],
func=[torch.nn.ReLU, torch.nn.ReLU, None, torch.nn.Tanh],
)
def test_constructor_wrong():
with pytest.raises(RuntimeError):
FeedForward(input_vars,
output_vars,
layers=[10, 20, 5, 2],
func=[torch.nn.ReLU, torch.nn.ReLU])
FeedForward(
input_vars,
output_vars,
layers=[10, 20, 5, 2],
func=[torch.nn.ReLU, torch.nn.ReLU],
)
def test_forward():
@@ -36,11 +39,12 @@ def test_forward():
output_ = fnn(data)
assert output_.shape == (data.shape[0], dim_out)
def test_backward():
dim_in, dim_out = 3, 2
fnn = FeedForward(dim_in, dim_out)
data.requires_grad = True
output_ = fnn(data)
l=torch.mean(output_)
l = torch.mean(output_)
l.backward()
assert data._grad.shape == torch.Size([20,3])
assert data._grad.shape == torch.Size([20, 3])

View File

@@ -13,36 +13,44 @@ def test_constructor():
projecting_net = torch.nn.Linear(60, output_channels)
# simple constructor
FNO(lifting_net=lifting_net,
FNO(
lifting_net=lifting_net,
projecting_net=projecting_net,
n_modes=5,
dimensions=3,
inner_size=60,
n_layers=5)
n_layers=5,
)
# simple constructor with n_modes list
FNO(lifting_net=lifting_net,
FNO(
lifting_net=lifting_net,
projecting_net=projecting_net,
n_modes=[5, 3, 2],
dimensions=3,
inner_size=60,
n_layers=5)
n_layers=5,
)
# simple constructor with n_modes list of list
FNO(lifting_net=lifting_net,
FNO(
lifting_net=lifting_net,
projecting_net=projecting_net,
n_modes=[[5, 3, 2], [5, 3, 2]],
dimensions=3,
inner_size=60,
n_layers=2)
n_layers=2,
)
# simple constructor with n_modes list of list
projecting_net = torch.nn.Linear(50, output_channels)
FNO(lifting_net=lifting_net,
FNO(
lifting_net=lifting_net,
projecting_net=projecting_net,
n_modes=5,
dimensions=3,
layers=[50, 50])
layers=[50, 50],
)
def test_1d_forward():
@@ -50,12 +58,14 @@ def test_1d_forward():
input_ = torch.rand(batch_size, resolution[0], input_channels)
lifting_net = torch.nn.Linear(input_channels, lifting_dim)
projecting_net = torch.nn.Linear(60, output_channels)
fno = FNO(lifting_net=lifting_net,
projecting_net=projecting_net,
n_modes=5,
dimensions=1,
inner_size=60,
n_layers=2)
fno = FNO(
lifting_net=lifting_net,
projecting_net=projecting_net,
n_modes=5,
dimensions=1,
inner_size=60,
n_layers=2,
)
out = fno(input_)
assert out.shape == torch.Size([batch_size, resolution[0], output_channels])
@@ -65,91 +75,120 @@ def test_1d_backward():
input_ = torch.rand(batch_size, resolution[0], input_channels)
lifting_net = torch.nn.Linear(input_channels, lifting_dim)
projecting_net = torch.nn.Linear(60, output_channels)
fno = FNO(lifting_net=lifting_net,
projecting_net=projecting_net,
n_modes=5,
dimensions=1,
inner_size=60,
n_layers=2)
fno = FNO(
lifting_net=lifting_net,
projecting_net=projecting_net,
n_modes=5,
dimensions=1,
inner_size=60,
n_layers=2,
)
input_.requires_grad = True
out = fno(input_)
l = torch.mean(out)
l.backward()
assert input_.grad.shape == torch.Size([batch_size, resolution[0], input_channels])
assert input_.grad.shape == torch.Size(
[batch_size, resolution[0], input_channels]
)
def test_2d_forward():
input_channels = 2
input_ = torch.rand(batch_size, resolution[0], resolution[1],
input_channels)
input_ = torch.rand(
batch_size, resolution[0], resolution[1], input_channels
)
lifting_net = torch.nn.Linear(input_channels, lifting_dim)
projecting_net = torch.nn.Linear(60, output_channels)
fno = FNO(lifting_net=lifting_net,
projecting_net=projecting_net,
n_modes=5,
dimensions=2,
inner_size=60,
n_layers=2)
fno = FNO(
lifting_net=lifting_net,
projecting_net=projecting_net,
n_modes=5,
dimensions=2,
inner_size=60,
n_layers=2,
)
out = fno(input_)
assert out.shape == torch.Size(
[batch_size, resolution[0], resolution[1], output_channels])
[batch_size, resolution[0], resolution[1], output_channels]
)
def test_2d_backward():
input_channels = 2
input_ = torch.rand(batch_size, resolution[0], resolution[1],
input_channels)
input_ = torch.rand(
batch_size, resolution[0], resolution[1], input_channels
)
lifting_net = torch.nn.Linear(input_channels, lifting_dim)
projecting_net = torch.nn.Linear(60, output_channels)
fno = FNO(lifting_net=lifting_net,
projecting_net=projecting_net,
n_modes=5,
dimensions=2,
inner_size=60,
n_layers=2)
fno = FNO(
lifting_net=lifting_net,
projecting_net=projecting_net,
n_modes=5,
dimensions=2,
inner_size=60,
n_layers=2,
)
input_.requires_grad = True
out = fno(input_)
l = torch.mean(out)
l.backward()
assert input_.grad.shape == torch.Size([
batch_size, resolution[0], resolution[1], input_channels
])
assert input_.grad.shape == torch.Size(
[batch_size, resolution[0], resolution[1], input_channels]
)
def test_3d_forward():
input_channels = 3
input_ = torch.rand(batch_size, resolution[0], resolution[1], resolution[2],
input_channels)
input_ = torch.rand(
batch_size, resolution[0], resolution[1], resolution[2], input_channels
)
lifting_net = torch.nn.Linear(input_channels, lifting_dim)
projecting_net = torch.nn.Linear(60, output_channels)
fno = FNO(lifting_net=lifting_net,
projecting_net=projecting_net,
n_modes=5,
dimensions=3,
inner_size=60,
n_layers=2)
fno = FNO(
lifting_net=lifting_net,
projecting_net=projecting_net,
n_modes=5,
dimensions=3,
inner_size=60,
n_layers=2,
)
out = fno(input_)
assert out.shape == torch.Size([
batch_size, resolution[0], resolution[1], resolution[2], output_channels
])
assert out.shape == torch.Size(
[
batch_size,
resolution[0],
resolution[1],
resolution[2],
output_channels,
]
)
def test_3d_backward():
input_channels = 3
input_ = torch.rand(batch_size, resolution[0], resolution[1], resolution[2],
input_channels)
input_ = torch.rand(
batch_size, resolution[0], resolution[1], resolution[2], input_channels
)
lifting_net = torch.nn.Linear(input_channels, lifting_dim)
projecting_net = torch.nn.Linear(60, output_channels)
fno = FNO(lifting_net=lifting_net,
projecting_net=projecting_net,
n_modes=5,
dimensions=3,
inner_size=60,
n_layers=2)
fno = FNO(
lifting_net=lifting_net,
projecting_net=projecting_net,
n_modes=5,
dimensions=3,
inner_size=60,
n_layers=2,
)
input_.requires_grad = True
out = fno(input_)
l = torch.mean(out)
l.backward()
assert input_.grad.shape == torch.Size([
batch_size, resolution[0], resolution[1], resolution[2], input_channels
])
assert input_.grad.shape == torch.Size(
[
batch_size,
resolution[0],
resolution[1],
resolution[2],
input_channels,
]
)

View File

@@ -10,29 +10,46 @@ data = torch.rand(size=(batch_size, numb, input_dim), requires_grad=True)
output_shape = torch.Size([batch_size, numb, output_dim])
lifting_operator = FeedForward(input_dimensions=input_dim, output_dimensions=embedding_dim)
projection_operator = FeedForward(input_dimensions=embedding_dim, output_dimensions=output_dim)
integral_kernels = torch.nn.Sequential(FeedForward(input_dimensions=embedding_dim,
output_dimensions=embedding_dim),
FeedForward(input_dimensions=embedding_dim,
output_dimensions=embedding_dim),)
lifting_operator = FeedForward(
input_dimensions=input_dim, output_dimensions=embedding_dim
)
projection_operator = FeedForward(
input_dimensions=embedding_dim, output_dimensions=output_dim
)
integral_kernels = torch.nn.Sequential(
FeedForward(
input_dimensions=embedding_dim, output_dimensions=embedding_dim
),
FeedForward(
input_dimensions=embedding_dim, output_dimensions=embedding_dim
),
)
def test_constructor():
KernelNeuralOperator(lifting_operator=lifting_operator,
integral_kernels=integral_kernels,
projection_operator=projection_operator)
KernelNeuralOperator(
lifting_operator=lifting_operator,
integral_kernels=integral_kernels,
projection_operator=projection_operator,
)
def test_forward():
operator = KernelNeuralOperator(lifting_operator=lifting_operator,
integral_kernels=integral_kernels,
projection_operator=projection_operator)
operator = KernelNeuralOperator(
lifting_operator=lifting_operator,
integral_kernels=integral_kernels,
projection_operator=projection_operator,
)
out = operator(data)
assert out.shape == output_shape
def test_backward():
operator = KernelNeuralOperator(lifting_operator=lifting_operator,
integral_kernels=integral_kernels,
projection_operator=projection_operator)
operator = KernelNeuralOperator(
lifting_operator=lifting_operator,
integral_kernels=integral_kernels,
projection_operator=projection_operator,
)
out = operator(data)
loss = torch.nn.functional.mse_loss(out, torch.zeros_like(out))
loss.backward()

View File

@@ -10,132 +10,157 @@ embedding_dim = 24
func = torch.nn.Tanh
rank = 4
n_kernel_layers = 3
field_indices = ['u']
coordinates_indices = ['x', 'y']
field_indices = ["u"]
coordinates_indices = ["x", "y"]
def test_constructor():
# working constructor
lifting_net = torch.nn.Linear(len(coordinates_indices) + len(field_indices),
embedding_dim)
projecting_net = torch.nn.Linear(embedding_dim + len(coordinates_indices),
len(field_indices))
lifting_net = torch.nn.Linear(
len(coordinates_indices) + len(field_indices), embedding_dim
)
projecting_net = torch.nn.Linear(
embedding_dim + len(coordinates_indices), len(field_indices)
)
LowRankNeuralOperator(
lifting_net=lifting_net,
projecting_net=projecting_net,
coordinates_indices=coordinates_indices,
field_indices=field_indices,
n_kernel_layers=n_kernel_layers,
rank=rank)
rank=rank,
)
# not working constructor
with pytest.raises(ValueError):
LowRankNeuralOperator(
lifting_net=lifting_net,
projecting_net=projecting_net,
coordinates_indices=coordinates_indices,
field_indices=field_indices,
n_kernel_layers=3.2, # wrong
rank=rank)
lifting_net=lifting_net,
projecting_net=projecting_net,
coordinates_indices=coordinates_indices,
field_indices=field_indices,
n_kernel_layers=3.2, # wrong
rank=rank,
)
LowRankNeuralOperator(
lifting_net=[0], # wrong
projecting_net=projecting_net,
coordinates_indices=coordinates_indices,
field_indices=field_indices,
n_kernel_layers=n_kernel_layers,
rank=rank)
lifting_net=[0], # wrong
projecting_net=projecting_net,
coordinates_indices=coordinates_indices,
field_indices=field_indices,
n_kernel_layers=n_kernel_layers,
rank=rank,
)
LowRankNeuralOperator(
lifting_net=lifting_net,
projecting_net=[0], # wrong
coordinates_indices=coordinates_indices,
field_indices=field_indices,
n_kernel_layers=n_kernel_layers,
rank=rank)
lifting_net=lifting_net,
projecting_net=[0],  # wrong
coordinates_indices=coordinates_indices,
field_indices=field_indices,
n_kernel_layers=n_kernel_layers,
rank=rank,
)
LowRankNeuralOperator(
lifting_net=lifting_net,
projecting_net=projecting_net,
coordinates_indices=[0], #wrong
field_indices=field_indices,
n_kernel_layers=n_kernel_layers,
rank=rank)
lifting_net=lifting_net,
projecting_net=projecting_net,
coordinates_indices=[0], # wrong
field_indices=field_indices,
n_kernel_layers=n_kernel_layers,
rank=rank,
)
LowRankNeuralOperator(
lifting_net=lifting_net,
projecting_net=projecting_net,
coordinates_indices=coordinates_indices,
field_indices=[0], #wrong
n_kernel_layers=n_kernel_layers,
rank=rank)
lifting_net=lifting_net,
projecting_net=projecting_net,
coordinates_indices=coordinates_indices,
field_indices=[0], # wrong
n_kernel_layers=n_kernel_layers,
rank=rank,
)
lifting_net = torch.nn.Linear(len(coordinates_indices),
embedding_dim)
lifting_net = torch.nn.Linear(len(coordinates_indices), embedding_dim)
LowRankNeuralOperator(
lifting_net=lifting_net,
projecting_net=projecting_net,
coordinates_indices=coordinates_indices,
field_indices=field_indices,
n_kernel_layers=n_kernel_layers,
rank=rank)
lifting_net = torch.nn.Linear(len(coordinates_indices) + len(field_indices),
embedding_dim)
projecting_net = torch.nn.Linear(embedding_dim,
len(field_indices))
rank=rank,
)
lifting_net = torch.nn.Linear(
len(coordinates_indices) + len(field_indices), embedding_dim
)
projecting_net = torch.nn.Linear(embedding_dim, len(field_indices))
LowRankNeuralOperator(
lifting_net=lifting_net,
projecting_net=projecting_net,
coordinates_indices=coordinates_indices,
field_indices=field_indices,
n_kernel_layers=n_kernel_layers,
rank=rank)
rank=rank,
)
def test_forward():
lifting_net = torch.nn.Linear(len(coordinates_indices) + len(field_indices),
embedding_dim)
projecting_net = torch.nn.Linear(embedding_dim + len(coordinates_indices),
len(field_indices))
lifting_net = torch.nn.Linear(
len(coordinates_indices) + len(field_indices), embedding_dim
)
projecting_net = torch.nn.Linear(
embedding_dim + len(coordinates_indices), len(field_indices)
)
lno = LowRankNeuralOperator(
lifting_net=lifting_net,
projecting_net=projecting_net,
coordinates_indices=coordinates_indices,
field_indices=field_indices,
n_kernel_layers=n_kernel_layers,
rank=rank)
rank=rank,
)
input_ = LabelTensor(
torch.rand(batch_size, 100,
len(coordinates_indices) + len(field_indices)),
coordinates_indices + field_indices)
torch.rand(
batch_size, 100, len(coordinates_indices) + len(field_indices)
),
coordinates_indices + field_indices,
)
out = lno(input_)
assert out.shape == torch.Size(
[batch_size, input_.shape[1], len(field_indices)])
[batch_size, input_.shape[1], len(field_indices)]
)
def test_backward():
lifting_net = torch.nn.Linear(len(coordinates_indices) + len(field_indices),
embedding_dim)
projecting_net = torch.nn.Linear(embedding_dim + len(coordinates_indices),
len(field_indices))
lno=LowRankNeuralOperator(
lifting_net = torch.nn.Linear(
len(coordinates_indices) + len(field_indices), embedding_dim
)
projecting_net = torch.nn.Linear(
embedding_dim + len(coordinates_indices), len(field_indices)
)
lno = LowRankNeuralOperator(
lifting_net=lifting_net,
projecting_net=projecting_net,
coordinates_indices=coordinates_indices,
field_indices=field_indices,
n_kernel_layers=n_kernel_layers,
rank=rank)
rank=rank,
)
input_ = LabelTensor(
torch.rand(batch_size, 100,
len(coordinates_indices) + len(field_indices)),
coordinates_indices + field_indices)
torch.rand(
batch_size, 100, len(coordinates_indices) + len(field_indices)
),
coordinates_indices + field_indices,
)
input_ = input_.requires_grad_()
out = lno(input_)
tmp = torch.linalg.norm(out)
tmp.backward()
grad = input_.grad
assert grad.shape == torch.Size(
[batch_size, input_.shape[1],
len(coordinates_indices) + len(field_indices)])
[
batch_size,
input_.shape[1],
len(coordinates_indices) + len(field_indices),
]
)

View File

@@ -6,7 +6,7 @@ from pina.model import MIONet
from pina.model import FeedForward
data = torch.rand((20, 3))
input_vars = ['a', 'b', 'c']
input_vars = ["a", "b", "c"]
input_ = LabelTensor(data, input_vars)
@@ -14,42 +14,42 @@ def test_constructor():
branch_net1 = FeedForward(input_dimensions=1, output_dimensions=10)
branch_net2 = FeedForward(input_dimensions=2, output_dimensions=10)
trunk_net = FeedForward(input_dimensions=1, output_dimensions=10)
networks = {branch_net1: ['x'], branch_net2: ['x', 'y'], trunk_net: ['z']}
MIONet(networks=networks, reduction='+', aggregator='*')
networks = {branch_net1: ["x"], branch_net2: ["x", "y"], trunk_net: ["z"]}
MIONet(networks=networks, reduction="+", aggregator="*")
def test_constructor_fails_when_invalid_inner_layer_size():
branch_net1 = FeedForward(input_dimensions=1, output_dimensions=10)
branch_net2 = FeedForward(input_dimensions=2, output_dimensions=10)
trunk_net = FeedForward(input_dimensions=1, output_dimensions=12)
networks = {branch_net1: ['x'], branch_net2: ['x', 'y'], trunk_net: ['z']}
networks = {branch_net1: ["x"], branch_net2: ["x", "y"], trunk_net: ["z"]}
with pytest.raises(ValueError):
MIONet(networks=networks, reduction='+', aggregator='*')
MIONet(networks=networks, reduction="+", aggregator="*")
def test_forward_extract_str():
branch_net1 = FeedForward(input_dimensions=1, output_dimensions=10)
branch_net2 = FeedForward(input_dimensions=1, output_dimensions=10)
trunk_net = FeedForward(input_dimensions=1, output_dimensions=10)
networks = {branch_net1: ['a'], branch_net2: ['b'], trunk_net: ['c']}
model = MIONet(networks=networks, reduction='+', aggregator='*')
networks = {branch_net1: ["a"], branch_net2: ["b"], trunk_net: ["c"]}
model = MIONet(networks=networks, reduction="+", aggregator="*")
model(input_)
def test_backward_extract_str():
data = torch.rand((20, 3))
data.requires_grad = True
input_vars = ['a', 'b', 'c']
input_vars = ["a", "b", "c"]
input_ = LabelTensor(data, input_vars)
branch_net1 = FeedForward(input_dimensions=1, output_dimensions=10)
branch_net2 = FeedForward(input_dimensions=1, output_dimensions=10)
trunk_net = FeedForward(input_dimensions=1, output_dimensions=10)
networks = {branch_net1: ['a'], branch_net2: ['b'], trunk_net: ['c']}
model = MIONet(networks=networks, reduction='+', aggregator='*')
networks = {branch_net1: ["a"], branch_net2: ["b"], trunk_net: ["c"]}
model = MIONet(networks=networks, reduction="+", aggregator="*")
model(input_)
l = torch.mean(model(input_))
l.backward()
assert data._grad.shape == torch.Size([20,3])
assert data._grad.shape == torch.Size([20, 3])
def test_forward_extract_int():
@@ -57,7 +57,7 @@ def test_forward_extract_int():
branch_net2 = FeedForward(input_dimensions=1, output_dimensions=10)
trunk_net = FeedForward(input_dimensions=1, output_dimensions=10)
networks = {branch_net1: [0], branch_net2: [1], trunk_net: [2]}
model = MIONet(networks=networks, reduction='+', aggregator='*')
model = MIONet(networks=networks, reduction="+", aggregator="*")
model(data)
@@ -68,19 +68,19 @@ def test_backward_extract_int():
branch_net2 = FeedForward(input_dimensions=1, output_dimensions=10)
trunk_net = FeedForward(input_dimensions=1, output_dimensions=10)
networks = {branch_net1: [0], branch_net2: [1], trunk_net: [2]}
model = MIONet(networks=networks, reduction='+', aggregator='*')
model = MIONet(networks=networks, reduction="+", aggregator="*")
model(data)
l = torch.mean(model(data))
l.backward()
assert data._grad.shape == torch.Size([20,3])
assert data._grad.shape == torch.Size([20, 3])
def test_forward_extract_str_wrong():
branch_net1 = FeedForward(input_dimensions=1, output_dimensions=10)
branch_net2 = FeedForward(input_dimensions=1, output_dimensions=10)
trunk_net = FeedForward(input_dimensions=1, output_dimensions=10)
networks = {branch_net1: ['a'], branch_net2: ['b'], trunk_net: ['c']}
model = MIONet(networks=networks, reduction='+', aggregator='*')
networks = {branch_net1: ["a"], branch_net2: ["b"], trunk_net: ["c"]}
model = MIONet(networks=networks, reduction="+", aggregator="*")
with pytest.raises(RuntimeError):
model(data)
@@ -91,10 +91,10 @@ def test_backward_extract_str_wrong():
branch_net1 = FeedForward(input_dimensions=1, output_dimensions=10)
branch_net2 = FeedForward(input_dimensions=1, output_dimensions=10)
trunk_net = FeedForward(input_dimensions=1, output_dimensions=10)
networks = {branch_net1: ['a'], branch_net2: ['b'], trunk_net: ['c']}
model = MIONet(networks=networks, reduction='+', aggregator='*')
networks = {branch_net1: ["a"], branch_net2: ["b"], trunk_net: ["c"]}
model = MIONet(networks=networks, reduction="+", aggregator="*")
with pytest.raises(RuntimeError):
model(data)
l = torch.mean(model(data))
l.backward()
assert data._grad.shape == torch.Size([20,3])
assert data._grad.shape == torch.Size([20, 3])

View File

@@ -9,15 +9,17 @@ def test_constructor():
# wrong transformer nets (not 2)
with pytest.raises(ValueError):
ResidualFeedForward(input_dimensions=2,
output_dimensions=1,
transformer_nets=[torch.nn.Linear(2, 20)])
ResidualFeedForward(
input_dimensions=2,
output_dimensions=1,
transformer_nets=[torch.nn.Linear(2, 20)],
)
# wrong transformer nets (not nn.Module)
with pytest.raises(ValueError):
ResidualFeedForward(input_dimensions=2,
output_dimensions=1,
transformer_nets=[2, 2])
ResidualFeedForward(
input_dimensions=2, output_dimensions=1, transformer_nets=[2, 2]
)
def test_forward():
@@ -34,4 +36,3 @@ def test_backward():
l = torch.mean(model(x))
l.backward()
assert x.grad.shape == torch.Size([10, 2])

View File

@@ -9,54 +9,61 @@ output_vars = 4
valid_args = [
{
'knots': torch.tensor([0., 0., 0., 1., 2., 3., 3., 3.]),
'control_points': torch.tensor([0., 0., 1., 0., 0.]),
'order': 3
"knots": torch.tensor([0.0, 0.0, 0.0, 1.0, 2.0, 3.0, 3.0, 3.0]),
"control_points": torch.tensor([0.0, 0.0, 1.0, 0.0, 0.0]),
"order": 3,
},
{
'knots': torch.tensor([-2., -2., -2., -2., -1., 0., 1., 2., 2., 2., 2.]),
'control_points': torch.tensor([0., 0., 0., 6., 0., 0., 0.]),
'order': 4
"knots": torch.tensor(
[-2.0, -2.0, -2.0, -2.0, -1.0, 0.0, 1.0, 2.0, 2.0, 2.0, 2.0]
),
"control_points": torch.tensor([0.0, 0.0, 0.0, 6.0, 0.0, 0.0, 0.0]),
"order": 4,
},
# {'control_points': {'n': 5, 'dim': 1}, 'order': 2},
# {'control_points': {'n': 7, 'dim': 1}, 'order': 3}
]
def scipy_check(model, x, y):
from scipy.interpolate._bsplines import BSpline
import numpy as np
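# Note: scipy's BSpline takes the spline degree k, i.e. order - 1, hence the
# conversion below.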
spline = BSpline(
t=model.knots.detach().numpy(),
c=model.control_points.detach().numpy(),
k=model.order-1
k=model.order - 1,
)
y_scipy = spline(x).flatten()
y = y.detach().numpy()
np.testing.assert_allclose(y, y_scipy, atol=1e-5)
@pytest.mark.parametrize("args", valid_args)
def test_constructor(args):
Spline(**args)
def test_constructor_wrong():
with pytest.raises(ValueError):
Spline()
@pytest.mark.parametrize("args", valid_args)
def test_forward(args):
min_x = args['knots'][0]
max_x = args['knots'][-1]
min_x = args["knots"][0]
max_x = args["knots"][-1]
xi = torch.linspace(min_x, max_x, 1000)
model = Spline(**args)
yi = model(xi).squeeze()
scipy_check(model, xi, yi)
return
return
@pytest.mark.parametrize("args", valid_args)
def test_backward(args):
min_x = args['knots'][0]
max_x = args['knots'][-1]
min_x = args["knots"][0]
max_x = args["knots"][-1]
xi = torch.linspace(min_x, max_x, 100)
model = Spline(**args)
yi = model(xi)