fnn update, pinn torch models, tests update (#88)
* FNN update, remove LabelTensors
* allow custom torch models
* update tests

Co-authored-by: Dario Coscia <dariocoscia@Dario-Coscia.local>
Co-authored-by: Dario Coscia <dariocoscia@dhcp-031.eduroam.sissa.it>
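As a quick illustration of the "allow custom torch models" change, the sketch below mirrors what the new tests exercise: a plain torch.nn.Module is used as the filter model of ContinuousConv (as in tests/test_model/test_conv.py), and a model is wrapped by pina.model.Network together with its input/output variables (as in tests/test_model/test_network.py, which wraps a FeedForward). The MLP class, layer sizes, and variable names here are illustrative only, and wrapping an arbitrary module in Network is assumed to behave like the wrapped FeedForward in the tests.

import torch
from pina import LabelTensor
from pina.model import Network
from pina.model.layers import ContinuousConv


class MLP(torch.nn.Module):
    # small illustrative filter network: 2 coordinates in, 1 value out
    def __init__(self):
        super().__init__()
        self.model = torch.nn.Sequential(torch.nn.Linear(2, 8),
                                         torch.nn.ReLU(),
                                         torch.nn.Linear(8, 1))

    def forward(self, x):
        return self.model(x)


# custom torch model as the ContinuousConv filter (the tests pass the class itself)
stride = {"domain": [10, 10], "start": [0, 0],
          "jumps": [3, 3], "direction": [1, 1.]}
conv = ContinuousConv(2, 6, [3, 3], stride, model=MLP)

# a torch model wrapped by Network; an MLP instance is assumed to be accepted
# in place of the FeedForward used in test_network.py
pina_net = Network(model=MLP(), input_variables=['x', 'y'],
                   output_variables=['u'])
pts = LabelTensor(torch.rand((20, 2)), ['x', 'y'])
out = pina_net(pts)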
Committed by: Nicola Demo
Parent: c8fb7715c4
Commit: be11110bb2
tests/test_model/test_conv.py (new file, 140 lines)
@@ -0,0 +1,140 @@
from pina.model.layers import ContinuousConv
import torch


def prod(iterable):
    """Return the product of the elements of an iterable."""
    p = 1
    for n in iterable:
        p *= n
    return p


def make_grid(x):
    """Flatten each image of the batch into per-channel [coords..., value] rows."""
    def _transform_image(image):

        # extracting image info
        channels, dimension = image.size()[0], image.size()[1:]

        # initializing transformed image
        coordinates = torch.zeros(
            [channels, prod(dimension), len(dimension) + 1]).to(image.device)

        # creating the n dimensional mesh grid
        values_mesh = [torch.arange(0, dim).float().to(
            image.device) for dim in dimension]
        mesh = torch.meshgrid(values_mesh)
        coordinates_mesh = [x.reshape(-1, 1) for x in mesh]
        coordinates_mesh.append(0)

        for count, channel in enumerate(image):
            coordinates_mesh[-1] = channel.reshape(-1, 1)
            coordinates[count] = torch.cat(coordinates_mesh, dim=1)

        return coordinates

    output = [_transform_image(current_image) for current_image in x]
    return torch.stack(output).to(x.device)


class MLP(torch.nn.Module):

    def __init__(self) -> None:
        super().__init__()
        self.model = torch.nn.Sequential(torch.nn.Linear(2, 8),
                                         torch.nn.ReLU(),
                                         torch.nn.Linear(8, 8),
                                         torch.nn.ReLU(),
                                         torch.nn.Linear(8, 1))

    def forward(self, x):
        return self.model(x)


# INPUTS
channel_input = 2
channel_output = 6
batch = 2
N = 10
dim = [3, 3]
stride = {"domain": [10, 10],
          "start": [0, 0],
          "jumps": [3, 3],
          "direction": [1, 1.]}
dim_filter = len(dim)
dim_input = (batch, channel_input, 10, dim_filter)
dim_output = (batch, channel_output, 4, dim_filter)
x = torch.rand(dim_input)
x = make_grid(x)


def test_constructor():
    model = MLP

    conv = ContinuousConv(channel_input,
                          channel_output,
                          dim,
                          stride,
                          model=model)
    conv = ContinuousConv(channel_input,
                          channel_output,
                          dim,
                          stride,
                          model=None)


def test_forward():
    model = MLP

    # simple forward
    conv = ContinuousConv(channel_input,
                          channel_output,
                          dim,
                          stride,
                          model=model)
    conv(x)

    # simple forward with optimization
    conv = ContinuousConv(channel_input,
                          channel_output,
                          dim,
                          stride,
                          model=model,
                          optimize=True)
    conv(x)


def test_transpose():
    model = MLP

    # simple transpose
    conv = ContinuousConv(channel_input,
                          channel_output,
                          dim,
                          stride,
                          model=model)

    conv2 = ContinuousConv(channel_output,
                           channel_input,
                           dim,
                           stride,
                           model=model)

    integrals = conv(x)
    conv2.transpose(integrals[..., -1], x)

    stride_no_overlap = {"domain": [10, 10],
                         "start": [0, 0],
                         "jumps": dim,
                         "direction": [1, 1.]}

    # simple transpose with optimization
    # conv = ContinuousConv(channel_input,
    #                       channel_output,
    #                       dim,
    #                       stride_no_overlap,
    #                       model=model,
    #                       optimize=True,
    #                       no_overlap=True)

    # integrals = conv(x)
    # conv.transpose(integrals[..., -1], x)
tests/test_model/test_deeponet.py (new file, 31 lines)
@@ -0,0 +1,31 @@
import pytest
import torch

from pina import LabelTensor
from pina.model import DeepONet
from pina.model import FeedForward as FFN

data = torch.rand((20, 3))
input_vars = ['a', 'b', 'c']
output_vars = ['d']
input_ = LabelTensor(data, input_vars)

# TODO

# def test_constructor():
#     branch = FFN(input_variables=['a', 'c'], output_variables=20)
#     trunk = FFN(input_variables=['b'], output_variables=20)
#     onet = DeepONet(nets=[trunk, branch], output_variables=output_vars)

# def test_constructor_fails_when_invalid_inner_layer_size():
#     branch = FFN(input_variables=['a', 'c'], output_variables=20)
#     trunk = FFN(input_variables=['b'], output_variables=19)
#     with pytest.raises(ValueError):
#         DeepONet(nets=[trunk, branch], output_variables=output_vars)

# def test_forward():
#     branch = FFN(input_variables=['a', 'c'], output_variables=10)
#     trunk = FFN(input_variables=['b'], output_variables=10)
#     onet = DeepONet(nets=[trunk, branch], output_variables=output_vars)
#     output_ = onet(input_)
#     assert output_.labels == output_vars
tests/test_model/test_fnn.py (new file, 33 lines)
@@ -0,0 +1,33 @@
import torch
import pytest

from pina.model import FeedForward


data = torch.rand((20, 3))
input_vars = 3
output_vars = 4


def test_constructor():
    FeedForward(input_vars, output_vars)
    FeedForward(input_vars, output_vars, inner_size=10, n_layers=20)
    FeedForward(input_vars, output_vars, layers=[10, 20, 5, 2])
    FeedForward(input_vars, output_vars, layers=[10, 20, 5, 2],
                func=torch.nn.ReLU)
    FeedForward(input_vars, output_vars, layers=[10, 20, 5, 2],
                func=[torch.nn.ReLU, torch.nn.ReLU, None, torch.nn.Tanh])


def test_constructor_wrong():
    with pytest.raises(RuntimeError):
        FeedForward(input_vars, output_vars, layers=[10, 20, 5, 2],
                    func=[torch.nn.ReLU, torch.nn.ReLU])


def test_forward():
    dim_in, dim_out = 3, 2
    fnn = FeedForward(dim_in, dim_out)
    output_ = fnn(data)
    assert output_.shape == (data.shape[0], dim_out)
tests/test_model/test_network.py (new file, 55 lines)
@@ -0,0 +1,55 @@
import torch
import torch.nn as nn
import pytest
from pina.model import Network, FeedForward
from pina import LabelTensor


class myFeature(torch.nn.Module):
    """
    Feature: sin(pi*x) * sin(pi*y)
    """

    def __init__(self):
        super(myFeature, self).__init__()

    def forward(self, x):
        t = (torch.sin(x.extract(['x'])*torch.pi) *
             torch.sin(x.extract(['y'])*torch.pi))
        return LabelTensor(t, ['sin(x)sin(y)'])


input_variables = ['x', 'y']
output_variables = ['u']
data = torch.rand((20, 2))
input_ = LabelTensor(data, input_variables)


def test_constructor():
    net = FeedForward(2, 1)
    pina_net = Network(model=net, input_variables=input_variables,
                       output_variables=output_variables)


def test_forward():
    net = FeedForward(2, 1)
    pina_net = Network(model=net, input_variables=input_variables,
                       output_variables=output_variables)
    output_ = pina_net(input_)
    assert output_.labels == output_variables


def test_constructor_extrafeat():
    net = FeedForward(3, 1)
    feat = [myFeature()]
    pina_net = Network(model=net, input_variables=input_variables,
                       output_variables=output_variables, extra_features=feat)


def test_forward_extrafeat():
    net = FeedForward(3, 1)
    feat = [myFeature()]
    pina_net = Network(model=net, input_variables=input_variables,
                       output_variables=output_variables, extra_features=feat)
    output_ = pina_net(input_)
    assert output_.labels == output_variables