fnn update, pinn torch models, tests update. (#88)
* fnn update, remove labeltensors * allow custom torch models * updating tests --------- Co-authored-by: Dario Coscia <dariocoscia@Dario-Coscia.local> Co-authored-by: Dario Coscia <dariocoscia@dhcp-031.eduroam.sissa.it>
This commit is contained in:
committed by
Nicola Demo
parent
c8fb7715c4
commit
be11110bb2
@@ -3,7 +3,7 @@ __all__ = [
|
|||||||
'LabelTensor',
|
'LabelTensor',
|
||||||
'Plotter',
|
'Plotter',
|
||||||
'Condition',
|
'Condition',
|
||||||
'Span',
|
'CartesianDomain',
|
||||||
'Location',
|
'Location',
|
||||||
]
|
]
|
||||||
|
|
||||||
@@ -11,6 +11,6 @@ from .meta import *
|
|||||||
from .label_tensor import LabelTensor
|
from .label_tensor import LabelTensor
|
||||||
from .pinn import PINN
|
from .pinn import PINN
|
||||||
from .plotter import Plotter
|
from .plotter import Plotter
|
||||||
from .span import Span
|
from .cartesian import CartesianDomain
|
||||||
from .condition import Condition
|
from .condition import Condition
|
||||||
from .location import Location
|
from .location import Location
|
||||||
|
|||||||
@@ -10,10 +10,12 @@ class FeedForward(torch.nn.Module):
|
|||||||
The PINA implementation of feedforward network, also refered as multilayer
|
The PINA implementation of feedforward network, also refered as multilayer
|
||||||
perceptron.
|
perceptron.
|
||||||
|
|
||||||
:param list(str) input_variables: the list containing the labels
|
:param int input_variables: The number of input components of the model.
|
||||||
corresponding to the input components of the model.
|
Expected tensor shape of the form (*, input_variables), where *
|
||||||
:param list(str) output_variables: the list containing the labels
|
means any number of dimensions including none.
|
||||||
corresponding to the components of the output computed by the model.
|
:param int output_variables: The number of output components of the model.
|
||||||
|
Expected tensor shape of the form (*, output_variables), where *
|
||||||
|
means any number of dimensions including none.
|
||||||
:param int inner_size: number of neurons in the hidden layer(s). Default is
|
:param int inner_size: number of neurons in the hidden layer(s). Default is
|
||||||
20.
|
20.
|
||||||
:param int n_layers: number of hidden layers. Default is 2.
|
:param int n_layers: number of hidden layers. Default is 2.
|
||||||
@@ -24,46 +26,31 @@ class FeedForward(torch.nn.Module):
|
|||||||
:param iterable(int) layers: a list containing the number of neurons for
|
:param iterable(int) layers: a list containing the number of neurons for
|
||||||
any hidden layers. If specified, the parameters `n_layers` e
|
any hidden layers. If specified, the parameters `n_layers` e
|
||||||
`inner_size` are not considered.
|
`inner_size` are not considered.
|
||||||
:param iterable(torch.nn.Module) extra_features: the additional input
|
|
||||||
features to use ad augmented input.
|
|
||||||
:param bool bias: If `True` the MLP will consider some bias.
|
:param bool bias: If `True` the MLP will consider some bias.
|
||||||
"""
|
"""
|
||||||
def __init__(self, input_variables, output_variables, inner_size=20,
|
def __init__(self, input_variables, output_variables, inner_size=20,
|
||||||
n_layers=2, func=nn.Tanh, layers=None, extra_features=None,
|
n_layers=2, func=nn.Tanh, layers=None, bias=True):
|
||||||
bias=True):
|
|
||||||
"""
|
"""
|
||||||
"""
|
"""
|
||||||
super().__init__()
|
super().__init__()
|
||||||
|
|
||||||
if extra_features is None:
|
|
||||||
extra_features = []
|
|
||||||
self.extra_features = nn.Sequential(*extra_features)
|
|
||||||
|
|
||||||
if isinstance(input_variables, int):
|
if not isinstance(input_variables, int):
|
||||||
self.input_variables = None
|
raise ValueError('input_variables expected to be int.')
|
||||||
self.input_dimension = input_variables
|
self.input_dimension = input_variables
|
||||||
elif isinstance(input_variables, (tuple, list)):
|
|
||||||
self.input_variables = input_variables
|
|
||||||
self.input_dimension = len(input_variables)
|
|
||||||
|
|
||||||
if isinstance(output_variables, int):
|
|
||||||
self.output_variables = None
|
|
||||||
self.output_dimension = output_variables
|
|
||||||
elif isinstance(output_variables, (tuple, list)):
|
|
||||||
self.output_variables = output_variables
|
|
||||||
self.output_dimension = len(output_variables)
|
|
||||||
|
|
||||||
n_features = len(extra_features)
|
|
||||||
|
|
||||||
|
if not isinstance(output_variables, int):
|
||||||
|
raise ValueError('output_variables expected to be int.')
|
||||||
|
self.output_dimension = output_variables
|
||||||
if layers is None:
|
if layers is None:
|
||||||
layers = [inner_size] * n_layers
|
layers = [inner_size] * n_layers
|
||||||
|
|
||||||
tmp_layers = layers.copy()
|
tmp_layers = layers.copy()
|
||||||
tmp_layers.insert(0, self.input_dimension+n_features)
|
tmp_layers.insert(0, self.input_dimension)
|
||||||
tmp_layers.append(self.output_dimension)
|
tmp_layers.append(self.output_dimension)
|
||||||
|
|
||||||
self.layers = []
|
self.layers = []
|
||||||
for i in range(len(tmp_layers)-1):
|
for i in range(len(tmp_layers) - 1):
|
||||||
self.layers.append(
|
self.layers.append(
|
||||||
nn.Linear(tmp_layers[i], tmp_layers[i + 1], bias=bias)
|
nn.Linear(tmp_layers[i], tmp_layers[i + 1], bias=bias)
|
||||||
)
|
)
|
||||||
@@ -71,7 +58,7 @@ class FeedForward(torch.nn.Module):
|
|||||||
if isinstance(func, list):
|
if isinstance(func, list):
|
||||||
self.functions = func
|
self.functions = func
|
||||||
else:
|
else:
|
||||||
self.functions = [func for _ in range(len(self.layers)-1)]
|
self.functions = [func for _ in range(len(self.layers) - 1)]
|
||||||
|
|
||||||
if len(self.layers) != len(self.functions) + 1:
|
if len(self.layers) != len(self.functions) + 1:
|
||||||
raise RuntimeError('uncosistent number of layers and functions')
|
raise RuntimeError('uncosistent number of layers and functions')
|
||||||
@@ -94,16 +81,4 @@ class FeedForward(torch.nn.Module):
|
|||||||
:return: the output computed by the model.
|
:return: the output computed by the model.
|
||||||
:rtype: LabelTensor
|
:rtype: LabelTensor
|
||||||
"""
|
"""
|
||||||
|
return self.model(x)
|
||||||
if self.input_variables:
|
|
||||||
x = x.extract(self.input_variables)
|
|
||||||
|
|
||||||
for feature in self.extra_features:
|
|
||||||
x = x.append(feature(x))
|
|
||||||
|
|
||||||
output = self.model(x).as_subclass(LabelTensor)
|
|
||||||
|
|
||||||
if self.output_variables:
|
|
||||||
output.labels = self.output_variables
|
|
||||||
|
|
||||||
return output
|
|
||||||
|
|||||||
11
pina/pinn.py
11
pina/pinn.py
@@ -3,6 +3,7 @@ import torch
|
|||||||
import torch.optim.lr_scheduler as lrs
|
import torch.optim.lr_scheduler as lrs
|
||||||
|
|
||||||
from .problem import AbstractProblem
|
from .problem import AbstractProblem
|
||||||
|
from .model import Network
|
||||||
from .label_tensor import LabelTensor
|
from .label_tensor import LabelTensor
|
||||||
from .utils import merge_tensors, PinaDataset
|
from .utils import merge_tensors, PinaDataset
|
||||||
|
|
||||||
@@ -15,6 +16,7 @@ class PINN(object):
|
|||||||
def __init__(self,
|
def __init__(self,
|
||||||
problem,
|
problem,
|
||||||
model,
|
model,
|
||||||
|
extra_features=None,
|
||||||
optimizer=torch.optim.Adam,
|
optimizer=torch.optim.Adam,
|
||||||
optimizer_kwargs=None,
|
optimizer_kwargs=None,
|
||||||
lr=0.001,
|
lr=0.001,
|
||||||
@@ -28,6 +30,8 @@ class PINN(object):
|
|||||||
'''
|
'''
|
||||||
:param AbstractProblem problem: the formualation of the problem.
|
:param AbstractProblem problem: the formualation of the problem.
|
||||||
:param torch.nn.Module model: the neural network model to use.
|
:param torch.nn.Module model: the neural network model to use.
|
||||||
|
:param torch.nn.Module extra_features: the additional input
|
||||||
|
features to use as augmented input.
|
||||||
:param torch.optim.Optimizer optimizer: the neural network optimizer to
|
:param torch.optim.Optimizer optimizer: the neural network optimizer to
|
||||||
use; default is `torch.optim.Adam`.
|
use; default is `torch.optim.Adam`.
|
||||||
:param dict optimizer_kwargs: Optimizer constructor keyword args.
|
:param dict optimizer_kwargs: Optimizer constructor keyword args.
|
||||||
@@ -68,7 +72,12 @@ class PINN(object):
|
|||||||
self.dtype = dtype
|
self.dtype = dtype
|
||||||
self.history_loss = {}
|
self.history_loss = {}
|
||||||
|
|
||||||
self.model = model
|
|
||||||
|
self.model = Network(model=model,
|
||||||
|
input_variables=problem.input_variables,
|
||||||
|
output_variables=problem.output_variables,
|
||||||
|
extra_features=extra_features)
|
||||||
|
|
||||||
self.model.to(dtype=self.dtype, device=self.device)
|
self.model.to(dtype=self.dtype, device=self.device)
|
||||||
|
|
||||||
self.truth_values = {}
|
self.truth_values = {}
|
||||||
|
|||||||
@@ -1,13 +1,13 @@
|
|||||||
import torch
|
import torch
|
||||||
import pytest
|
import pytest
|
||||||
|
|
||||||
from pina import LabelTensor, Condition, Span, PINN
|
from pina import LabelTensor, Condition, CartesianDomain, PINN
|
||||||
from pina.problem import SpatialProblem
|
from pina.problem import SpatialProblem
|
||||||
from pina.model import FeedForward
|
from pina.model import FeedForward
|
||||||
from pina.operators import nabla
|
from pina.operators import nabla
|
||||||
|
|
||||||
|
|
||||||
example_domain = Span({'x': [0, 1], 'y': [0, 1]})
|
example_domain = CartesianDomain({'x': [0, 1], 'y': [0, 1]})
|
||||||
def example_dirichlet(input_, output_):
|
def example_dirichlet(input_, output_):
|
||||||
value = 0.0
|
value = 0.0
|
||||||
return output_.extract(['u']) - value
|
return output_.extract(['u']) - value
|
||||||
|
|||||||
@@ -1,30 +0,0 @@
|
|||||||
import pytest
|
|
||||||
import torch
|
|
||||||
|
|
||||||
from pina import LabelTensor
|
|
||||||
from pina.model import DeepONet
|
|
||||||
from pina.model import FeedForward as FFN
|
|
||||||
|
|
||||||
data = torch.rand((20, 3))
|
|
||||||
input_vars = ['a', 'b', 'c']
|
|
||||||
output_vars = ['d']
|
|
||||||
input_ = LabelTensor(data, input_vars)
|
|
||||||
|
|
||||||
|
|
||||||
def test_constructor():
|
|
||||||
branch = FFN(input_variables=['a', 'c'], output_variables=20)
|
|
||||||
trunk = FFN(input_variables=['b'], output_variables=20)
|
|
||||||
onet = DeepONet(nets=[trunk, branch], output_variables=output_vars)
|
|
||||||
|
|
||||||
def test_constructor_fails_when_invalid_inner_layer_size():
|
|
||||||
branch = FFN(input_variables=['a', 'c'], output_variables=20)
|
|
||||||
trunk = FFN(input_variables=['b'], output_variables=19)
|
|
||||||
with pytest.raises(ValueError):
|
|
||||||
DeepONet(nets=[trunk, branch], output_variables=output_vars)
|
|
||||||
|
|
||||||
def test_forward():
|
|
||||||
branch = FFN(input_variables=['a', 'c'], output_variables=10)
|
|
||||||
trunk = FFN(input_variables=['b'], output_variables=10)
|
|
||||||
onet = DeepONet(nets=[trunk, branch], output_variables=output_vars)
|
|
||||||
output_ = onet(input_)
|
|
||||||
assert output_.labels == output_vars
|
|
||||||
@@ -1,58 +0,0 @@
|
|||||||
import torch
|
|
||||||
import pytest
|
|
||||||
|
|
||||||
from pina import LabelTensor
|
|
||||||
from pina.model import FeedForward
|
|
||||||
|
|
||||||
class myFeature(torch.nn.Module):
|
|
||||||
"""
|
|
||||||
Feature: sin(pi*x)
|
|
||||||
"""
|
|
||||||
def __init__(self):
|
|
||||||
super(myFeature, self).__init__()
|
|
||||||
|
|
||||||
def forward(self, x):
|
|
||||||
return LabelTensor(torch.sin(torch.pi * x.extract('a')), 'sin(a)')
|
|
||||||
|
|
||||||
|
|
||||||
data = torch.rand((20, 3))
|
|
||||||
input_vars = ['a', 'b', 'c']
|
|
||||||
output_vars = ['d', 'e']
|
|
||||||
input_ = LabelTensor(data, input_vars)
|
|
||||||
|
|
||||||
|
|
||||||
def test_constructor():
|
|
||||||
FeedForward(input_vars, output_vars)
|
|
||||||
FeedForward(3, 4)
|
|
||||||
FeedForward(input_vars, output_vars, extra_features=[myFeature()])
|
|
||||||
FeedForward(input_vars, output_vars, inner_size=10, n_layers=20)
|
|
||||||
FeedForward(input_vars, output_vars, layers=[10, 20, 5, 2])
|
|
||||||
FeedForward(input_vars, output_vars, layers=[10, 20, 5, 2],
|
|
||||||
func=torch.nn.ReLU)
|
|
||||||
FeedForward(input_vars, output_vars, layers=[10, 20, 5, 2],
|
|
||||||
func=[torch.nn.ReLU, torch.nn.ReLU, None, torch.nn.Tanh])
|
|
||||||
|
|
||||||
|
|
||||||
def test_constructor_wrong():
|
|
||||||
with pytest.raises(RuntimeError):
|
|
||||||
FeedForward(input_vars, output_vars, layers=[10, 20, 5, 2],
|
|
||||||
func=[torch.nn.ReLU, torch.nn.ReLU])
|
|
||||||
|
|
||||||
|
|
||||||
def test_forward():
|
|
||||||
fnn = FeedForward(input_vars, output_vars)
|
|
||||||
output_ = fnn(input_)
|
|
||||||
assert output_.labels == output_vars
|
|
||||||
|
|
||||||
|
|
||||||
def test_forward2():
|
|
||||||
dim_in, dim_out = 3, 2
|
|
||||||
fnn = FeedForward(dim_in, dim_out)
|
|
||||||
output_ = fnn(input_)
|
|
||||||
assert output_.shape == (input_.shape[0], dim_out)
|
|
||||||
|
|
||||||
|
|
||||||
def test_forward_features():
|
|
||||||
fnn = FeedForward(input_vars, output_vars, extra_features=[myFeature()])
|
|
||||||
output_ = fnn(input_)
|
|
||||||
assert output_.labels == output_vars
|
|
||||||
31
tests/test_model/test_deeponet.py
Normal file
31
tests/test_model/test_deeponet.py
Normal file
@@ -0,0 +1,31 @@
|
|||||||
|
import pytest
|
||||||
|
import torch
|
||||||
|
|
||||||
|
from pina import LabelTensor
|
||||||
|
from pina.model import DeepONet
|
||||||
|
from pina.model import FeedForward as FFN
|
||||||
|
|
||||||
|
data = torch.rand((20, 3))
|
||||||
|
input_vars = ['a', 'b', 'c']
|
||||||
|
output_vars = ['d']
|
||||||
|
input_ = LabelTensor(data, input_vars)
|
||||||
|
|
||||||
|
# TODO
|
||||||
|
|
||||||
|
# def test_constructor():
|
||||||
|
# branch = FFN(input_variables=['a', 'c'], output_variables=20)
|
||||||
|
# trunk = FFN(input_variables=['b'], output_variables=20)
|
||||||
|
# onet = DeepONet(nets=[trunk, branch], output_variables=output_vars)
|
||||||
|
|
||||||
|
# def test_constructor_fails_when_invalid_inner_layer_size():
|
||||||
|
# branch = FFN(input_variables=['a', 'c'], output_variables=20)
|
||||||
|
# trunk = FFN(input_variables=['b'], output_variables=19)
|
||||||
|
# with pytest.raises(ValueError):
|
||||||
|
# DeepONet(nets=[trunk, branch], output_variables=output_vars)
|
||||||
|
|
||||||
|
# def test_forward():
|
||||||
|
# branch = FFN(input_variables=['a', 'c'], output_variables=10)
|
||||||
|
# trunk = FFN(input_variables=['b'], output_variables=10)
|
||||||
|
# onet = DeepONet(nets=[trunk, branch], output_variables=output_vars)
|
||||||
|
# output_ = onet(input_)
|
||||||
|
# assert output_.labels == output_vars
|
||||||
33
tests/test_model/test_fnn.py
Normal file
33
tests/test_model/test_fnn.py
Normal file
@@ -0,0 +1,33 @@
|
|||||||
|
import torch
|
||||||
|
import pytest
|
||||||
|
|
||||||
|
from pina.model import FeedForward
|
||||||
|
|
||||||
|
|
||||||
|
data = torch.rand((20, 3))
|
||||||
|
input_vars = 3
|
||||||
|
output_vars = 4
|
||||||
|
|
||||||
|
|
||||||
|
def test_constructor():
|
||||||
|
FeedForward(input_vars, output_vars)
|
||||||
|
FeedForward(input_vars, output_vars, inner_size=10, n_layers=20)
|
||||||
|
FeedForward(input_vars, output_vars, layers=[10, 20, 5, 2])
|
||||||
|
FeedForward(input_vars, output_vars, layers=[10, 20, 5, 2],
|
||||||
|
func=torch.nn.ReLU)
|
||||||
|
FeedForward(input_vars, output_vars, layers=[10, 20, 5, 2],
|
||||||
|
func=[torch.nn.ReLU, torch.nn.ReLU, None, torch.nn.Tanh])
|
||||||
|
|
||||||
|
|
||||||
|
def test_constructor_wrong():
|
||||||
|
with pytest.raises(RuntimeError):
|
||||||
|
FeedForward(input_vars, output_vars, layers=[10, 20, 5, 2],
|
||||||
|
func=[torch.nn.ReLU, torch.nn.ReLU])
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
def test_forward():
|
||||||
|
dim_in, dim_out = 3, 2
|
||||||
|
fnn = FeedForward(dim_in, dim_out)
|
||||||
|
output_ = fnn(data)
|
||||||
|
assert output_.shape == (data.shape[0], dim_out)
|
||||||
@@ -1,38 +1,10 @@
|
|||||||
import torch
|
import torch
|
||||||
import torch.nn as nn
|
import torch.nn as nn
|
||||||
import pytest
|
import pytest
|
||||||
from pina.model import Network
|
from pina.model import Network, FeedForward
|
||||||
from pina import LabelTensor
|
from pina import LabelTensor
|
||||||
|
|
||||||
|
|
||||||
class SimpleNet(nn.Module):
|
|
||||||
|
|
||||||
def __init__(self):
|
|
||||||
super().__init__()
|
|
||||||
self.layers = nn.Sequential(
|
|
||||||
nn.Linear(2, 20),
|
|
||||||
nn.Tanh(),
|
|
||||||
nn.Linear(20, 1)
|
|
||||||
)
|
|
||||||
|
|
||||||
def forward(self, x):
|
|
||||||
return self.layers(x)
|
|
||||||
|
|
||||||
|
|
||||||
class SimpleNetExtraFeat(nn.Module):
|
|
||||||
|
|
||||||
def __init__(self):
|
|
||||||
super().__init__()
|
|
||||||
self.layers = nn.Sequential(
|
|
||||||
nn.Linear(3, 20),
|
|
||||||
nn.Tanh(),
|
|
||||||
nn.Linear(20, 1)
|
|
||||||
)
|
|
||||||
|
|
||||||
def forward(self, x):
|
|
||||||
return self.layers(x)
|
|
||||||
|
|
||||||
|
|
||||||
class myFeature(torch.nn.Module):
|
class myFeature(torch.nn.Module):
|
||||||
"""
|
"""
|
||||||
Feature: sin(x)
|
Feature: sin(x)
|
||||||
@@ -54,13 +26,13 @@ input_ = LabelTensor(data, input_variables)
|
|||||||
|
|
||||||
|
|
||||||
def test_constructor():
|
def test_constructor():
|
||||||
net = SimpleNet()
|
net = FeedForward(2, 1)
|
||||||
pina_net = Network(model=net, input_variables=input_variables,
|
pina_net = Network(model=net, input_variables=input_variables,
|
||||||
output_variables=output_variables)
|
output_variables=output_variables)
|
||||||
|
|
||||||
|
|
||||||
def test_forward():
|
def test_forward():
|
||||||
net = SimpleNet()
|
net = FeedForward(2, 1)
|
||||||
pina_net = Network(model=net, input_variables=input_variables,
|
pina_net = Network(model=net, input_variables=input_variables,
|
||||||
output_variables=output_variables)
|
output_variables=output_variables)
|
||||||
output_ = pina_net(input_)
|
output_ = pina_net(input_)
|
||||||
@@ -68,14 +40,14 @@ def test_forward():
|
|||||||
|
|
||||||
|
|
||||||
def test_constructor_extrafeat():
|
def test_constructor_extrafeat():
|
||||||
net = SimpleNetExtraFeat()
|
net = FeedForward(3, 1)
|
||||||
feat = [myFeature()]
|
feat = [myFeature()]
|
||||||
pina_net = Network(model=net, input_variables=input_variables,
|
pina_net = Network(model=net, input_variables=input_variables,
|
||||||
output_variables=output_variables, extra_features=feat)
|
output_variables=output_variables, extra_features=feat)
|
||||||
|
|
||||||
|
|
||||||
def test_forward_extrafeat():
|
def test_forward_extrafeat():
|
||||||
net = SimpleNetExtraFeat()
|
net = FeedForward(3, 1)
|
||||||
feat = [myFeature()]
|
feat = [myFeature()]
|
||||||
pina_net = Network(model=net, input_variables=input_variables,
|
pina_net = Network(model=net, input_variables=input_variables,
|
||||||
output_variables=output_variables, extra_features=feat)
|
output_variables=output_variables, extra_features=feat)
|
||||||
@@ -1,7 +1,7 @@
|
|||||||
import torch
|
import torch
|
||||||
import pytest
|
import pytest
|
||||||
|
|
||||||
from pina import LabelTensor, Condition, Span, PINN
|
from pina import LabelTensor, Condition, CartesianDomain, PINN
|
||||||
from pina.problem import SpatialProblem
|
from pina.problem import SpatialProblem
|
||||||
from pina.model import FeedForward
|
from pina.model import FeedForward
|
||||||
from pina.operators import nabla
|
from pina.operators import nabla
|
||||||
@@ -11,7 +11,7 @@ out_ = LabelTensor(torch.tensor([[0.]]), ['u'])
|
|||||||
|
|
||||||
class Poisson(SpatialProblem):
|
class Poisson(SpatialProblem):
|
||||||
output_variables = ['u']
|
output_variables = ['u']
|
||||||
spatial_domain = Span({'x': [0, 1], 'y': [0, 1]})
|
spatial_domain = CartesianDomain({'x': [0, 1], 'y': [0, 1]})
|
||||||
|
|
||||||
def laplace_equation(input_, output_):
|
def laplace_equation(input_, output_):
|
||||||
force_term = (torch.sin(input_.extract(['x'])*torch.pi) *
|
force_term = (torch.sin(input_.extract(['x'])*torch.pi) *
|
||||||
@@ -25,19 +25,19 @@ class Poisson(SpatialProblem):
|
|||||||
|
|
||||||
conditions = {
|
conditions = {
|
||||||
'gamma1': Condition(
|
'gamma1': Condition(
|
||||||
location=Span({'x': [0, 1], 'y': 1}),
|
location=CartesianDomain({'x': [0, 1], 'y': 1}),
|
||||||
function=nil_dirichlet),
|
function=nil_dirichlet),
|
||||||
'gamma2': Condition(
|
'gamma2': Condition(
|
||||||
location=Span({'x': [0, 1], 'y': 0}),
|
location=CartesianDomain({'x': [0, 1], 'y': 0}),
|
||||||
function=nil_dirichlet),
|
function=nil_dirichlet),
|
||||||
'gamma3': Condition(
|
'gamma3': Condition(
|
||||||
location=Span({'x': 1, 'y': [0, 1]}),
|
location=CartesianDomain({'x': 1, 'y': [0, 1]}),
|
||||||
function=nil_dirichlet),
|
function=nil_dirichlet),
|
||||||
'gamma4': Condition(
|
'gamma4': Condition(
|
||||||
location=Span({'x': 0, 'y': [0, 1]}),
|
location=CartesianDomain({'x': 0, 'y': [0, 1]}),
|
||||||
function=nil_dirichlet),
|
function=nil_dirichlet),
|
||||||
'D': Condition(
|
'D': Condition(
|
||||||
location=Span({'x': [0, 1], 'y': [0, 1]}),
|
location=CartesianDomain({'x': [0, 1], 'y': [0, 1]}),
|
||||||
function=laplace_equation),
|
function=laplace_equation),
|
||||||
'data': Condition(
|
'data': Condition(
|
||||||
input_points=in_,
|
input_points=in_,
|
||||||
@@ -53,15 +53,33 @@ class Poisson(SpatialProblem):
|
|||||||
truth_solution = poisson_sol
|
truth_solution = poisson_sol
|
||||||
|
|
||||||
|
|
||||||
problem = Poisson()
|
class myFeature(torch.nn.Module):
|
||||||
|
"""
|
||||||
|
Feature: sin(x)
|
||||||
|
"""
|
||||||
|
|
||||||
model = FeedForward(problem.input_variables, problem.output_variables)
|
def __init__(self):
|
||||||
|
super(myFeature, self).__init__()
|
||||||
|
|
||||||
|
def forward(self, x):
|
||||||
|
t = (torch.sin(x.extract(['x'])*torch.pi) *
|
||||||
|
torch.sin(x.extract(['y'])*torch.pi))
|
||||||
|
return LabelTensor(t, ['sin(x)sin(y)'])
|
||||||
|
|
||||||
|
|
||||||
|
problem = Poisson()
|
||||||
|
model = FeedForward(len(problem.input_variables),len(problem.output_variables))
|
||||||
|
model_extra_feat = FeedForward(len(problem.input_variables) + 1,len(problem.output_variables))
|
||||||
|
|
||||||
|
|
||||||
def test_constructor():
|
def test_constructor():
|
||||||
PINN(problem, model)
|
PINN(problem, model)
|
||||||
|
|
||||||
|
|
||||||
|
def test_constructor_extra_feats():
|
||||||
|
PINN(problem, model_extra_feat, [myFeature()])
|
||||||
|
|
||||||
|
|
||||||
def test_span_pts():
|
def test_span_pts():
|
||||||
pinn = PINN(problem, model)
|
pinn = PINN(problem, model)
|
||||||
n = 10
|
n = 10
|
||||||
@@ -133,6 +151,28 @@ def test_train_2():
|
|||||||
assert list(pinn.history_loss.keys()) == truth_key
|
assert list(pinn.history_loss.keys()) == truth_key
|
||||||
|
|
||||||
|
|
||||||
|
def test_train_extra_feats():
|
||||||
|
pinn = PINN(problem, model_extra_feat, [myFeature()])
|
||||||
|
boundaries = ['gamma1', 'gamma2', 'gamma3', 'gamma4']
|
||||||
|
n = 10
|
||||||
|
pinn.span_pts(n, 'grid', locations=boundaries)
|
||||||
|
pinn.span_pts(n, 'grid', locations=['D'])
|
||||||
|
pinn.train(5)
|
||||||
|
|
||||||
|
|
||||||
|
def test_train_2_extra_feats():
|
||||||
|
boundaries = ['gamma1', 'gamma2', 'gamma3', 'gamma4']
|
||||||
|
n = 10
|
||||||
|
expected_keys = [[], list(range(0, 50, 3))]
|
||||||
|
param = [0, 3]
|
||||||
|
for i, truth_key in zip(param, expected_keys):
|
||||||
|
pinn = PINN(problem, model_extra_feat, [myFeature()])
|
||||||
|
pinn.span_pts(n, 'grid', locations=boundaries)
|
||||||
|
pinn.span_pts(n, 'grid', locations=['D'])
|
||||||
|
pinn.train(50, save_loss=i)
|
||||||
|
assert list(pinn.history_loss.keys()) == truth_key
|
||||||
|
|
||||||
|
|
||||||
def test_train_with_optimizer_kwargs():
|
def test_train_with_optimizer_kwargs():
|
||||||
boundaries = ['gamma1', 'gamma2', 'gamma3', 'gamma4']
|
boundaries = ['gamma1', 'gamma2', 'gamma3', 'gamma4']
|
||||||
n = 10
|
n = 10
|
||||||
|
|||||||
Reference in New Issue
Block a user