Lightning update (#104)
* multiple functions for version 0.0
* lightning update
* minor changes
* data PINN loss added

---------

Co-authored-by: Nicola Demo <demo.nicola@gmail.com>
Co-authored-by: Dario Coscia <dariocoscia@cli-10-110-3-125.WIFIeduroamSTUD.units.it>
Co-authored-by: Dario Coscia <dariocoscia@Dario-Coscia.station>
Co-authored-by: Dario Coscia <dariocoscia@Dario-Coscia.local>
Co-authored-by: Dario Coscia <dariocoscia@192.168.1.38>
committed by Nicola Demo
parent 0e3625de80
commit 63fd068988
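Summary of the change: the tests now sample collocation points directly on the problem with discretise_domain and train through the new Trainer class, instead of calling pinn.span_pts and pinn.train. The following sketch is assembled only from the updated tests in this commit (argument names such as solver= and kwargs={'max_epochs': 5} are copied from those tests, not from separate documentation); it is an illustrative sketch of the new workflow, not an authoritative API reference.

    from pina import PINN
    from pina.trainer import Trainer
    from pina.model import FeedForward
    from pina.loss import LpLoss

    # sample collocation points on the problem itself (the Poisson problem
    # defined in tests/test_problem.py), rather than on the PINN object
    poisson_problem = Poisson()
    poisson_problem.discretise_domain(10, 'grid',
                                      locations=['gamma1', 'gamma2', 'gamma3', 'gamma4'])
    poisson_problem.discretise_domain(10, 'grid', locations=['D'])

    # build the solver and let a Trainer drive the optimisation
    model = FeedForward(len(poisson_problem.input_variables),
                        len(poisson_problem.output_variables))
    pinn = PINN(problem=poisson_problem, model=model, extra_features=None, loss=LpLoss())
    trainer = Trainer(solver=pinn, kwargs={'max_epochs': 5})
    trainer.train()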
tests/test_loss.py (new file, 49 lines added)
@@ -0,0 +1,49 @@
import torch
import pytest

from pina.loss import *

input = torch.tensor([[3.], [1.], [-8.]])
target = torch.tensor([[6.], [4.], [2.]])
available_reductions = ['str', 'mean', 'none']


def test_LpLoss_constructor():
    # test reduction
    for reduction in available_reductions:
        LpLoss(reduction=reduction)
    # test p
    for p in [float('inf'), -float('inf'), 1, 10, -8]:
        LpLoss(p=p)


def test_LpLoss_forward():
    # l2 loss
    loss = LpLoss(p=2, reduction='mean')
    l2_loss = torch.mean(torch.sqrt((input-target).pow(2)))
    assert loss(input, target) == l2_loss
    # l1 loss
    loss = LpLoss(p=1, reduction='sum')
    l1_loss = torch.sum(torch.abs(input-target))
    assert loss(input, target) == l1_loss


def test_LpRelativeLoss_constructor():
    # test reduction
    for reduction in available_reductions:
        LpLoss(reduction=reduction, relative=True)
    # test p
    for p in [float('inf'), -float('inf'), 1, 10, -8]:
        LpLoss(p=p, relative=True)


def test_LpRelativeLoss_forward():
    # l2 relative loss
    loss = LpLoss(p=2, reduction='mean', relative=True)
    l2_loss = torch.sqrt((input-target).pow(2))/torch.sqrt(input.pow(2))
    assert loss(input, target) == torch.mean(l2_loss)
    # l1 relative loss
    loss = LpLoss(p=1, reduction='sum', relative=True)
    l1_loss = torch.abs(input-target)/torch.abs(input)
    assert loss(input, target) == torch.sum(l1_loss)
@@ -1,55 +0,0 @@
import torch
import torch.nn as nn
import pytest
from pina.model import Network, FeedForward
from pina import LabelTensor


class myFeature(torch.nn.Module):
    """
    Feature: sin(x)
    """

    def __init__(self):
        super(myFeature, self).__init__()

    def forward(self, x):
        t = (torch.sin(x.extract(['x'])*torch.pi) *
             torch.sin(x.extract(['y'])*torch.pi))
        return LabelTensor(t, ['sin(x)sin(y)'])


input_variables = ['x', 'y']
output_variables = ['u']
data = torch.rand((20, 2))
input_ = LabelTensor(data, input_variables)


def test_constructor():
    net = FeedForward(2, 1)
    pina_net = Network(model=net, input_variables=input_variables,
                       output_variables=output_variables)


def test_forward():
    net = FeedForward(2, 1)
    pina_net = Network(model=net, input_variables=input_variables,
                       output_variables=output_variables)
    output_ = pina_net(input_)
    assert output_.labels == output_variables


def test_constructor_extrafeat():
    net = FeedForward(3, 1)
    feat = [myFeature()]
    pina_net = Network(model=net, input_variables=input_variables,
                       output_variables=output_variables, extra_features=feat)


def test_forward_extrafeat():
    net = FeedForward(3, 1)
    feat = [myFeature()]
    pina_net = Network(model=net, input_variables=input_variables,
                       output_variables=output_variables, extra_features=feat)
    output_ = pina_net(input_)
    assert output_.labels == output_variables
@@ -1,17 +1,18 @@
import torch
import pytest

from pina import LabelTensor, Condition, CartesianDomain, PINN
from pina.problem import SpatialProblem
from pina.model import FeedForward
from pina.operators import nabla
from pina.geometry import CartesianDomain
from pina import Condition, LabelTensor, PINN
from pina.trainer import Trainer
from pina.model import FeedForward
from pina.equation.equation import Equation
from pina.equation.equation_factory import FixedValue
from pina.plotter import Plotter
from pina.loss import LpLoss


in_ = LabelTensor(torch.tensor([[0., 1.]]), ['x', 'y'])
out_ = LabelTensor(torch.tensor([[0.]]), ['u'])

def laplace_equation(input_, output_):
    force_term = (torch.sin(input_.extract(['x'])*torch.pi) *
                  torch.sin(input_.extract(['y'])*torch.pi))
@@ -19,6 +20,8 @@ def laplace_equation(input_, output_):
    return nabla_u - force_term

my_laplace = Equation(laplace_equation)
in_ = LabelTensor(torch.tensor([[0., 1.]], requires_grad=True), ['x', 'y'])
out_ = LabelTensor(torch.tensor([[0.]], requires_grad=True), ['u'])

class Poisson(SpatialProblem):
    output_variables = ['u']
@@ -68,75 +71,40 @@ class myFeature(torch.nn.Module):
        return LabelTensor(t, ['sin(x)sin(y)'])


problem = Poisson()
model = FeedForward(len(problem.input_variables), len(problem.output_variables))
model_extra_feat = FeedForward(len(problem.input_variables) + 1, len(problem.output_variables))
# make the problem
poisson_problem = Poisson()
model = FeedForward(len(poisson_problem.input_variables), len(poisson_problem.output_variables))
model_extra_feats = FeedForward(len(poisson_problem.input_variables)+1, len(poisson_problem.output_variables))
extra_feats = [myFeature()]


def test_constructor():
    PINN(problem, model)
    PINN(problem=poisson_problem, model=model, extra_features=None)


def test_constructor_extra_feats():
    PINN(problem, model_extra_feat, [myFeature()])


def test_span_pts():
    pinn = PINN(problem, model)
    n = 10
    boundaries = ['gamma1', 'gamma2', 'gamma3', 'gamma4']
    pinn.span_pts(n, 'grid', locations=boundaries)
    for b in boundaries:
        assert pinn.input_pts[b].shape[0] == n
    pinn.span_pts(n, 'random', locations=boundaries)
    for b in boundaries:
        assert pinn.input_pts[b].shape[0] == n

    pinn.span_pts(n, 'grid', locations=['D'])
    assert pinn.input_pts['D'].shape[0] == n**2
    pinn.span_pts(n, 'random', locations=['D'])
    assert pinn.input_pts['D'].shape[0] == n

    pinn.span_pts(n, 'latin', locations=['D'])
    assert pinn.input_pts['D'].shape[0] == n

    pinn.span_pts(n, 'lh', locations=['D'])
    assert pinn.input_pts['D'].shape[0] == n


def test_sampling_all_args():
    pinn = PINN(problem, model)
    n = 10
    pinn.span_pts(n, 'grid', locations=['D'])


def test_sampling_all_kwargs():
    pinn = PINN(problem, model)
    n = 10
    pinn.span_pts(n=n, mode='latin', locations=['D'])


def test_sampling_dict():
    pinn = PINN(problem, model)
    n = 10
    pinn.span_pts(
        {'variables': ['x', 'y'], 'mode': 'grid', 'n': n}, locations=['D'])


def test_sampling_mixed_args_kwargs():
    pinn = PINN(problem, model)
    n = 10
    with pytest.raises(ValueError):
        pinn.span_pts(n, mode='latin', locations=['D'])

model_extra_feats = FeedForward(len(poisson_problem.input_variables)+1, len(poisson_problem.output_variables))
PINN(problem=poisson_problem, model=model_extra_feats, extra_features=extra_feats)

def test_train():
    pinn = PINN(problem, model)
    poisson_problem = Poisson()
    boundaries = ['gamma1', 'gamma2', 'gamma3', 'gamma4']
    n = 10
    pinn.span_pts(n, 'grid', locations=boundaries)
    pinn.span_pts(n, 'grid', locations=['D'])
    pinn.train(5)
    poisson_problem.discretise_domain(n, 'grid', locations=boundaries)
    poisson_problem.discretise_domain(n, 'grid', locations=['D'])
    pinn = PINN(problem=poisson_problem, model=model, extra_features=None, loss=LpLoss())
    trainer = Trainer(solver=pinn, kwargs={'max_epochs': 5})
    trainer.train()

def test_train_extra_feats():
    poisson_problem = Poisson()
    boundaries = ['gamma1', 'gamma2', 'gamma3', 'gamma4']
    n = 10
    poisson_problem.discretise_domain(n, 'grid', locations=boundaries)
    poisson_problem.discretise_domain(n, 'grid', locations=['D'])
    pinn = PINN(problem=poisson_problem, model=model_extra_feats, extra_features=extra_feats)
    trainer = Trainer(solver=pinn, kwargs={'max_epochs': 5})
    trainer.train()

"""
def test_train_2():
@@ -146,8 +114,8 @@ def test_train_2():
    param = [0, 3]
    for i, truth_key in zip(param, expected_keys):
        pinn = PINN(problem, model)
        pinn.span_pts(n, 'grid', locations=boundaries)
        pinn.span_pts(n, 'grid', locations=['D'])
        pinn.discretise_domain(n, 'grid', locations=boundaries)
        pinn.discretise_domain(n, 'grid', locations=['D'])
        pinn.train(50, save_loss=i)
        assert list(pinn.history_loss.keys()) == truth_key
@@ -156,8 +124,8 @@ def test_train_extra_feats():
    pinn = PINN(problem, model_extra_feat, [myFeature()])
    boundaries = ['gamma1', 'gamma2', 'gamma3', 'gamma4']
    n = 10
    pinn.span_pts(n, 'grid', locations=boundaries)
    pinn.span_pts(n, 'grid', locations=['D'])
    pinn.discretise_domain(n, 'grid', locations=boundaries)
    pinn.discretise_domain(n, 'grid', locations=['D'])
    pinn.train(5)
@@ -168,8 +136,8 @@ def test_train_2_extra_feats():
    param = [0, 3]
    for i, truth_key in zip(param, expected_keys):
        pinn = PINN(problem, model_extra_feat, [myFeature()])
        pinn.span_pts(n, 'grid', locations=boundaries)
        pinn.span_pts(n, 'grid', locations=['D'])
        pinn.discretise_domain(n, 'grid', locations=boundaries)
        pinn.discretise_domain(n, 'grid', locations=['D'])
        pinn.train(50, save_loss=i)
        assert list(pinn.history_loss.keys()) == truth_key
@@ -181,8 +149,8 @@ def test_train_with_optimizer_kwargs():
    param = [0, 3]
    for i, truth_key in zip(param, expected_keys):
        pinn = PINN(problem, model, optimizer_kwargs={'lr': 0.3})
        pinn.span_pts(n, 'grid', locations=boundaries)
        pinn.span_pts(n, 'grid', locations=['D'])
        pinn.discretise_domain(n, 'grid', locations=boundaries)
        pinn.discretise_domain(n, 'grid', locations=['D'])
        pinn.train(50, save_loss=i)
        assert list(pinn.history_loss.keys()) == truth_key
@@ -199,8 +167,8 @@ def test_train_with_lr_scheduler():
        lr_scheduler_type=torch.optim.lr_scheduler.CyclicLR,
        lr_scheduler_kwargs={'base_lr': 0.1, 'max_lr': 0.3, 'cycle_momentum': False}
    )
    pinn.span_pts(n, 'grid', locations=boundaries)
    pinn.span_pts(n, 'grid', locations=['D'])
    pinn.discretise_domain(n, 'grid', locations=boundaries)
    pinn.discretise_domain(n, 'grid', locations=['D'])
    pinn.train(50, save_loss=i)
    assert list(pinn.history_loss.keys()) == truth_key
@@ -209,8 +177,8 @@ def test_train_with_lr_scheduler():
# pinn = PINN(problem, model, batch_size=6)
# boundaries = ['gamma1', 'gamma2', 'gamma3', 'gamma4']
# n = 10
# pinn.span_pts(n, 'grid', locations=boundaries)
# pinn.span_pts(n, 'grid', locations=['D'])
# pinn.discretise_domain(n, 'grid', locations=boundaries)
# pinn.discretise_domain(n, 'grid', locations=['D'])
# pinn.train(5)
@@ -221,8 +189,8 @@ def test_train_with_lr_scheduler():
# param = [0, 3]
# for i, truth_key in zip(param, expected_keys):
# pinn = PINN(problem, model, batch_size=6)
# pinn.span_pts(n, 'grid', locations=boundaries)
# pinn.span_pts(n, 'grid', locations=['D'])
# pinn.discretise_domain(n, 'grid', locations=boundaries)
# pinn.discretise_domain(n, 'grid', locations=['D'])
# pinn.train(50, save_loss=i)
# assert list(pinn.history_loss.keys()) == truth_key
@@ -233,15 +201,15 @@ if torch.cuda.is_available():
# pinn = PINN(problem, model, batch_size=20, device='cuda')
# boundaries = ['gamma1', 'gamma2', 'gamma3', 'gamma4']
# n = 100
# pinn.span_pts(n, 'grid', locations=boundaries)
# pinn.span_pts(n, 'grid', locations=['D'])
# pinn.discretise_domain(n, 'grid', locations=boundaries)
# pinn.discretise_domain(n, 'grid', locations=['D'])
# pinn.train(5)

def test_gpu_train_nobatch():
    pinn = PINN(problem, model, batch_size=None, device='cuda')
    boundaries = ['gamma1', 'gamma2', 'gamma3', 'gamma4']
    n = 100
    pinn.span_pts(n, 'grid', locations=boundaries)
    pinn.span_pts(n, 'grid', locations=['D'])
    pinn.discretise_domain(n, 'grid', locations=boundaries)
    pinn.discretise_domain(n, 'grid', locations=['D'])
    pinn.train(5)
"""
tests/test_problem.py (new file, 97 lines added)
@@ -0,0 +1,97 @@
import torch
import pytest

from pina.problem import SpatialProblem
from pina.operators import nabla
from pina import LabelTensor, Condition
from pina.geometry import CartesianDomain
from pina.equation.equation import Equation
from pina.equation.equation_factory import FixedValue


def laplace_equation(input_, output_):
    force_term = (torch.sin(input_.extract(['x'])*torch.pi) *
                  torch.sin(input_.extract(['y'])*torch.pi))
    nabla_u = nabla(output_.extract(['u']), input_)
    return nabla_u - force_term

my_laplace = Equation(laplace_equation)
in_ = LabelTensor(torch.tensor([[0., 1.]], requires_grad=True), ['x', 'y'])
out_ = LabelTensor(torch.tensor([[0.]], requires_grad=True), ['u'])

class Poisson(SpatialProblem):
    output_variables = ['u']
    spatial_domain = CartesianDomain({'x': [0, 1], 'y': [0, 1]})

    conditions = {
        'gamma1': Condition(
            location=CartesianDomain({'x': [0, 1], 'y': 1}),
            equation=FixedValue(0.0)),
        'gamma2': Condition(
            location=CartesianDomain({'x': [0, 1], 'y': 0}),
            equation=FixedValue(0.0)),
        'gamma3': Condition(
            location=CartesianDomain({'x': 1, 'y': [0, 1]}),
            equation=FixedValue(0.0)),
        'gamma4': Condition(
            location=CartesianDomain({'x': 0, 'y': [0, 1]}),
            equation=FixedValue(0.0)),
        'D': Condition(
            location=CartesianDomain({'x': [0, 1], 'y': [0, 1]}),
            equation=my_laplace),
        'data': Condition(
            input_points=in_,
            output_points=out_)
    }

    def poisson_sol(self, pts):
        return -(
            torch.sin(pts.extract(['x'])*torch.pi) *
            torch.sin(pts.extract(['y'])*torch.pi)
        )/(2*torch.pi**2)

    truth_solution = poisson_sol


# make the problem
poisson_problem = Poisson()


def test_discretise_domain():
    n = 10
    boundaries = ['gamma1', 'gamma2', 'gamma3', 'gamma4']
    poisson_problem.discretise_domain(n, 'grid', locations=boundaries)
    for b in boundaries:
        assert poisson_problem.input_pts[b].shape[0] == n
    poisson_problem.discretise_domain(n, 'random', locations=boundaries)
    for b in boundaries:
        assert poisson_problem.input_pts[b].shape[0] == n

    poisson_problem.discretise_domain(n, 'grid', locations=['D'])
    assert poisson_problem.input_pts['D'].shape[0] == n**2
    poisson_problem.discretise_domain(n, 'random', locations=['D'])
    assert poisson_problem.input_pts['D'].shape[0] == n

    poisson_problem.discretise_domain(n, 'latin', locations=['D'])
    assert poisson_problem.input_pts['D'].shape[0] == n

    poisson_problem.discretise_domain(n, 'lh', locations=['D'])
    assert poisson_problem.input_pts['D'].shape[0] == n


def test_sampling_all_args():
    n = 10
    poisson_problem.discretise_domain(n, 'grid', locations=['D'])


def test_sampling_all_kwargs():
    n = 10
    poisson_problem.discretise_domain(n=n, mode='latin', locations=['D'])


def test_sampling_dict():
    n = 10
    poisson_problem.discretise_domain(
        {'variables': ['x', 'y'], 'mode': 'grid', 'n': n}, locations=['D'])


def test_sampling_mixed_args_kwargs():
    n = 10
    with pytest.raises(ValueError):
        poisson_problem.discretise_domain(n, mode='latin', locations=['D'])