Lightning update (#104)
* multiple functions for version 0.0
* lightning update
* minor changes
* data PINN loss added

---------

Co-authored-by: Nicola Demo <demo.nicola@gmail.com>
Co-authored-by: Dario Coscia <dariocoscia@cli-10-110-3-125.WIFIeduroamSTUD.units.it>
Co-authored-by: Dario Coscia <dariocoscia@Dario-Coscia.station>
Co-authored-by: Dario Coscia <dariocoscia@Dario-Coscia.local>
Co-authored-by: Dario Coscia <dariocoscia@192.168.1.38>
committed by Nicola Demo
parent 0e3625de80
commit 63fd068988
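For context, a minimal sketch of the workflow change these tests exercise, assembled from the diff below (the Poisson problem, `model`, `LpLoss`, and `Trainer` names are the ones used in the test file; treat this as an illustration of the migration, not the documented public API):

    from pina import PINN
    from pina.trainer import Trainer
    from pina.loss import LpLoss

    # old workflow: sampling and training live on the PINN object itself
    # (`problem` and `model` are the objects defined in the test file below)
    pinn = PINN(problem, model)
    pinn.span_pts(10, 'grid', locations=['D'])
    pinn.train(5)

    # new Lightning-style workflow: discretise the problem, then wrap the
    # PINN solver in a Trainer that drives the optimisation
    poisson_problem.discretise_domain(10, 'grid', locations=['D'])
    pinn = PINN(problem=poisson_problem, model=model, extra_features=None, loss=LpLoss())
    trainer = Trainer(solver=pinn, kwargs={'max_epochs': 5})
    trainer.train()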
@@ -1,17 +1,18 @@
import torch
import pytest

from pina import LabelTensor, Condition, CartesianDomain, PINN
from pina.problem import SpatialProblem
from pina.model import FeedForward
from pina.operators import nabla
from pina.geometry import CartesianDomain
from pina import Condition, LabelTensor, PINN
from pina.trainer import Trainer
from pina.model import FeedForward
from pina.equation.equation import Equation
from pina.equation.equation_factory import FixedValue
from pina.plotter import Plotter
from pina.loss import LpLoss


in_ = LabelTensor(torch.tensor([[0., 1.]]), ['x', 'y'])
out_ = LabelTensor(torch.tensor([[0.]]), ['u'])


def laplace_equation(input_, output_):
    force_term = (torch.sin(input_.extract(['x'])*torch.pi) *
                  torch.sin(input_.extract(['y'])*torch.pi))
@@ -19,6 +20,8 @@ def laplace_equation(input_, output_):
    return nabla_u - force_term

my_laplace = Equation(laplace_equation)
in_ = LabelTensor(torch.tensor([[0., 1.]], requires_grad=True), ['x', 'y'])
out_ = LabelTensor(torch.tensor([[0.]], requires_grad=True), ['u'])


class Poisson(SpatialProblem):
    output_variables = ['u']
@@ -68,75 +71,40 @@ class myFeature(torch.nn.Module):
        return LabelTensor(t, ['sin(x)sin(y)'])


problem = Poisson()
model = FeedForward(len(problem.input_variables),len(problem.output_variables))
model_extra_feat = FeedForward(len(problem.input_variables) + 1,len(problem.output_variables))
# make the problem
poisson_problem = Poisson()
model = FeedForward(len(poisson_problem.input_variables),len(poisson_problem.output_variables))
model_extra_feats = FeedForward(len(poisson_problem.input_variables)+1,len(poisson_problem.output_variables))
extra_feats = [myFeature()]


def test_constructor():
    PINN(problem, model)
    PINN(problem = poisson_problem, model=model, extra_features=None)


def test_constructor_extra_feats():
    PINN(problem, model_extra_feat, [myFeature()])


def test_span_pts():
    pinn = PINN(problem, model)
    n = 10
    boundaries = ['gamma1', 'gamma2', 'gamma3', 'gamma4']
    pinn.span_pts(n, 'grid', locations=boundaries)
    for b in boundaries:
        assert pinn.input_pts[b].shape[0] == n
    pinn.span_pts(n, 'random', locations=boundaries)
    for b in boundaries:
        assert pinn.input_pts[b].shape[0] == n

    pinn.span_pts(n, 'grid', locations=['D'])
    assert pinn.input_pts['D'].shape[0] == n**2
    pinn.span_pts(n, 'random', locations=['D'])
    assert pinn.input_pts['D'].shape[0] == n

    pinn.span_pts(n, 'latin', locations=['D'])
    assert pinn.input_pts['D'].shape[0] == n

    pinn.span_pts(n, 'lh', locations=['D'])
    assert pinn.input_pts['D'].shape[0] == n


def test_sampling_all_args():
    pinn = PINN(problem, model)
    n = 10
    pinn.span_pts(n, 'grid', locations=['D'])


def test_sampling_all_kwargs():
    pinn = PINN(problem, model)
    n = 10
    pinn.span_pts(n=n, mode='latin', locations=['D'])


def test_sampling_dict():
    pinn = PINN(problem, model)
    n = 10
    pinn.span_pts(
        {'variables': ['x', 'y'], 'mode': 'grid', 'n': n}, locations=['D'])


def test_sampling_mixed_args_kwargs():
    pinn = PINN(problem, model)
    n = 10
    with pytest.raises(ValueError):
        pinn.span_pts(n, mode='latin', locations=['D'])

    model_extra_feats = FeedForward(len(poisson_problem.input_variables)+1,len(poisson_problem.output_variables))
    PINN(problem = poisson_problem, model=model_extra_feats, extra_features=extra_feats)


def test_train():
    pinn = PINN(problem, model)
    poisson_problem = Poisson()
    boundaries = ['gamma1', 'gamma2', 'gamma3', 'gamma4']
    n = 10
    pinn.span_pts(n, 'grid', locations=boundaries)
    pinn.span_pts(n, 'grid', locations=['D'])
    pinn.train(5)
    poisson_problem.discretise_domain(n, 'grid', locations=boundaries)
    poisson_problem.discretise_domain(n, 'grid', locations=['D'])
    pinn = PINN(problem = poisson_problem, model=model, extra_features=None, loss=LpLoss())
    trainer = Trainer(solver=pinn, kwargs={'max_epochs' : 5})
    trainer.train()


def test_train_extra_feats():
    poisson_problem = Poisson()
    boundaries = ['gamma1', 'gamma2', 'gamma3', 'gamma4']
    n = 10
    poisson_problem.discretise_domain(n, 'grid', locations=boundaries)
    poisson_problem.discretise_domain(n, 'grid', locations=['D'])
    pinn = PINN(problem = poisson_problem, model=model_extra_feats, extra_features=extra_feats)
    trainer = Trainer(solver=pinn, kwargs={'max_epochs' : 5})
    trainer.train()


"""
def test_train_2():
@@ -146,8 +114,8 @@ def test_train_2():
    param = [0, 3]
    for i, truth_key in zip(param, expected_keys):
        pinn = PINN(problem, model)
        pinn.span_pts(n, 'grid', locations=boundaries)
        pinn.span_pts(n, 'grid', locations=['D'])
        pinn.discretise_domain(n, 'grid', locations=boundaries)
        pinn.discretise_domain(n, 'grid', locations=['D'])
        pinn.train(50, save_loss=i)
        assert list(pinn.history_loss.keys()) == truth_key
@@ -156,8 +124,8 @@ def test_train_extra_feats():
    pinn = PINN(problem, model_extra_feat, [myFeature()])
    boundaries = ['gamma1', 'gamma2', 'gamma3', 'gamma4']
    n = 10
    pinn.span_pts(n, 'grid', locations=boundaries)
    pinn.span_pts(n, 'grid', locations=['D'])
    pinn.discretise_domain(n, 'grid', locations=boundaries)
    pinn.discretise_domain(n, 'grid', locations=['D'])
    pinn.train(5)
@@ -168,8 +136,8 @@ def test_train_2_extra_feats():
    param = [0, 3]
    for i, truth_key in zip(param, expected_keys):
        pinn = PINN(problem, model_extra_feat, [myFeature()])
        pinn.span_pts(n, 'grid', locations=boundaries)
        pinn.span_pts(n, 'grid', locations=['D'])
        pinn.discretise_domain(n, 'grid', locations=boundaries)
        pinn.discretise_domain(n, 'grid', locations=['D'])
        pinn.train(50, save_loss=i)
        assert list(pinn.history_loss.keys()) == truth_key
@@ -181,8 +149,8 @@ def test_train_with_optimizer_kwargs():
    param = [0, 3]
    for i, truth_key in zip(param, expected_keys):
        pinn = PINN(problem, model, optimizer_kwargs={'lr' : 0.3})
        pinn.span_pts(n, 'grid', locations=boundaries)
        pinn.span_pts(n, 'grid', locations=['D'])
        pinn.discretise_domain(n, 'grid', locations=boundaries)
        pinn.discretise_domain(n, 'grid', locations=['D'])
        pinn.train(50, save_loss=i)
        assert list(pinn.history_loss.keys()) == truth_key
@@ -199,8 +167,8 @@ def test_train_with_lr_scheduler():
            lr_scheduler_type=torch.optim.lr_scheduler.CyclicLR,
            lr_scheduler_kwargs={'base_lr' : 0.1, 'max_lr' : 0.3, 'cycle_momentum': False}
        )
        pinn.span_pts(n, 'grid', locations=boundaries)
        pinn.span_pts(n, 'grid', locations=['D'])
        pinn.discretise_domain(n, 'grid', locations=boundaries)
        pinn.discretise_domain(n, 'grid', locations=['D'])
        pinn.train(50, save_loss=i)
        assert list(pinn.history_loss.keys()) == truth_key
@@ -209,8 +177,8 @@ def test_train_with_lr_scheduler():
# pinn = PINN(problem, model, batch_size=6)
# boundaries = ['gamma1', 'gamma2', 'gamma3', 'gamma4']
# n = 10
# pinn.span_pts(n, 'grid', locations=boundaries)
# pinn.span_pts(n, 'grid', locations=['D'])
# pinn.discretise_domain(n, 'grid', locations=boundaries)
# pinn.discretise_domain(n, 'grid', locations=['D'])
# pinn.train(5)
@@ -221,8 +189,8 @@ def test_train_with_lr_scheduler():
# param = [0, 3]
# for i, truth_key in zip(param, expected_keys):
# pinn = PINN(problem, model, batch_size=6)
# pinn.span_pts(n, 'grid', locations=boundaries)
# pinn.span_pts(n, 'grid', locations=['D'])
# pinn.discretise_domain(n, 'grid', locations=boundaries)
# pinn.discretise_domain(n, 'grid', locations=['D'])
# pinn.train(50, save_loss=i)
# assert list(pinn.history_loss.keys()) == truth_key
@@ -233,15 +201,15 @@ if torch.cuda.is_available():
# pinn = PINN(problem, model, batch_size=20, device='cuda')
# boundaries = ['gamma1', 'gamma2', 'gamma3', 'gamma4']
# n = 100
# pinn.span_pts(n, 'grid', locations=boundaries)
# pinn.span_pts(n, 'grid', locations=['D'])
# pinn.discretise_domain(n, 'grid', locations=boundaries)
# pinn.discretise_domain(n, 'grid', locations=['D'])
# pinn.train(5)

def test_gpu_train_nobatch():
    pinn = PINN(problem, model, batch_size=None, device='cuda')
    boundaries = ['gamma1', 'gamma2', 'gamma3', 'gamma4']
    n = 100
    pinn.span_pts(n, 'grid', locations=boundaries)
    pinn.span_pts(n, 'grid', locations=['D'])
    pinn.discretise_domain(n, 'grid', locations=boundaries)
    pinn.discretise_domain(n, 'grid', locations=['D'])
    pinn.train(5)
"""