PINN variants addition and Solvers Update (#263)

* Add new gpinn/basepinn classes and restructure pinn
* Codacy fixes for gpinn/basepinn/pinn
* Fix the inverse problem
* Add Causal PINN (#267); a sketch of the causal weighting idea follows this list
* Fix GPU training in inverse problems (#283)
* Create a `compute_residual` attribute for `PINNInterface`
* Modify data loading in the solvers (#286)
* Modify PINNInterface by removing _loss_phys and _loss_data
* Add a variable to PINNInterface to track the current condition during training
* Modify GPINN, PINN, and CausalPINN to match the changes in PINNInterface
* Competitive PINN addition (#288)
* Fix issues after rebase; fix loss
* Fix final issues
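
As a rough illustration of the causal weighting idea behind the Causal PINN item
above (Wang, Sankaran & Perdikaris, 2022), not the PINA implementation: each
temporal step is down-weighted until the residual losses of all earlier steps are
small. The names below (causal_weights, residuals_per_step, eps) are placeholders.

    import torch

    def causal_weights(residuals_per_step, eps=100.0):
        # mean squared PDE residual at each temporal discretisation step
        losses = torch.stack([r.pow(2).mean() for r in residuals_per_step])
        # accumulated loss of all *earlier* steps (exclusive cumulative sum)
        earlier = torch.cumsum(losses, dim=0) - losses
        # a step only receives full weight once the preceding steps are resolved
        weights = torch.exp(-eps * earlier).detach()
        return weights, (weights * losses).mean()

    # toy usage: random residuals for 5 time steps
    w, weighted_loss = causal_weights([torch.rand(20) for _ in range(5)])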

---------

Co-authored-by: Dario Coscia <dariocoscia@Dario-Coscia.local>

* Modify the min-max formulation to max-min for consistency with the paper (a weighting sketch follows this list)
* Add SAPINN solver (#291)
* Add ROM solver
* Fix import
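
As a rough sketch of the self-adaptive, max-min weighting used by SAPINN-style
solvers (in the spirit of self-adaptive PINNs, McClenny & Braga-Neto): per-point
weights are maximised while the network parameters are minimised. This assumes a
plain PyTorch setup rather than the PINA API; model, pts, and the residual below
are placeholders, not quantities from this repository.

    import torch

    model = torch.nn.Sequential(torch.nn.Linear(2, 32), torch.nn.Tanh(),
                                torch.nn.Linear(32, 1))
    pts = torch.rand(100, 2)                          # collocation points
    weights = torch.nn.Parameter(torch.ones(100, 1))  # self-adaptive weights

    opt_model = torch.optim.Adam(model.parameters(), lr=1e-3)
    opt_weights = torch.optim.Adam([weights], lr=1e-2, maximize=True)

    for _ in range(10):
        residual = model(pts)                         # stand-in for a PDE residual
        loss = (weights * residual.pow(2)).mean()     # weighted physics loss
        opt_model.zero_grad()
        opt_weights.zero_grad()
        loss.backward()
        opt_model.step()    # descent step on the network parameters (min)
        opt_weights.step()  # ascent step on the per-point weights (max)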

---------

Co-authored-by: Dario Coscia <dariocoscia@Dario-Coscia.local>
Co-authored-by: Anna Ivagnes <75523024+annaivagnes@users.noreply.github.com>
Co-authored-by: valc89 <103250118+valc89@users.noreply.github.com>
Co-authored-by: Monthly Tag bot <mtbot@noreply.github.com>
Co-authored-by: Nicola Demo <demo.nicola@gmail.com>
Authored by Dario Coscia on 2024-05-10 14:07:01 +02:00, committed by GitHub
parent 39dc6c4d86
commit e0429bb445
29 changed files with 3837 additions and 357 deletions


@@ -1,6 +1,6 @@
import torch
from pina.problem import SpatialProblem
from pina.problem import SpatialProblem, InverseProblem
from pina.operators import laplacian
from pina.geometry import CartesianDomain
from pina import Condition, LabelTensor
@@ -26,6 +26,58 @@ in2_ = LabelTensor(torch.rand(60, 2), ['x', 'y'])
out2_ = LabelTensor(torch.rand(60, 1), ['u'])
class InversePoisson(SpatialProblem, InverseProblem):
    '''
    Problem definition for the Poisson equation.
    '''
    output_variables = ['u']
    x_min = -2
    x_max = 2
    y_min = -2
    y_max = 2
    data_input = LabelTensor(torch.rand(10, 2), ['x', 'y'])
    data_output = LabelTensor(torch.rand(10, 1), ['u'])
    spatial_domain = CartesianDomain({'x': [x_min, x_max], 'y': [y_min, y_max]})
    # define the ranges for the parameters
    unknown_parameter_domain = CartesianDomain({'mu1': [-1, 1], 'mu2': [-1, 1]})

    def laplace_equation(input_, output_, params_):
        '''
        Laplace equation with a force term.
        '''
        force_term = torch.exp(
            - 2*(input_.extract(['x']) - params_['mu1'])**2
            - 2*(input_.extract(['y']) - params_['mu2'])**2)
        delta_u = laplacian(output_, input_, components=['u'], d=['x', 'y'])
        return delta_u - force_term

    # define the conditions for the loss (boundary conditions, equation, data)
    conditions = {
        'gamma1': Condition(location=CartesianDomain({'x': [x_min, x_max],
                                                      'y': y_max}),
                            equation=FixedValue(0.0, components=['u'])),
        'gamma2': Condition(location=CartesianDomain(
                                {'x': [x_min, x_max], 'y': y_min
                                 }),
                            equation=FixedValue(0.0, components=['u'])),
        'gamma3': Condition(location=CartesianDomain(
                                {'x': x_max, 'y': [y_min, y_max]
                                 }),
                            equation=FixedValue(0.0, components=['u'])),
        'gamma4': Condition(location=CartesianDomain(
                                {'x': x_min, 'y': [y_min, y_max]
                                 }),
                            equation=FixedValue(0.0, components=['u'])),
        'D': Condition(location=CartesianDomain(
                           {'x': [x_min, x_max], 'y': [y_min, y_max]
                           }),
                       equation=Equation(laplace_equation)),
        'data': Condition(input_points=data_input.extract(['x', 'y']),
                          output_points=data_output)
    }


class Poisson(SpatialProblem):
    output_variables = ['u']
    spatial_domain = CartesianDomain({'x': [0, 1], 'y': [0, 1]})
@@ -103,8 +155,10 @@ def test_train_cpu():
    boundaries = ['gamma1', 'gamma2', 'gamma3', 'gamma4']
    n = 10
    poisson_problem.discretise_domain(n, 'grid', locations=boundaries)
    pinn = PINN(problem = poisson_problem, model=model, extra_features=None, loss=LpLoss())
    trainer = Trainer(solver=pinn, max_epochs=1, accelerator='cpu', batch_size=20)
    pinn = PINN(problem = poisson_problem, model=model,
                extra_features=None, loss=LpLoss())
    trainer = Trainer(solver=pinn, max_epochs=1,
                      accelerator='cpu', batch_size=20)
    trainer.train()
@@ -125,7 +179,8 @@ def test_train_restore():
    trainer.train()
    ntrainer = Trainer(solver=pinn, max_epochs=15, accelerator='cpu')
    t = ntrainer.train(
        ckpt_path=f'{tmpdir}/lightning_logs/version_0/checkpoints/epoch=4-step=10.ckpt')
        ckpt_path=f'{tmpdir}/lightning_logs/version_0/'
        'checkpoints/epoch=4-step=10.ckpt')
    import shutil
    shutil.rmtree(tmpdir)
@@ -158,6 +213,68 @@ def test_train_load():
    import shutil
    shutil.rmtree(tmpdir)


def test_train_inverse_problem_cpu():
    poisson_problem = InversePoisson()
    boundaries = ['gamma1', 'gamma2', 'gamma3', 'gamma4', 'D']
    n = 100
    poisson_problem.discretise_domain(n, 'random', locations=boundaries)
    pinn = PINN(problem = poisson_problem, model=model,
                extra_features=None, loss=LpLoss())
    trainer = Trainer(solver=pinn, max_epochs=1,
                      accelerator='cpu', batch_size=20)
    trainer.train()

# # TODO does not currently work
# def test_train_inverse_problem_restore():
# tmpdir = "tests/tmp_restore_inv"
# poisson_problem = InversePoisson()
# boundaries = ['gamma1', 'gamma2', 'gamma3', 'gamma4', 'D']
# n = 100
# poisson_problem.discretise_domain(n, 'random', locations=boundaries)
# pinn = PINN(problem=poisson_problem,
# model=model,
# extra_features=None,
# loss=LpLoss())
# trainer = Trainer(solver=pinn,
# max_epochs=5,
# accelerator='cpu',
# default_root_dir=tmpdir)
# trainer.train()
# ntrainer = Trainer(solver=pinn, max_epochs=5, accelerator='cpu')
# t = ntrainer.train(
# ckpt_path=f'{tmpdir}/lightning_logs/version_0/checkpoints/epoch=4-step=10.ckpt')
# import shutil
# shutil.rmtree(tmpdir)

def test_train_inverse_problem_load():
    tmpdir = "tests/tmp_load_inv"
    poisson_problem = InversePoisson()
    boundaries = ['gamma1', 'gamma2', 'gamma3', 'gamma4', 'D']
    n = 100
    poisson_problem.discretise_domain(n, 'random', locations=boundaries)
    pinn = PINN(problem=poisson_problem,
                model=model,
                extra_features=None,
                loss=LpLoss())
    trainer = Trainer(solver=pinn,
                      max_epochs=15,
                      accelerator='cpu',
                      default_root_dir=tmpdir)
    trainer.train()
    new_pinn = PINN.load_from_checkpoint(
        f'{tmpdir}/lightning_logs/version_0/checkpoints/epoch=14-step=30.ckpt',
        problem = poisson_problem, model=model)
    test_pts = CartesianDomain({'x': [0, 1], 'y': [0, 1]}).sample(10)
    assert new_pinn.forward(test_pts).extract(['u']).shape == (10, 1)
    assert new_pinn.forward(test_pts).extract(
        ['u']).shape == pinn.forward(test_pts).extract(['u']).shape
    torch.testing.assert_close(
        new_pinn.forward(test_pts).extract(['u']),
        pinn.forward(test_pts).extract(['u']))
    import shutil
    shutil.rmtree(tmpdir)
# # TODO fix asap. Basically sampling few variables
# # works only if both variables are in a range.
@@ -197,85 +314,32 @@ def test_train_extra_feats_cpu():
# pinn = PINN(problem = poisson_problem, model=model, extra_features=None, loss=LpLoss())
# trainer = Trainer(solver=pinn, kwargs={'max_epochs' : 5, 'accelerator':'gpu'})
# trainer.train()
"""
def test_train_gpu(): #TODO fix ASAP
    poisson_problem = Poisson()
    boundaries = ['gamma1', 'gamma2', 'gamma3', 'gamma4']
    n = 10
    poisson_problem.discretise_domain(n, 'grid', locations=boundaries)
    poisson_problem.conditions.pop('data') # The input/output pts are allocated on cpu
    pinn = PINN(problem = poisson_problem, model=model, extra_features=None, loss=LpLoss())
    trainer = Trainer(solver=pinn, kwargs={'max_epochs' : 5, 'accelerator':'gpu'})
    trainer.train()


def test_train_2():
    boundaries = ['gamma1', 'gamma2', 'gamma3', 'gamma4']
    n = 10
    expected_keys = [[], list(range(0, 50, 3))]
    param = [0, 3]
    for i, truth_key in zip(param, expected_keys):
        pinn = PINN(problem, model)
        pinn.discretise_domain(n, 'grid', locations=boundaries)
        pinn.discretise_domain(n, 'grid', locations=['D'])
        pinn.train(50, save_loss=i)
        assert list(pinn.history_loss.keys()) == truth_key
# def test_train_gpu(): #TODO fix ASAP
# poisson_problem = Poisson()
# boundaries = ['gamma1', 'gamma2', 'gamma3', 'gamma4']
# n = 10
# poisson_problem.discretise_domain(n, 'grid', locations=boundaries)
# poisson_problem.conditions.pop('data') # The input/output pts are allocated on cpu
# pinn = PINN(problem = poisson_problem, model=model, extra_features=None, loss=LpLoss())
# trainer = Trainer(solver=pinn, kwargs={'max_epochs' : 5, 'accelerator':'gpu'})
# trainer.train()
# def test_train_2():
# boundaries = ['gamma1', 'gamma2', 'gamma3', 'gamma4']
# n = 10
# expected_keys = [[], list(range(0, 50, 3))]
# param = [0, 3]
# for i, truth_key in zip(param, expected_keys):
# pinn = PINN(problem, model)
# pinn.discretise_domain(n, 'grid', locations=boundaries)
# pinn.discretise_domain(n, 'grid', locations=['D'])
# pinn.train(50, save_loss=i)
# assert list(pinn.history_loss.keys()) == truth_key
def test_train_extra_feats():
    pinn = PINN(problem, model_extra_feat, [myFeature()])
    boundaries = ['gamma1', 'gamma2', 'gamma3', 'gamma4']
    n = 10
    pinn.discretise_domain(n, 'grid', locations=boundaries)
    pinn.discretise_domain(n, 'grid', locations=['D'])
    pinn.train(5)


def test_train_2_extra_feats():
    boundaries = ['gamma1', 'gamma2', 'gamma3', 'gamma4']
    n = 10
    expected_keys = [[], list(range(0, 50, 3))]
    param = [0, 3]
    for i, truth_key in zip(param, expected_keys):
        pinn = PINN(problem, model_extra_feat, [myFeature()])
        pinn.discretise_domain(n, 'grid', locations=boundaries)
        pinn.discretise_domain(n, 'grid', locations=['D'])
        pinn.train(50, save_loss=i)
        assert list(pinn.history_loss.keys()) == truth_key


def test_train_with_optimizer_kwargs():
    boundaries = ['gamma1', 'gamma2', 'gamma3', 'gamma4']
    n = 10
    expected_keys = [[], list(range(0, 50, 3))]
    param = [0, 3]
    for i, truth_key in zip(param, expected_keys):
        pinn = PINN(problem, model, optimizer_kwargs={'lr' : 0.3})
        pinn.discretise_domain(n, 'grid', locations=boundaries)
        pinn.discretise_domain(n, 'grid', locations=['D'])
        pinn.train(50, save_loss=i)
        assert list(pinn.history_loss.keys()) == truth_key


def test_train_with_lr_scheduler():
    boundaries = ['gamma1', 'gamma2', 'gamma3', 'gamma4']
    n = 10
    expected_keys = [[], list(range(0, 50, 3))]
    param = [0, 3]
    for i, truth_key in zip(param, expected_keys):
        pinn = PINN(
            problem,
            model,
            lr_scheduler_type=torch.optim.lr_scheduler.CyclicLR,
            lr_scheduler_kwargs={'base_lr' : 0.1, 'max_lr' : 0.3, 'cycle_momentum': False}
        )
        pinn.discretise_domain(n, 'grid', locations=boundaries)
        pinn.discretise_domain(n, 'grid', locations=['D'])
        pinn.train(50, save_loss=i)
        assert list(pinn.history_loss.keys()) == truth_key
# def test_train_batch():
# pinn = PINN(problem, model, batch_size=6)
# def test_train_extra_feats():
# pinn = PINN(problem, model_extra_feat, [myFeature()])
# boundaries = ['gamma1', 'gamma2', 'gamma3', 'gamma4']
# n = 10
# pinn.discretise_domain(n, 'grid', locations=boundaries)
@@ -283,34 +347,87 @@ def test_train_with_lr_scheduler():
# pinn.train(5)
# def test_train_batch_2():
# def test_train_2_extra_feats():
# boundaries = ['gamma1', 'gamma2', 'gamma3', 'gamma4']
# n = 10
# expected_keys = [[], list(range(0, 50, 3))]
# param = [0, 3]
# for i, truth_key in zip(param, expected_keys):
# pinn = PINN(problem, model, batch_size=6)
# pinn = PINN(problem, model_extra_feat, [myFeature()])
# pinn.discretise_domain(n, 'grid', locations=boundaries)
# pinn.discretise_domain(n, 'grid', locations=['D'])
# pinn.train(50, save_loss=i)
# assert list(pinn.history_loss.keys()) == truth_key
if torch.cuda.is_available():
# def test_train_with_optimizer_kwargs():
# boundaries = ['gamma1', 'gamma2', 'gamma3', 'gamma4']
# n = 10
# expected_keys = [[], list(range(0, 50, 3))]
# param = [0, 3]
# for i, truth_key in zip(param, expected_keys):
# pinn = PINN(problem, model, optimizer_kwargs={'lr' : 0.3})
# pinn.discretise_domain(n, 'grid', locations=boundaries)
# pinn.discretise_domain(n, 'grid', locations=['D'])
# pinn.train(50, save_loss=i)
# assert list(pinn.history_loss.keys()) == truth_key
# def test_gpu_train():
# pinn = PINN(problem, model, batch_size=20, device='cuda')
# boundaries = ['gamma1', 'gamma2', 'gamma3', 'gamma4']
# n = 100
# pinn.discretise_domain(n, 'grid', locations=boundaries)
# pinn.discretise_domain(n, 'grid', locations=['D'])
# pinn.train(5)
    def test_gpu_train_nobatch():
        pinn = PINN(problem, model, batch_size=None, device='cuda')
        boundaries = ['gamma1', 'gamma2', 'gamma3', 'gamma4']
        n = 100
        pinn.discretise_domain(n, 'grid', locations=boundaries)
        pinn.discretise_domain(n, 'grid', locations=['D'])
        pinn.train(5)
"""
# def test_train_with_lr_scheduler():
# boundaries = ['gamma1', 'gamma2', 'gamma3', 'gamma4']
# n = 10
# expected_keys = [[], list(range(0, 50, 3))]
# param = [0, 3]
# for i, truth_key in zip(param, expected_keys):
# pinn = PINN(
# problem,
# model,
# lr_scheduler_type=torch.optim.lr_scheduler.CyclicLR,
# lr_scheduler_kwargs={'base_lr' : 0.1, 'max_lr' : 0.3, 'cycle_momentum': False}
# )
# pinn.discretise_domain(n, 'grid', locations=boundaries)
# pinn.discretise_domain(n, 'grid', locations=['D'])
# pinn.train(50, save_loss=i)
# assert list(pinn.history_loss.keys()) == truth_key
# # def test_train_batch():
# # pinn = PINN(problem, model, batch_size=6)
# # boundaries = ['gamma1', 'gamma2', 'gamma3', 'gamma4']
# # n = 10
# # pinn.discretise_domain(n, 'grid', locations=boundaries)
# # pinn.discretise_domain(n, 'grid', locations=['D'])
# # pinn.train(5)
# # def test_train_batch_2():
# # boundaries = ['gamma1', 'gamma2', 'gamma3', 'gamma4']
# # n = 10
# # expected_keys = [[], list(range(0, 50, 3))]
# # param = [0, 3]
# # for i, truth_key in zip(param, expected_keys):
# # pinn = PINN(problem, model, batch_size=6)
# # pinn.discretise_domain(n, 'grid', locations=boundaries)
# # pinn.discretise_domain(n, 'grid', locations=['D'])
# # pinn.train(50, save_loss=i)
# # assert list(pinn.history_loss.keys()) == truth_key
# if torch.cuda.is_available():
# # def test_gpu_train():
# # pinn = PINN(problem, model, batch_size=20, device='cuda')
# # boundaries = ['gamma1', 'gamma2', 'gamma3', 'gamma4']
# # n = 100
# # pinn.discretise_domain(n, 'grid', locations=boundaries)
# # pinn.discretise_domain(n, 'grid', locations=['D'])
# # pinn.train(5)
# def test_gpu_train_nobatch():
# pinn = PINN(problem, model, batch_size=None, device='cuda')
# boundaries = ['gamma1', 'gamma2', 'gamma3', 'gamma4']
# n = 100
# pinn.discretise_domain(n, 'grid', locations=boundaries)
# pinn.discretise_domain(n, 'grid', locations=['D'])
# pinn.train(5)