* Adding a test for all PINN solvers to assert that the metrics are correctly logged

* Adding test for Metric Tracker
* Modify Metric Tracker to correctly log metrics
Author: dario-coscia
Date: 2024-08-06 11:12:19 +02:00
Committed by: Nicola Demo
Parent: d00fb95d6e
Commit: 0fa4e1e58a
10 changed files with 308 additions and 9 deletions

View File

@@ -1,6 +1,8 @@
 """PINA Callbacks Implementations"""

 from pytorch_lightning.callbacks import Callback
+from pytorch_lightning.core.module import LightningModule
+from pytorch_lightning.trainer.trainer import Trainer
 import torch
 import copy
@@ -28,17 +30,38 @@ class MetricTracker(Callback):
         """
         self._collection = []

-    def on_train_epoch_end(self, trainer, __):
+    def on_train_epoch_start(self, trainer, pl_module):
         """
-        Collect and track metrics at the end of each training epoch.
+        Collect and track metrics at the start of each training epoch. At
+        epoch zero no metric is saved; at epoch ``k`` the tracked metric is
+        the one of epoch ``k-1``.

         :param trainer: The trainer object managing the training process.
         :type trainer: pytorch_lightning.Trainer
-        :param _: Placeholder argument.
+        :param pl_module: Placeholder argument.

         :return: None
         :rtype: None
         """
+        super().on_train_epoch_start(trainer, pl_module)
+        if trainer.current_epoch > 0:
+            self._collection.append(
+                copy.deepcopy(trainer.logged_metrics)
+            )  # track them
+
+    def on_train_end(self, trainer, pl_module):
+        """
+        Collect and track metrics at the end of training.
+
+        :param trainer: The trainer object managing the training process.
+        :type trainer: pytorch_lightning.Trainer
+        :param pl_module: Placeholder argument.
+
+        :return: None
+        :rtype: None
+        """
+        super().on_train_end(trainer, pl_module)
+        if trainer.current_epoch > 0:
+            self._collection.append(
+                copy.deepcopy(trainer.logged_metrics)
+            )  # track them
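
Editor's note: to see why both hooks are needed, here is a small self-contained sketch of the bookkeeping described in the docstrings above. It uses plain Python only; fake_logged_metrics and collection are hypothetical stand-ins for trainer.logged_metrics and MetricTracker._collection. Because on_train_epoch_start at epoch k still sees the metrics logged during epoch k-1, and on_train_end picks up the final epoch, the tracker ends up with one entry per completed epoch.

import copy

# Stand-alone illustration of the tracking logic above (not PINA code).
# fake_logged_metrics(k) plays the role of trainer.logged_metrics as it
# looks after epoch k has been logged.
def fake_logged_metrics(k):
    return {'mean_loss': 1.0 / (k + 1)}

collection = []          # mimics MetricTracker._collection
num_epochs = 5

for epoch in range(num_epochs):
    # on_train_epoch_start: at epoch k > 0, trainer.logged_metrics still
    # holds the values of epoch k-1, so that is what gets stored.
    if epoch > 0:
        collection.append(copy.deepcopy(fake_logged_metrics(epoch - 1)))
    # ... training of epoch `epoch` happens here and logs its metrics ...

# on_train_end: the metrics of the last epoch are stored as well.
collection.append(copy.deepcopy(fake_logged_metrics(num_epochs - 1)))

assert len(collection) == num_epochs   # one tracked entry per epoch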

View File

@@ -195,15 +195,20 @@ class AbstractProblem(metaclass=ABCMeta):
             )

         # check consistency location
+        locations_to_sample = [
+            condition for condition in self.conditions
+            if hasattr(self.conditions[condition], 'location')
+        ]
         if locations == "all":
-            locations = [condition for condition in self.conditions]
+            # only locations that can be sampled
+            locations = locations_to_sample
         else:
             check_consistency(locations, str)
-            if sorted(locations) != sorted(self.conditions):
+            if sorted(locations) != sorted(locations_to_sample):
                 TypeError(
                     f"Wrong locations for sampling. Location ",
-                    f"should be in {self.conditions}.",
+                    f"should be in {locations_to_sample}.",
                 )

         # sampling
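
Editor's note: the intent of the new filter is that only conditions exposing a location attribute can be sampled, while purely data-driven conditions (defined through input_points/output_points) are skipped when locations == "all". A toy sketch of that rule follows; FakeCondition is a hypothetical stand-in, not the real pina.Condition.

# Hypothetical stand-in for pina.Condition, only to illustrate the filter.
class FakeCondition:
    def __init__(self, location=None):
        if location is not None:
            self.location = location   # sampleable conditions carry a location

conditions = {
    'gamma1': FakeCondition(location='boundary'),   # sampleable
    'D': FakeCondition(location='interior'),        # sampleable
    'data': FakeCondition(),                        # data-driven, no location
}

# same rule as the added code above
locations_to_sample = [
    name for name in conditions
    if hasattr(conditions[name], 'location')
]

assert sorted(locations_to_sample) == ['D', 'gamma1']   # 'data' is excluded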

View File

@@ -0,0 +1,87 @@
import torch
import pytest

from pina.problem import SpatialProblem
from pina.operators import laplacian
from pina.geometry import CartesianDomain
from pina import Condition, LabelTensor
from pina.solvers import PINN
from pina.trainer import Trainer
from pina.model import FeedForward
from pina.equation.equation import Equation
from pina.equation.equation_factory import FixedValue
from pina.callbacks import MetricTracker


def laplace_equation(input_, output_):
    force_term = (torch.sin(input_.extract(['x']) * torch.pi) *
                  torch.sin(input_.extract(['y']) * torch.pi))
    delta_u = laplacian(output_.extract(['u']), input_)
    return delta_u - force_term


my_laplace = Equation(laplace_equation)
in_ = LabelTensor(torch.tensor([[0., 1.]]), ['x', 'y'])
out_ = LabelTensor(torch.tensor([[0.]]), ['u'])


class Poisson(SpatialProblem):
    output_variables = ['u']
    spatial_domain = CartesianDomain({'x': [0, 1], 'y': [0, 1]})

    conditions = {
        'gamma1': Condition(
            location=CartesianDomain({'x': [0, 1], 'y': 1}),
            equation=FixedValue(0.0)),
        'gamma2': Condition(
            location=CartesianDomain({'x': [0, 1], 'y': 0}),
            equation=FixedValue(0.0)),
        'gamma3': Condition(
            location=CartesianDomain({'x': 1, 'y': [0, 1]}),
            equation=FixedValue(0.0)),
        'gamma4': Condition(
            location=CartesianDomain({'x': 0, 'y': [0, 1]}),
            equation=FixedValue(0.0)),
        'D': Condition(
            input_points=LabelTensor(torch.rand(size=(100, 2)), ['x', 'y']),
            equation=my_laplace),
        'data': Condition(
            input_points=in_,
            output_points=out_)
    }


# make the problem
poisson_problem = Poisson()
boundaries = ['gamma1', 'gamma2', 'gamma3', 'gamma4']
n = 10
poisson_problem.discretise_domain(n, 'grid', locations=boundaries)
model = FeedForward(len(poisson_problem.input_variables),
                    len(poisson_problem.output_variables))

# make the solver
solver = PINN(problem=poisson_problem, model=model)


def test_metric_tracker_constructor():
    MetricTracker()


def test_metric_tracker_routine():
    # make the trainer
    trainer = Trainer(solver=solver,
                      callbacks=[
                          MetricTracker()
                      ],
                      accelerator='cpu',
                      max_epochs=5)
    trainer.train()
    # get the tracked metrics
    metrics = trainer.callbacks[0].metrics
    # assert the logged metrics are correct
    logged_metrics = sorted(list(metrics.keys()))
    total_metrics = sorted(
        list([key + '_loss' for key in poisson_problem.conditions.keys()])
        + ['mean_loss'])
    assert logged_metrics == total_metrics
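
Editor's note: given the hooks added to MetricTracker above (one entry per epoch after epoch zero, plus one at the end of training), a further check on the size of the tracked collection could look like the sketch below. It reuses the module-level solver from this test file and relies on the private _collection attribute shown in the diff, so it is a suggestion rather than part of the commit.

def test_metric_tracker_collection_length():
    # sketch only: relies on the private _collection attribute and on the
    # epoch bookkeeping of on_train_epoch_start / on_train_end shown above
    tracker = MetricTracker()
    trainer = Trainer(solver=solver,
                      callbacks=[tracker],
                      accelerator='cpu',
                      max_epochs=5)
    trainer.train()
    # epochs 1..4 each store the previous epoch, on_train_end stores the last
    assert len(tracker._collection) == 5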

View File

@@ -0,0 +1,113 @@
import torch
import pytest

from pina import Condition, LabelTensor, Trainer
from pina.problem import SpatialProblem
from pina.operators import laplacian
from pina.geometry import CartesianDomain
from pina.model import FeedForward
from pina.solvers import PINNInterface
from pina.equation.equation import Equation
from pina.equation.equation_factory import FixedValue


def laplace_equation(input_, output_):
    force_term = (torch.sin(input_.extract(['x']) * torch.pi) *
                  torch.sin(input_.extract(['y']) * torch.pi))
    delta_u = laplacian(output_.extract(['u']), input_)
    return delta_u - force_term


my_laplace = Equation(laplace_equation)
in_ = LabelTensor(torch.tensor([[0., 1.]]), ['x', 'y'])
out_ = LabelTensor(torch.tensor([[0.]]), ['u'])
in2_ = LabelTensor(torch.rand(60, 2), ['x', 'y'])
out2_ = LabelTensor(torch.rand(60, 1), ['u'])


class Poisson(SpatialProblem):
    output_variables = ['u']
    spatial_domain = CartesianDomain({'x': [0, 1], 'y': [0, 1]})

    conditions = {
        'gamma1': Condition(
            location=CartesianDomain({'x': [0, 1], 'y': 1}),
            equation=FixedValue(0.0)),
        'gamma2': Condition(
            location=CartesianDomain({'x': [0, 1], 'y': 0}),
            equation=FixedValue(0.0)),
        'gamma3': Condition(
            location=CartesianDomain({'x': 1, 'y': [0, 1]}),
            equation=FixedValue(0.0)),
        'gamma4': Condition(
            location=CartesianDomain({'x': 0, 'y': [0, 1]}),
            equation=FixedValue(0.0)),
        'D': Condition(
            input_points=LabelTensor(torch.rand(size=(100, 2)), ['x', 'y']),
            equation=my_laplace),
        'data': Condition(
            input_points=in_,
            output_points=out_),
        'data2': Condition(
            input_points=in2_,
            output_points=out2_)
    }

    def poisson_sol(self, pts):
        return -(torch.sin(pts.extract(['x']) * torch.pi) *
                 torch.sin(pts.extract(['y']) * torch.pi)) / (2 * torch.pi**2)

    truth_solution = poisson_sol


class FOOPINN(PINNInterface):
    def __init__(self, model, problem):
        super().__init__(models=[model], problem=problem,
                         optimizers=[torch.optim.Adam],
                         optimizers_kwargs=[{'lr': 0.001}],
                         extra_features=None,
                         loss=torch.nn.MSELoss())

    def forward(self, x):
        return self.models[0](x)

    def loss_phys(self, samples, equation):
        residual = self.compute_residual(samples=samples, equation=equation)
        loss_value = self.loss(
            torch.zeros_like(residual, requires_grad=True), residual
        )
        self.store_log(loss_value=float(loss_value))
        return loss_value

    def configure_optimizers(self):
        return self.optimizers, []


# make the problem
poisson_problem = Poisson()
poisson_problem.discretise_domain(100)
model = FeedForward(len(poisson_problem.input_variables),
                    len(poisson_problem.output_variables))
model_extra_feats = FeedForward(
    len(poisson_problem.input_variables) + 1,
    len(poisson_problem.output_variables))


def test_constructor():
    with pytest.raises(TypeError):
        PINNInterface()

    # a simple pinn built with PINNInterface
    FOOPINN(model, poisson_problem)


def test_train_step():
    solver = FOOPINN(model, poisson_problem)
    trainer = Trainer(solver, max_epochs=2, accelerator='cpu')
    trainer.train()


def test_log():
    solver = FOOPINN(model, poisson_problem)
    trainer = Trainer(solver, max_epochs=2, accelerator='cpu')
    trainer.train()
    # assert the logged metrics are correct
    logged_metrics = sorted(list(trainer.logged_metrics.keys()))
    total_metrics = sorted(
        list([key + '_loss' for key in poisson_problem.conditions.keys()])
        + ['mean_loss'])
    assert logged_metrics == total_metrics

View File

@@ -138,6 +138,18 @@ def test_train_cpu():
                       accelerator='cpu', batch_size=20)
     trainer.train()

+def test_log():
+    problem.discretise_domain(100)
+    solver = CausalPINN(problem=problem,
+                        model=model, extra_features=None, loss=LpLoss())
+    trainer = Trainer(solver, max_epochs=2, accelerator='cpu')
+    trainer.train()
+    # assert the logged metrics are correct
+    logged_metrics = sorted(list(trainer.logged_metrics.keys()))
+    total_metrics = sorted(
+        list([key + '_loss' for key in problem.conditions.keys()])
+        + ['mean_loss'])
+    assert logged_metrics == total_metrics

 def test_train_restore():
     tmpdir = "tests/tmp_restore"

View File

@@ -163,6 +163,17 @@ def test_train_cpu():
                       accelerator='cpu', batch_size=20)
     trainer.train()

+def test_log():
+    poisson_problem.discretise_domain(100)
+    solver = PINN(problem=poisson_problem, model=model, loss=LpLoss())
+    trainer = Trainer(solver, max_epochs=2, accelerator='cpu')
+    trainer.train()
+    # assert the logged metrics are correct
+    logged_metrics = sorted(list(trainer.logged_metrics.keys()))
+    total_metrics = sorted(
+        list([key + '_loss' for key in poisson_problem.conditions.keys()])
+        + ['mean_loss'])
+    assert logged_metrics == total_metrics

 def test_train_restore():
     tmpdir = "tests/tmp_restore"

View File

@@ -160,6 +160,18 @@ def test_train_cpu():
                       accelerator='cpu', batch_size=20)
     trainer.train()

+def test_log():
+    poisson_problem.discretise_domain(100)
+    solver = GPINN(problem=poisson_problem, model=model,
+                   extra_features=None, loss=LpLoss())
+    trainer = Trainer(solver, max_epochs=2, accelerator='cpu')
+    trainer.train()
+    # assert the logged metrics are correct
+    logged_metrics = sorted(list(trainer.logged_metrics.keys()))
+    total_metrics = sorted(
+        list([key + '_loss' for key in poisson_problem.conditions.keys()])
+        + ['mean_loss'])
+    assert logged_metrics == total_metrics

 def test_train_restore():
     tmpdir = "tests/tmp_restore"

View File

@@ -161,6 +161,18 @@ def test_train_cpu():
                       accelerator='cpu', batch_size=20)
     trainer.train()

+def test_log():
+    poisson_problem.discretise_domain(100)
+    solver = PINN(problem=poisson_problem, model=model,
+                  extra_features=None, loss=LpLoss())
+    trainer = Trainer(solver, max_epochs=2, accelerator='cpu')
+    trainer.train()
+    # assert the logged metrics are correct
+    logged_metrics = sorted(list(trainer.logged_metrics.keys()))
+    total_metrics = sorted(
+        list([key + '_loss' for key in poisson_problem.conditions.keys()])
+        + ['mean_loss'])
+    assert logged_metrics == total_metrics

 def test_train_restore():
     tmpdir = "tests/tmp_restore"

View File

@@ -165,6 +165,18 @@ def test_train_cpu():
                       accelerator='cpu', batch_size=20)
     trainer.train()

+def test_log():
+    poisson_problem.discretise_domain(100)
+    solver = PINN(problem=poisson_problem, model=model,
+                  extra_features=None, loss=LpLoss())
+    trainer = Trainer(solver, max_epochs=2, accelerator='cpu')
+    trainer.train()
+    # assert the logged metrics are correct
+    logged_metrics = sorted(list(trainer.logged_metrics.keys()))
+    total_metrics = sorted(
+        list([key + '_loss' for key in poisson_problem.conditions.keys()])
+        + ['mean_loss'])
+    assert logged_metrics == total_metrics

 def test_train_restore():
     tmpdir = "tests/tmp_restore"

View File

@@ -165,6 +165,18 @@ def test_train_cpu():
                       accelerator='cpu', batch_size=20)
     trainer.train()

+def test_log():
+    poisson_problem.discretise_domain(100)
+    solver = PINN(problem=poisson_problem, model=model,
+                  extra_features=None, loss=LpLoss())
+    trainer = Trainer(solver, max_epochs=2, accelerator='cpu')
+    trainer.train()
+    # assert the logged metrics are correct
+    logged_metrics = sorted(list(trainer.logged_metrics.keys()))
+    total_metrics = sorted(
+        list([key + '_loss' for key in poisson_problem.conditions.keys()])
+        + ['mean_loss'])
+    assert logged_metrics == total_metrics

 def test_train_restore():
     tmpdir = "tests/tmp_restore"