From 0fa4e1e58aeb903b5710d1aec9f634f2ff0e677d Mon Sep 17 00:00:00 2001
From: dario-coscia
Date: Tue, 6 Aug 2024 11:12:19 +0200
Subject: [PATCH] * Adding a test for all PINN solvers to assert that the
 metrics are correctly logged
 * Adding a test for Metric Tracker
 * Modifying Metric Tracker to correctly log metrics

---
 pina/callbacks/processing_callbacks.py      |  35 ++++--
 pina/problem/abstract_problem.py            |  11 +-
 tests/test_callbacks/test_metric_tracker.py |  87 +++++++++++++++
 tests/test_solvers/test_basepinn.py         | 113 ++++++++++++++++++++
 tests/test_solvers/test_causalpinn.py       |  12 +++
 tests/test_solvers/test_competitive_pinn.py |  11 ++
 tests/test_solvers/test_gpinn.py            |  12 +++
 tests/test_solvers/test_pinn.py             |  12 +++
 tests/test_solvers/test_rba_pinn.py         |  12 +++
 tests/test_solvers/test_sapinn.py           |  12 +++
 10 files changed, 308 insertions(+), 9 deletions(-)
 create mode 100644 tests/test_callbacks/test_metric_tracker.py
 create mode 100644 tests/test_solvers/test_basepinn.py

diff --git a/pina/callbacks/processing_callbacks.py b/pina/callbacks/processing_callbacks.py
index 0a7359c..3b86936 100644
--- a/pina/callbacks/processing_callbacks.py
+++ b/pina/callbacks/processing_callbacks.py
@@ -1,6 +1,8 @@
 """PINA Callbacks Implementations"""
 
 from pytorch_lightning.callbacks import Callback
+from pytorch_lightning.core.module import LightningModule
+from pytorch_lightning.trainer.trainer import Trainer
 import torch
 import copy
 
@@ -28,20 +30,41 @@ class MetricTracker(Callback):
         """
         self._collection = []
 
-    def on_train_epoch_end(self, trainer, __):
+    def on_train_epoch_start(self, trainer, pl_module):
         """
-        Collect and track metrics at the end of each training epoch.
+        Collect and track metrics at the start of each training epoch. At
+        epoch zero no metric is saved; at epoch ``k`` the tracked metrics are
+        the ones logged during epoch ``k-1``.
 
         :param trainer: The trainer object managing the training process.
         :type trainer: pytorch_lightning.Trainer
-        :param _: Placeholder argument.
+        :param pl_module: Placeholder argument.
 
         :return: None
         :rtype: None
         """
-        self._collection.append(
-            copy.deepcopy(trainer.logged_metrics)
-        ) # track them
+        super().on_train_epoch_start(trainer, pl_module)
+        if trainer.current_epoch > 0:
+            self._collection.append(
+                copy.deepcopy(trainer.logged_metrics)
+            ) # track them
+
+    def on_train_end(self, trainer, pl_module):
+        """
+        Collect and track metrics at the end of training.
+
+        :param trainer: The trainer object managing the training process.
+        :type trainer: pytorch_lightning.Trainer
+        :param pl_module: Placeholder argument.
+
+        :return: None
+        :rtype: None
+        """
+        super().on_train_end(trainer, pl_module)
+        if trainer.current_epoch > 0:
+            self._collection.append(
+                copy.deepcopy(trainer.logged_metrics)
+            ) # track them
 
     @property
     def metrics(self):
diff --git a/pina/problem/abstract_problem.py b/pina/problem/abstract_problem.py
index e43ac24..8b98ec9 100644
--- a/pina/problem/abstract_problem.py
+++ b/pina/problem/abstract_problem.py
@@ -195,15 +195,20 @@ class AbstractProblem(metaclass=ABCMeta):
             )
 
         # check consistency location
+        locations_to_sample = [
+            condition for condition in self.conditions
+            if hasattr(self.conditions[condition], 'location')
+        ]
         if locations == "all":
-            locations = [condition for condition in self.conditions]
+            # only locations that can be sampled
+            locations = locations_to_sample
         else:
             check_consistency(locations, str)
-            if sorted(locations) != sorted(self.conditions):
+            if sorted(locations) != sorted(locations_to_sample):
                 TypeError(
                     f"Wrong locations for sampling. Location ",
-                    f"should be in {self.conditions}.",
+                    f"should be in {locations_to_sample}.",
                 )
 
         # sampling
diff --git a/tests/test_callbacks/test_metric_tracker.py b/tests/test_callbacks/test_metric_tracker.py
new file mode 100644
index 0000000..c380245
--- /dev/null
+++ b/tests/test_callbacks/test_metric_tracker.py
@@ -0,0 +1,87 @@
+import torch
+import pytest
+
+from pina.problem import SpatialProblem
+from pina.operators import laplacian
+from pina.geometry import CartesianDomain
+from pina import Condition, LabelTensor
+from pina.solvers import PINN
+from pina.trainer import Trainer
+from pina.model import FeedForward
+from pina.equation.equation import Equation
+from pina.equation.equation_factory import FixedValue
+from pina.callbacks import MetricTracker
+
+
+def laplace_equation(input_, output_):
+    force_term = (torch.sin(input_.extract(['x']) * torch.pi) *
+                  torch.sin(input_.extract(['y']) * torch.pi))
+    delta_u = laplacian(output_.extract(['u']), input_)
+    return delta_u - force_term
+
+
+my_laplace = Equation(laplace_equation)
+in_ = LabelTensor(torch.tensor([[0., 1.]]), ['x', 'y'])
+out_ = LabelTensor(torch.tensor([[0.]]), ['u'])
+
+
+class Poisson(SpatialProblem):
+    output_variables = ['u']
+    spatial_domain = CartesianDomain({'x': [0, 1], 'y': [0, 1]})
+
+    conditions = {
+        'gamma1': Condition(
+            location=CartesianDomain({'x': [0, 1], 'y': 1}),
+            equation=FixedValue(0.0)),
+        'gamma2': Condition(
+            location=CartesianDomain({'x': [0, 1], 'y': 0}),
+            equation=FixedValue(0.0)),
+        'gamma3': Condition(
+            location=CartesianDomain({'x': 1, 'y': [0, 1]}),
+            equation=FixedValue(0.0)),
+        'gamma4': Condition(
+            location=CartesianDomain({'x': 0, 'y': [0, 1]}),
+            equation=FixedValue(0.0)),
+        'D': Condition(
+            input_points=LabelTensor(torch.rand(size=(100, 2)), ['x', 'y']),
+            equation=my_laplace),
+        'data': Condition(
+            input_points=in_,
+            output_points=out_)
+    }
+
+
+# make the problem
+poisson_problem = Poisson()
+boundaries = ['gamma1', 'gamma2', 'gamma3', 'gamma4']
+n = 10
+poisson_problem.discretise_domain(n, 'grid', locations=boundaries)
+model = FeedForward(len(poisson_problem.input_variables),
+                    len(poisson_problem.output_variables))
+
+# make the solver
+solver = PINN(problem=poisson_problem, model=model)
+
+
+def test_metric_tracker_constructor():
+    MetricTracker()
+
+def test_metric_tracker_routine():
+    # make the trainer
+    trainer = Trainer(solver=solver,
+                      callbacks=[
+                          MetricTracker()
+                      ],
+                      accelerator='cpu',
+                      max_epochs=5)
+    trainer.train()
+    # get the tracked metrics
+    metrics = trainer.callbacks[0].metrics
+    # assert the logged metrics are correct
+    logged_metrics = sorted(list(metrics.keys()))
+    total_metrics = sorted(
+        list([key + '_loss' for key in poisson_problem.conditions.keys()])
+        + ['mean_loss'])
+    assert logged_metrics == total_metrics
+
+
diff --git a/tests/test_solvers/test_basepinn.py b/tests/test_solvers/test_basepinn.py
new file mode 100644
index 0000000..e7f820d
--- /dev/null
+++ b/tests/test_solvers/test_basepinn.py
@@ -0,0 +1,113 @@
+import torch
+import pytest
+
+from pina import Condition, LabelTensor, Trainer
+from pina.problem import SpatialProblem
+from pina.operators import laplacian
+from pina.geometry import CartesianDomain
+from pina.model import FeedForward
+from pina.solvers import PINNInterface
+from pina.equation.equation import Equation
+from pina.equation.equation_factory import FixedValue
+
+def laplace_equation(input_, output_):
+    force_term = (torch.sin(input_.extract(['x']) * torch.pi) *
+                  torch.sin(input_.extract(['y']) * torch.pi))
+    delta_u = laplacian(output_.extract(['u']), input_)
+    return delta_u - force_term
+
+
+my_laplace = Equation(laplace_equation)
+in_ = LabelTensor(torch.tensor([[0., 1.]]), ['x', 'y'])
+out_ = LabelTensor(torch.tensor([[0.]]), ['u'])
+in2_ = LabelTensor(torch.rand(60, 2), ['x', 'y'])
+out2_ = LabelTensor(torch.rand(60, 1), ['u'])
+
+
+
+class Poisson(SpatialProblem):
+    output_variables = ['u']
+    spatial_domain = CartesianDomain({'x': [0, 1], 'y': [0, 1]})
+
+    conditions = {
+        'gamma1': Condition(
+            location=CartesianDomain({'x': [0, 1], 'y': 1}),
+            equation=FixedValue(0.0)),
+        'gamma2': Condition(
+            location=CartesianDomain({'x': [0, 1], 'y': 0}),
+            equation=FixedValue(0.0)),
+        'gamma3': Condition(
+            location=CartesianDomain({'x': 1, 'y': [0, 1]}),
+            equation=FixedValue(0.0)),
+        'gamma4': Condition(
+            location=CartesianDomain({'x': 0, 'y': [0, 1]}),
+            equation=FixedValue(0.0)),
+        'D': Condition(
+            input_points=LabelTensor(torch.rand(size=(100, 2)), ['x', 'y']),
+            equation=my_laplace),
+        'data': Condition(
+            input_points=in_,
+            output_points=out_),
+        'data2': Condition(
+            input_points=in2_,
+            output_points=out2_)
+    }
+
+    def poisson_sol(self, pts):
+        return -(torch.sin(pts.extract(['x']) * torch.pi) *
+                 torch.sin(pts.extract(['y']) * torch.pi)) / (2 * torch.pi**2)
+
+    truth_solution = poisson_sol
+
+class FOOPINN(PINNInterface):
+    def __init__(self, model, problem):
+        super().__init__(models=[model], problem=problem,
+                         optimizers=[torch.optim.Adam],
+                         optimizers_kwargs=[{'lr' : 0.001}],
+                         extra_features=None,
+                         loss=torch.nn.MSELoss())
+    def forward(self, x):
+        return self.models[0](x)
+
+    def loss_phys(self, samples, equation):
+        residual = self.compute_residual(samples=samples, equation=equation)
+        loss_value = self.loss(
+            torch.zeros_like(residual, requires_grad=True), residual
+        )
+        self.store_log(loss_value=float(loss_value))
+        return loss_value
+
+    def configure_optimizers(self):
+        return self.optimizers, []
+
+# make the problem
+poisson_problem = Poisson()
+poisson_problem.discretise_domain(100)
+model = FeedForward(len(poisson_problem.input_variables),
+                    len(poisson_problem.output_variables))
+model_extra_feats = FeedForward(
+    len(poisson_problem.input_variables) + 1,
+    len(poisson_problem.output_variables))
+
+
+def test_constructor():
+    with pytest.raises(TypeError):
+        PINNInterface()
+    # a simple pinn built with PINNInterface
+    FOOPINN(model, poisson_problem)
+
+def test_train_step():
+    solver = FOOPINN(model, poisson_problem)
+    trainer = Trainer(solver, max_epochs=2, accelerator='cpu')
+    trainer.train()
+
+def test_log():
+    solver = FOOPINN(model, poisson_problem)
+    trainer = Trainer(solver, max_epochs=2, accelerator='cpu')
+    trainer.train()
+    # assert the logged metrics are correct
+    logged_metrics = sorted(list(trainer.logged_metrics.keys()))
+    total_metrics = sorted(
+        list([key + '_loss' for key in poisson_problem.conditions.keys()])
+        + ['mean_loss'])
+    assert logged_metrics == total_metrics
\ No newline at end of file
diff --git a/tests/test_solvers/test_causalpinn.py b/tests/test_solvers/test_causalpinn.py
index 58518ae..bad5255 100644
--- a/tests/test_solvers/test_causalpinn.py
+++ b/tests/test_solvers/test_causalpinn.py
@@ -138,6 +138,18 @@ def test_train_cpu():
                       accelerator='cpu', batch_size=20)
     trainer.train()
 
+def test_log():
+    problem.discretise_domain(100)
+    solver = CausalPINN(problem = problem,
+                        model=model, extra_features=None, loss=LpLoss())
+    trainer = Trainer(solver, max_epochs=2, accelerator='cpu')
+    trainer.train()
+    # assert the logged metrics are correct
+    logged_metrics = sorted(list(trainer.logged_metrics.keys()))
+    total_metrics = sorted(
+        list([key + '_loss' for key in problem.conditions.keys()])
+        + ['mean_loss'])
+    assert logged_metrics == total_metrics
 
 def test_train_restore():
     tmpdir = "tests/tmp_restore"
diff --git a/tests/test_solvers/test_competitive_pinn.py b/tests/test_solvers/test_competitive_pinn.py
index 9c6f6c8..fae6d43 100644
--- a/tests/test_solvers/test_competitive_pinn.py
+++ b/tests/test_solvers/test_competitive_pinn.py
@@ -163,6 +163,17 @@ def test_train_cpu():
                       accelerator='cpu', batch_size=20)
     trainer.train()
 
+def test_log():
+    poisson_problem.discretise_domain(100)
+    solver = PINN(problem = poisson_problem, model=model, loss=LpLoss())
+    trainer = Trainer(solver, max_epochs=2, accelerator='cpu')
+    trainer.train()
+    # assert the logged metrics are correct
+    logged_metrics = sorted(list(trainer.logged_metrics.keys()))
+    total_metrics = sorted(
+        list([key + '_loss' for key in poisson_problem.conditions.keys()])
+        + ['mean_loss'])
+    assert logged_metrics == total_metrics
 
 def test_train_restore():
     tmpdir = "tests/tmp_restore"
diff --git a/tests/test_solvers/test_gpinn.py b/tests/test_solvers/test_gpinn.py
index d00d3b4..7c2bb50 100644
--- a/tests/test_solvers/test_gpinn.py
+++ b/tests/test_solvers/test_gpinn.py
@@ -160,6 +160,18 @@ def test_train_cpu():
                       accelerator='cpu', batch_size=20)
     trainer.train()
 
+def test_log():
+    poisson_problem.discretise_domain(100)
+    solver = GPINN(problem = poisson_problem, model=model,
+                   extra_features=None, loss=LpLoss())
+    trainer = Trainer(solver, max_epochs=2, accelerator='cpu')
+    trainer.train()
+    # assert the logged metrics are correct
+    logged_metrics = sorted(list(trainer.logged_metrics.keys()))
+    total_metrics = sorted(
+        list([key + '_loss' for key in poisson_problem.conditions.keys()])
+        + ['mean_loss'])
+    assert logged_metrics == total_metrics
 
 def test_train_restore():
     tmpdir = "tests/tmp_restore"
diff --git a/tests/test_solvers/test_pinn.py b/tests/test_solvers/test_pinn.py
index ea3b077..f3cf275 100644
--- a/tests/test_solvers/test_pinn.py
+++ b/tests/test_solvers/test_pinn.py
@@ -161,6 +161,18 @@ def test_train_cpu():
                       accelerator='cpu', batch_size=20)
     trainer.train()
 
+def test_log():
+    poisson_problem.discretise_domain(100)
+    solver = PINN(problem = poisson_problem, model=model,
+                  extra_features=None, loss=LpLoss())
+    trainer = Trainer(solver, max_epochs=2, accelerator='cpu')
+    trainer.train()
+    # assert the logged metrics are correct
+    logged_metrics = sorted(list(trainer.logged_metrics.keys()))
+    total_metrics = sorted(
+        list([key + '_loss' for key in poisson_problem.conditions.keys()])
+        + ['mean_loss'])
+    assert logged_metrics == total_metrics
 
 def test_train_restore():
     tmpdir = "tests/tmp_restore"
diff --git a/tests/test_solvers/test_rba_pinn.py b/tests/test_solvers/test_rba_pinn.py
index 6622666..aad47bb 100644
--- a/tests/test_solvers/test_rba_pinn.py
+++ b/tests/test_solvers/test_rba_pinn.py
@@ -165,6 +165,18 @@ def test_train_cpu():
                       accelerator='cpu', batch_size=20)
     trainer.train()
 
+def test_log():
+    poisson_problem.discretise_domain(100)
+    solver = PINN(problem = poisson_problem, model=model,
+                  extra_features=None, loss=LpLoss())
+    trainer = Trainer(solver, max_epochs=2, accelerator='cpu')
+    trainer.train()
+    # assert the logged metrics are correct
+    logged_metrics = sorted(list(trainer.logged_metrics.keys()))
+    total_metrics = sorted(
+        list([key + '_loss' for key in poisson_problem.conditions.keys()])
+        + ['mean_loss'])
+    assert logged_metrics == total_metrics
 
 def test_train_restore():
     tmpdir = "tests/tmp_restore"
diff --git a/tests/test_solvers/test_sapinn.py b/tests/test_solvers/test_sapinn.py
index 60c3094..45475fc 100644
--- a/tests/test_solvers/test_sapinn.py
+++ b/tests/test_solvers/test_sapinn.py
@@ -165,6 +165,18 @@ def test_train_cpu():
                       accelerator='cpu', batch_size=20)
     trainer.train()
 
+def test_log():
+    poisson_problem.discretise_domain(100)
+    solver = PINN(problem = poisson_problem, model=model,
+                  extra_features=None, loss=LpLoss())
+    trainer = Trainer(solver, max_epochs=2, accelerator='cpu')
+    trainer.train()
+    # assert the logged metrics are correct
+    logged_metrics = sorted(list(trainer.logged_metrics.keys()))
+    total_metrics = sorted(
+        list([key + '_loss' for key in poisson_problem.conditions.keys()])
+        + ['mean_loss'])
+    assert logged_metrics == total_metrics
 
 def test_train_restore():
     tmpdir = "tests/tmp_restore"
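For context, a minimal sketch of the workflow these new tests exercise, assuming a `solver` built as in the added test files (e.g. a PINN on the Poisson problem); variable names are illustrative, not part of this patch:

```python
from pina.trainer import Trainer
from pina.callbacks import MetricTracker

tracker = MetricTracker()
trainer = Trainer(solver=solver,        # e.g. PINN(problem=poisson_problem, model=model)
                  callbacks=[tracker],
                  accelerator='cpu',
                  max_epochs=5)
trainer.train()

# With the patched MetricTracker, the tracked metrics should contain one
# '<condition>_loss' entry per problem condition plus 'mean_loss'.
print(sorted(tracker.metrics.keys()))
```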