minor fix

This commit is contained in:
Dario Coscia
2023-09-19 15:13:50 +02:00
committed by Nicola Demo
parent 4d1187898f
commit 1936133ad5
5 changed files with 222 additions and 57 deletions

View File

@@ -0,0 +1,7 @@
# Public API of the PINA callbacks subpackage.
__all__ = [
    'SwitchOptimizer',
    'R3Refinement',
]
from .optimizer_callbacks import SwitchOptimizer
# NOTE(review): module name spells "refinment" -- it matches the file on disk;
# rename both together if the typo is ever fixed.
from .adaptive_refinment_callbacks import R3Refinement

View File

@@ -2,65 +2,9 @@
from lightning.pytorch.callbacks import Callback
import torch
from .utils import check_consistency
from ..utils import check_consistency
class SwitchOptimizer(Callback):
    """
    PINA implementation of a Lightning Callback to switch
    optimizer during training. The routine can be used to
    try multiple optimizers during the training, without the
    need to stop training.
    """

    def __init__(self, new_optimizers, new_optimizers_kargs, epoch_switch):
        """
        SwitchOptimizer is a routine for switching optimizer during training.

        :param torch.optim.Optimizer | list new_optimizers: The model
            optimizer(s) to switch to. A single
            :class:`torch.optim.Optimizer` subclass, or a list of them for
            multiple-model solvers.
        :param dict | list new_optimizers_kargs: The keyword arguments for the
            new optimizer(s). A dict, or a list of dicts with one entry per
            optimizer.
        :param int epoch_switch: Epoch at which the optimizers are switched.
        :raises ValueError: If ``epoch_switch`` is smaller than one, or if the
            number of keyword-argument dicts does not match the number of
            optimizers.
        """
        super().__init__()
        # check type consistency
        check_consistency(new_optimizers, torch.optim.Optimizer, subclass=True)
        check_consistency(new_optimizers_kargs, dict)
        check_consistency(epoch_switch, int)
        if epoch_switch < 1:
            raise ValueError('epoch_switch must be greater than one.')
        # Normalize both arguments to lists so single- and multi-optimizer
        # inputs share one code path.
        if not isinstance(new_optimizers, list):
            optimizers = [new_optimizers]
            optimizers_kwargs = [new_optimizers_kargs]
        else:
            # BUGFIX: these names were previously bound only in the non-list
            # branch, so passing lists raised NameError in the checks below.
            optimizers = new_optimizers
            optimizers_kwargs = new_optimizers_kargs
        len_optimizer = len(optimizers)
        len_optimizer_kwargs = len(optimizers_kwargs)
        if len_optimizer_kwargs != len_optimizer:
            # BUGFIX: added the missing space before "Got" and fixed the
            # "dicitionaries" typo in the message.
            raise ValueError('You must define one dictionary of keyword'
                             ' arguments for each optimizers.'
                             f' Got {len_optimizer} optimizers, and'
                             f' {len_optimizer_kwargs} dictionaries')
        # save new optimizers
        self._new_optimizers = optimizers
        self._new_optimizers_kwargs = optimizers_kwargs
        self._epoch_switch = epoch_switch

    def on_train_epoch_start(self, trainer, __):
        """Replace the trainer optimizers once the switch epoch is reached."""
        if trainer.current_epoch == self._epoch_switch:
            optims = []
            # Build one new optimizer per model, over that model's parameters.
            # NOTE(review): relies on the private ``trainer._model.models``
            # attribute -- confirm against the Trainer implementation.
            for idx, (optim, optim_kwargs) in enumerate(
                    zip(self._new_optimizers, self._new_optimizers_kwargs)):
                optims.append(
                    optim(trainer._model.models[idx].parameters(),
                          **optim_kwargs))
            trainer.optimizers = optims
class R3Refinement(Callback):
"""

View File

@@ -0,0 +1,62 @@
'''PINA Callbacks Implementations'''
from lightning.pytorch.callbacks import Callback
import torch
from ..utils import check_consistency
class SwitchOptimizer(Callback):
    """
    PINA implementation of a Lightning Callback to switch
    optimizer during training. The routine can be used to
    try multiple optimizers during the training, without the
    need to stop training.
    """

    def __init__(self, new_optimizers, new_optimizers_kwargs, epoch_switch):
        """
        SwitchOptimizer is a routine for switching optimizer during training.

        :param torch.optim.Optimizer | list new_optimizers: The model
            optimizer(s) to switch to. A single
            :class:`torch.optim.Optimizer` subclass, or a list of them for
            multiple-model solvers.
        :param dict | list new_optimizers_kwargs: The keyword arguments for
            the new optimizer(s). A dict, or a list of dicts with one entry
            per optimizer.
        :param int epoch_switch: Epoch at which the optimizers are switched.
        :raises ValueError: If ``epoch_switch`` is smaller than one, or if the
            number of keyword-argument dicts does not match the number of
            optimizers.
        """
        super().__init__()
        # check type consistency
        check_consistency(new_optimizers, torch.optim.Optimizer, subclass=True)
        check_consistency(new_optimizers_kwargs, dict)
        check_consistency(epoch_switch, int)
        if epoch_switch < 1:
            raise ValueError('epoch_switch must be greater than one.')
        # Normalize both arguments to lists so single- and multi-optimizer
        # inputs share one code path.
        if not isinstance(new_optimizers, list):
            new_optimizers = [new_optimizers]
            new_optimizers_kwargs = [new_optimizers_kwargs]
        len_optimizer = len(new_optimizers)
        len_optimizer_kwargs = len(new_optimizers_kwargs)
        if len_optimizer_kwargs != len_optimizer:
            # BUGFIX: fixed the "dicitionaries" typo in the error message.
            raise ValueError('You must define one dictionary of keyword'
                             ' arguments for each optimizers.'
                             f' Got {len_optimizer} optimizers, and'
                             f' {len_optimizer_kwargs} dictionaries')
        # save new optimizers
        self._new_optimizers = new_optimizers
        self._new_optimizers_kwargs = new_optimizers_kwargs
        self._epoch_switch = epoch_switch

    def on_train_epoch_start(self, trainer, __):
        """Replace the trainer optimizers once the switch epoch is reached."""
        if trainer.current_epoch == self._epoch_switch:
            optims = []
            # Build one new optimizer per model, over that model's parameters.
            # NOTE(review): relies on the private ``trainer._model.models``
            # attribute -- confirm against the Trainer implementation.
            for idx, (optim, optim_kwargs) in enumerate(
                    zip(self._new_optimizers, self._new_optimizers_kwargs)):
                optims.append(
                    optim(trainer._model.models[idx].parameters(),
                          **optim_kwargs))
            trainer.optimizers = optims

View File

@@ -0,0 +1,71 @@
from pina.callbacks import R3Refinement
import torch
import pytest
from pina.problem import SpatialProblem
from pina.operators import laplacian
from pina.geometry import CartesianDomain
from pina import Condition, LabelTensor, PINN
from pina.trainer import Trainer
from pina.model import FeedForward
from pina.equation.equation import Equation
from pina.equation.equation_factory import FixedValue
def laplace_equation(input_, output_):
    """Residual of the Poisson equation with a sinusoidal forcing term."""
    x = input_.extract(['x'])
    y = input_.extract(['y'])
    forcing = torch.sin(x * torch.pi) * torch.sin(y * torch.pi)
    laplace_u = laplacian(output_.extract(['u']), input_)
    return laplace_u - forcing
# wrap the residual function into a PINA Equation object
my_laplace = Equation(laplace_equation)
# single supervised input/output pair used by the 'data' condition below
in_ = LabelTensor(torch.tensor([[0., 1.]]), ['x', 'y'])
out_ = LabelTensor(torch.tensor([[0.]]), ['u'])
class Poisson(SpatialProblem):
    """2D Poisson problem on the unit square, used as a test fixture."""
    output_variables = ['u']
    spatial_domain = CartesianDomain({'x': [0, 1], 'y': [0, 1]})
    conditions = {
        # homogeneous Dirichlet conditions on the four edges of the square
        'gamma1': Condition(
            location=CartesianDomain({'x': [0, 1], 'y': 1}),
            equation=FixedValue(0.0)),
        'gamma2': Condition(
            location=CartesianDomain({'x': [0, 1], 'y': 0}),
            equation=FixedValue(0.0)),
        'gamma3': Condition(
            location=CartesianDomain({'x': 1, 'y': [0, 1]}),
            equation=FixedValue(0.0)),
        'gamma4': Condition(
            location=CartesianDomain({'x': 0, 'y': [0, 1]}),
            equation=FixedValue(0.0)),
        # interior residual of the Laplace equation on 100 random points
        'D': Condition(
            input_points=LabelTensor(torch.rand(size=(100, 2)), ['x', 'y']),
            equation=my_laplace),
        # one supervised data point
        'data': Condition(
            input_points=in_,
            output_points=out_)
    }
# make the problem
poisson_problem = Poisson()
boundaries = ['gamma1', 'gamma2', 'gamma3', 'gamma4']
n = 10
# sample n grid points on each boundary location
poisson_problem.discretise_domain(n, 'grid', locations=boundaries)
model = FeedForward(len(poisson_problem.input_variables),len(poisson_problem.output_variables))
# make the solver
solver = PINN(problem=poisson_problem, model=model)
def test_r3constructor():
    """R3Refinement can be constructed with only ``sample_every``."""
    R3Refinement(sample_every=10)
def test_r3refinment_routine():
    """R3Refinement runs without errors during a short training session."""
    # make the trainer; resample every epoch to exercise the callback
    trainer = Trainer(solver=solver, callbacks=[R3Refinement(sample_every=1)], max_epochs=5)
    trainer.train()

View File

@@ -0,0 +1,81 @@
from pina.callbacks import SwitchOptimizer
import torch
import pytest
from pina.problem import SpatialProblem
from pina.operators import laplacian
from pina.geometry import CartesianDomain
from pina import Condition, LabelTensor, PINN
from pina.trainer import Trainer
from pina.model import FeedForward
from pina.equation.equation import Equation
from pina.equation.equation_factory import FixedValue
def laplace_equation(input_, output_):
    """Residual of the Poisson equation with a sinusoidal forcing term."""
    sin_x = torch.sin(input_.extract(['x']) * torch.pi)
    sin_y = torch.sin(input_.extract(['y']) * torch.pi)
    laplace_u = laplacian(output_.extract(['u']), input_)
    return laplace_u - sin_x * sin_y
# wrap the residual function into a PINA Equation object
my_laplace = Equation(laplace_equation)
# single supervised input/output pair used by the 'data' condition below
in_ = LabelTensor(torch.tensor([[0., 1.]]), ['x', 'y'])
out_ = LabelTensor(torch.tensor([[0.]]), ['u'])
class Poisson(SpatialProblem):
    """2D Poisson problem on the unit square, used as a test fixture."""
    output_variables = ['u']
    spatial_domain = CartesianDomain({'x': [0, 1], 'y': [0, 1]})
    conditions = {
        # homogeneous Dirichlet conditions on the four edges of the square
        'gamma1': Condition(
            location=CartesianDomain({'x': [0, 1], 'y': 1}),
            equation=FixedValue(0.0)),
        'gamma2': Condition(
            location=CartesianDomain({'x': [0, 1], 'y': 0}),
            equation=FixedValue(0.0)),
        'gamma3': Condition(
            location=CartesianDomain({'x': 1, 'y': [0, 1]}),
            equation=FixedValue(0.0)),
        'gamma4': Condition(
            location=CartesianDomain({'x': 0, 'y': [0, 1]}),
            equation=FixedValue(0.0)),
        # interior residual of the Laplace equation on 100 random points
        'D': Condition(
            input_points=LabelTensor(torch.rand(size=(100, 2)), ['x', 'y']),
            equation=my_laplace),
        # one supervised data point
        'data': Condition(
            input_points=in_,
            output_points=out_)
    }
# make the problem
poisson_problem = Poisson()
boundaries = ['gamma1', 'gamma2', 'gamma3', 'gamma4']
n = 10
# sample n grid points on each boundary location
poisson_problem.discretise_domain(n, 'grid', locations=boundaries)
model = FeedForward(len(poisson_problem.input_variables),len(poisson_problem.output_variables))
# make the solver
solver = PINN(problem=poisson_problem, model=model)
def test_switch_optimizer_constructor():
    """A single optimizer is accepted; mismatched kwargs lists raise."""
    SwitchOptimizer(new_optimizers=torch.optim.Adam,
                    new_optimizers_kwargs={'lr':0.01},
                    epoch_switch=10)
    # two optimizers but only one kwargs dict -> ValueError
    with pytest.raises(ValueError):
        SwitchOptimizer(new_optimizers=[torch.optim.Adam, torch.optim.Adam],
                        new_optimizers_kwargs=[{'lr':0.01}],
                        epoch_switch=10)
def test_switch_optimizer_routine():
    """Training runs across the optimizer switch (Adam -> LBFGS at epoch 3)."""
    # make the trainer
    trainer = Trainer(solver=solver, callbacks=[SwitchOptimizer(new_optimizers=torch.optim.LBFGS,
                                                                new_optimizers_kwargs={'lr':0.01},
                                                                epoch_switch=3)], max_epochs=5)
    trainer.train()