🎨 Format Python code with psf/black (#297)

Co-authored-by: dario-coscia <dario-coscia@users.noreply.github.com>
github-actions[bot]
2024-05-10 14:08:01 +02:00
committed by GitHub
parent e0429bb445
commit 9463ae4b15
11 changed files with 169 additions and 160 deletions
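The hunks below are mechanical rewrites produced by black rather than hand edits: single quotes become double quotes, spacing around operators and dict colons is normalized, trailing commas are added to multi-line calls, and long statements are re-wrapped. As a minimal sketch, assuming black is installed (pip install black) and using its default line length (which may differ from this repository's configuration), one of the quote/spacing rewrites in this diff can be reproduced through black's Python API:

import black

# A one-line module mimicking a pattern changed in this commit:
# the space before the dict colon is dropped and single quotes
# become double quotes.
src = "optimizer_model_kwargs = {'lr' : 0.001}\n"
formatted = black.format_str(src, mode=black.Mode())
print(formatted)  # optimizer_model_kwargs = {"lr": 0.001}

Running black over the whole package (e.g. black pina/) applies the same normalization file-wide, which is what produces every hunk that follows.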

View File

@@ -9,11 +9,10 @@ __all__ = [
"SupervisedSolver", "SupervisedSolver",
"ReducedOrderModelSolver", "ReducedOrderModelSolver",
"GAROM", "GAROM",
] ]
from .solver import SolverInterface from .solver import SolverInterface
from .pinns import * from .pinns import *
from .supervised import SupervisedSolver from .supervised import SupervisedSolver
from .rom import ReducedOrderModelSolver from .rom import ReducedOrderModelSolver
from .garom import GAROM from .garom import GAROM

View File

@@ -12,6 +12,7 @@ from torch.nn.modules.loss import _Loss
 torch.pi = torch.acos(torch.zeros(1)).item() * 2  # which is 3.1415927410125732
 
+
 class PINNInterface(SolverInterface, metaclass=ABCMeta):
     """
     Base PINN solver class. This class implements the Solver Interface
@@ -72,7 +73,7 @@ class PINNInterface(SolverInterface, metaclass=ABCMeta):
             self._clamp_params = self._clamp_inverse_problem_params
         else:
             self._params = None
-            self._clamp_params = lambda : None
+            self._clamp_params = lambda: None
 
         # variable used internally to store residual losses at each epoch
         # this variable save the residual at each iteration (not weighted)
@@ -195,7 +196,7 @@ class PINNInterface(SolverInterface, metaclass=ABCMeta):
         :param torch.Tensor loss_value: The value of the loss.
         """
         self.log(
-            self.__logged_metric+'_loss',
+            self.__logged_metric + "_loss",
             loss_value,
             prog_bar=True,
             logger=True,
@@ -211,9 +212,9 @@ class PINNInterface(SolverInterface, metaclass=ABCMeta):
""" """
if self.__logged_res_losses: if self.__logged_res_losses:
# storing mean loss # storing mean loss
self.__logged_metric = 'mean' self.__logged_metric = "mean"
self.store_log( self.store_log(
sum(self.__logged_res_losses)/len(self.__logged_res_losses) sum(self.__logged_res_losses) / len(self.__logged_res_losses)
) )
# free the logged losses # free the logged losses
self.__logged_res_losses = [] self.__logged_res_losses = []

View File

@@ -111,11 +111,13 @@ class CausalPINN(PINN):
         )
         # checking consistency
-        check_consistency(eps, (int,float))
+        check_consistency(eps, (int, float))
         self._eps = eps
         if not isinstance(self.problem, TimeDependentProblem):
-            raise ValueError('Casual PINN works only for problems'
-                             'inheritig from TimeDependentProblem.')
+            raise ValueError(
+                "Casual PINN works only for problems"
+                "inheritig from TimeDependentProblem."
+            )
 
     def loss_phys(self, samples, equation):
         """
@@ -144,7 +146,7 @@ class CausalPINN(PINN):
             )
             time_loss.append(loss_val)
         # store results
-        self.store_log(loss_value=float(sum(time_loss)/len(time_loss)))
+        self.store_log(loss_value=float(sum(time_loss) / len(time_loss)))
         # concatenate residuals
         time_loss = torch.stack(time_loss)
         # compute weights (without the gradient storing)

View File

@@ -117,7 +117,7 @@ class CompetitivePINN(PINNInterface):
                 optimizer_discriminator_kwargs,
             ],
             extra_features=None,  # CompetitivePINN doesn't take extra features
-            loss=loss
+            loss=loss,
         )
 
         # set automatic optimization for GANs
@@ -131,9 +131,7 @@ class CompetitivePINN(PINNInterface):
         # assign schedulers
         self._schedulers = [
-            scheduler_model(
-                self.optimizers[0], **scheduler_model_kwargs
-            ),
+            scheduler_model(self.optimizers[0], **scheduler_model_kwargs),
             scheduler_discriminator(
                 self.optimizers[1], **scheduler_discriminator_kwargs
             ),
@@ -195,8 +193,11 @@ class CompetitivePINN(PINNInterface):
         :rtype: torch.Tensor
         """
         self.optimizer_model.zero_grad()
-        loss_val = super().loss_data(
-            input_tensor, output_tensor).as_subclass(torch.Tensor)
+        loss_val = (
+            super()
+            .loss_data(input_tensor, output_tensor)
+            .as_subclass(torch.Tensor)
+        )
         loss_val.backward()
         self.optimizer_model.step()
         return loss_val
@@ -221,7 +222,7 @@ class CompetitivePINN(PINNInterface):
         )
         return self.optimizers, self._schedulers
 
-    def on_train_batch_end(self,outputs, batch, batch_idx):
+    def on_train_batch_end(self, outputs, batch, batch_idx):
         """
         This method is called at the end of each training batch, and ovverides
         the PytorchLightining implementation for logging the checkpoints.
@@ -235,7 +236,9 @@ class CompetitivePINN(PINNInterface):
         :rtype: Any
         """
         # increase by one the counter of optimization to save loggers
-        self.trainer.fit_loop.epoch_loop.manual_optimization.optim_step_progress.total.completed += 1
+        self.trainer.fit_loop.epoch_loop.manual_optimization.optim_step_progress.total.completed += (
+            1
+        )
         return super().on_train_batch_end(outputs, batch, batch_idx)
 
     def _train_discriminator(self, samples, equation, discriminator_bets):
@@ -252,13 +255,14 @@ class CompetitivePINN(PINNInterface):
         self.optimizer_discriminator.zero_grad()
         # compute residual, we detach because the weights of the generator
         # model are fixed
-        residual = self.compute_residual(samples=samples,
-                                         equation=equation).detach()
+        residual = self.compute_residual(
+            samples=samples, equation=equation
+        ).detach()
         # compute competitive residual, the minus is because we maximise
         competitive_residual = residual * discriminator_bets
-        loss_val = - self.loss(
+        loss_val = -self.loss(
             torch.zeros_like(competitive_residual, requires_grad=True),
-            competitive_residual
+            competitive_residual,
         ).as_subclass(torch.Tensor)
         # backprop
         self.manual_backward(loss_val)
@@ -283,16 +287,13 @@ class CompetitivePINN(PINNInterface):
         residual = self.compute_residual(samples=samples, equation=equation)
         # store logging
         with torch.no_grad():
-            loss_residual = self.loss(
-                torch.zeros_like(residual),
-                residual
-            )
+            loss_residual = self.loss(torch.zeros_like(residual), residual)
         # compute competitive residual, discriminator_bets are detached becase
         # we optimize only the generator model
         competitive_residual = residual * discriminator_bets.detach()
         loss_val = self.loss(
             torch.zeros_like(competitive_residual, requires_grad=True),
-            competitive_residual
+            competitive_residual,
         ).as_subclass(torch.Tensor)
         # backprop
         self.manual_backward(loss_val)

View File

@@ -100,11 +100,12 @@ class GPINN(PINN):
             scheduler_kwargs=scheduler_kwargs,
         )
         if not isinstance(self.problem, SpatialProblem):
-            raise ValueError('Gradient PINN computes the gradient of the '
-                             'PINN loss with respect to the spatial '
-                             'coordinates, thus the PINA problem must be '
-                             'a SpatialProblem.')
+            raise ValueError(
+                "Gradient PINN computes the gradient of the "
+                "PINN loss with respect to the spatial "
+                "coordinates, thus the PINA problem must be "
+                "a SpatialProblem."
+            )
 
     def loss_phys(self, samples, equation):
         """
@@ -126,7 +127,7 @@ class GPINN(PINN):
         self.store_log(loss_value=float(loss_value))
         # gradient PINN loss
         loss_value = loss_value.reshape(-1, 1)
-        loss_value.labels = ['__LOSS']
+        loss_value.labels = ["__LOSS"]
         loss_grad = grad(loss_value, samples, d=self.problem.spatial_variables)
         g_loss_phys = self.loss(
             torch.zeros_like(loss_grad, requires_grad=True), loss_grad

View File

@@ -87,7 +87,7 @@ class PINN(PINNInterface):
             optimizers=[optimizer],
             optimizers_kwargs=[optimizer_kwargs],
             extra_features=extra_features,
-            loss=loss
+            loss=loss,
         )
 
         # check consistency
@@ -131,7 +131,6 @@ class PINN(PINNInterface):
         self.store_log(loss_value=float(loss_value))
         return loss_value
 
-
     def configure_optimizers(self):
         """
         Optimizer configuration for the PINN
@@ -153,7 +152,6 @@ class PINN(PINNInterface):
         )
         return self.optimizers, [self.scheduler]
 
-
     @property
     def scheduler(self):
         """
@@ -161,7 +159,6 @@ class PINN(PINNInterface):
""" """
return self._scheduler return self._scheduler
@property @property
def neural_net(self): def neural_net(self):
""" """

View File

@@ -14,6 +14,7 @@ from pina.problem import InverseProblem
 from torch.optim.lr_scheduler import ConstantLR
 
+
 class Weights(torch.nn.Module):
     """
     This class aims to implements the mask model for
@@ -27,9 +28,7 @@ class Weights(torch.nn.Module):
""" """
super().__init__() super().__init__()
check_consistency(func, torch.nn.Module) check_consistency(func, torch.nn.Module)
self.sa_weights = torch.nn.Parameter( self.sa_weights = torch.nn.Parameter(torch.Tensor())
torch.Tensor()
)
self.func = func self.func = func
def forward(self): def forward(self):
@@ -43,6 +42,7 @@ class Weights(torch.nn.Module):
""" """
return self.func(self.sa_weights) return self.func(self.sa_weights)
class SAPINN(PINNInterface): class SAPINN(PINNInterface):
r""" r"""
Self Adaptive Physics Informed Neural Network (SAPINN) solver class. Self Adaptive Physics Informed Neural Network (SAPINN) solver class.
@@ -115,13 +115,13 @@ class SAPINN(PINNInterface):
         extra_features=None,
         loss=torch.nn.MSELoss(),
         optimizer_model=torch.optim.Adam,
-        optimizer_model_kwargs={"lr" : 0.001},
+        optimizer_model_kwargs={"lr": 0.001},
         optimizer_weights=torch.optim.Adam,
-        optimizer_weights_kwargs={"lr" : 0.001},
+        optimizer_weights_kwargs={"lr": 0.001},
         scheduler_model=ConstantLR,
-        scheduler_model_kwargs={"factor" : 1, "total_iters" : 0},
+        scheduler_model_kwargs={"factor": 1, "total_iters": 0},
         scheduler_weights=ConstantLR,
-        scheduler_weights_kwargs={"factor" : 1, "total_iters" : 0}
+        scheduler_weights_kwargs={"factor": 1, "total_iters": 0},
     ):
         """
         :param AbstractProblem problem: The formualation of the problem.
@@ -167,17 +167,16 @@ class SAPINN(PINNInterface):
             weights_dict[condition_name] = Weights(weights_function)
         weights_dict = torch.nn.ModuleDict(weights_dict)
 
         super().__init__(
             models=[model, weights_dict],
             problem=problem,
             optimizers=[optimizer_model, optimizer_weights],
             optimizers_kwargs=[
                 optimizer_model_kwargs,
-                optimizer_weights_kwargs
+                optimizer_weights_kwargs,
             ],
             extra_features=extra_features,
-            loss=loss
+            loss=loss,
         )
 
         # set automatic optimization
@@ -191,12 +190,8 @@ class SAPINN(PINNInterface):
         # assign schedulers
         self._schedulers = [
-            scheduler_model(
-                self.optimizers[0], **scheduler_model_kwargs
-            ),
-            scheduler_weights(
-                self.optimizers[1], **scheduler_weights_kwargs
-            ),
+            scheduler_model(self.optimizers[0], **scheduler_model_kwargs),
+            scheduler_weights(self.optimizers[1], **scheduler_weights_kwargs),
         ]
 
         self._model = self.models[0]
@@ -235,7 +230,7 @@ class SAPINN(PINNInterface):
         # train weights
         self.optimizer_weights.zero_grad()
         weighted_loss, _ = self._loss_phys(samples, equation)
-        loss_value = - weighted_loss.as_subclass(torch.Tensor)
+        loss_value = -weighted_loss.as_subclass(torch.Tensor)
         self.manual_backward(loss_value)
         self.optimizer_weights.step()
@@ -271,7 +266,7 @@ class SAPINN(PINNInterface):
         # train weights
         self.optimizer_weights.zero_grad()
         weighted_loss, _ = self._loss_data(input_tensor, output_tensor)
-        loss_value = - weighted_loss.as_subclass(torch.Tensor)
+        loss_value = -weighted_loss.as_subclass(torch.Tensor)
         self.manual_backward(loss_value)
         self.optimizer_weights.step()
@@ -313,7 +308,7 @@ class SAPINN(PINNInterface):
         )
         return self.optimizers, self._schedulers
 
-    def on_train_batch_end(self,outputs, batch, batch_idx):
+    def on_train_batch_end(self, outputs, batch, batch_idx):
         """
         This method is called at the end of each training batch, and ovverides
         the PytorchLightining implementation for logging the checkpoints.
@@ -327,7 +322,9 @@ class SAPINN(PINNInterface):
         :rtype: Any
         """
         # increase by one the counter of optimization to save loggers
-        self.trainer.fit_loop.epoch_loop.manual_optimization.optim_step_progress.total.completed += 1
+        self.trainer.fit_loop.epoch_loop.manual_optimization.optim_step_progress.total.completed += (
+            1
+        )
         return super().on_train_batch_end(outputs, batch, batch_idx)
 
     def on_train_start(self):
@@ -343,9 +340,8 @@ class SAPINN(PINNInterface):
             self.trainer._accelerator_connector._accelerator_flag
         )
         for condition_name, tensor in self.problem.input_pts.items():
-            self.weights_dict.torchmodel[condition_name].sa_weights.data = torch.rand(
-                (tensor.shape[0], 1),
-                device = device
-            )
+            self.weights_dict.torchmodel[condition_name].sa_weights.data = (
+                torch.rand((tensor.shape[0], 1), device=device)
+            )
         return super().on_train_start()
@@ -358,8 +354,8 @@ class SAPINN(PINNInterface):
         :param dict checkpoint: Pytorch Lightning checkpoint dict.
         """
         for condition_name, tensor in self.problem.input_pts.items():
-            self.weights_dict.torchmodel[condition_name].sa_weights.data = torch.rand(
-                (tensor.shape[0], 1)
-            )
+            self.weights_dict.torchmodel[condition_name].sa_weights.data = (
+                torch.rand((tensor.shape[0], 1))
+            )
         return super().on_load_checkpoint(checkpoint)
@@ -403,12 +399,14 @@ class SAPINN(PINNInterface):
         :rtype List[LabelTensor, LabelTensor]
         """
         weights = self.weights_dict.torchmodel[
-            self.current_condition_name].forward()
-        loss_value = self._vectorial_loss(torch.zeros_like(
-            residual, requires_grad=True), residual)
+            self.current_condition_name
+        ].forward()
+        loss_value = self._vectorial_loss(
+            torch.zeros_like(residual, requires_grad=True), residual
+        )
         return (
             self._vect_to_scalar(weights * loss_value),
-            self._vect_to_scalar(loss_value)
+            self._vect_to_scalar(loss_value),
         )
 
     def _vect_to_scalar(self, loss_value):
@@ -426,11 +424,12 @@ class SAPINN(PINNInterface):
         elif self.loss.reduction == "sum":
             ret = torch.sum(loss_value)
         else:
-            raise RuntimeError(f"Invalid reduction, got {self.loss.reduction} "
-                               "but expected mean or sum.")
+            raise RuntimeError(
+                f"Invalid reduction, got {self.loss.reduction} "
+                "but expected mean or sum."
+            )
         return ret
 
     @property
     def neural_net(self):
         """

View File

@@ -4,6 +4,7 @@ import torch
 from pina.solvers import SupervisedSolver
 
+
 class ReducedOrderModelSolver(SupervisedSolver):
     r"""
     ReducedOrderModelSolver solver class. This class implements a
@@ -114,9 +115,12 @@ class ReducedOrderModelSolver(SupervisedSolver):
             rate scheduler.
         :param dict scheduler_kwargs: LR scheduler constructor keyword args.
         """
-        model = torch.nn.ModuleDict({
-            'reduction_network' : reduction_network,
-            'interpolation_network' : interpolation_network})
+        model = torch.nn.ModuleDict(
+            {
+                "reduction_network": reduction_network,
+                "interpolation_network": interpolation_network,
+            }
+        )
 
         super().__init__(
             model=model,
@@ -125,18 +129,22 @@ class ReducedOrderModelSolver(SupervisedSolver):
             optimizer=optimizer,
             optimizer_kwargs=optimizer_kwargs,
             scheduler=scheduler,
-            scheduler_kwargs=scheduler_kwargs
+            scheduler_kwargs=scheduler_kwargs,
         )
 
         # assert reduction object contains encode/ decode
-        if not hasattr(self.neural_net['reduction_network'], 'encode'):
-            raise SyntaxError('reduction_network must have encode method. '
-                              'The encode method should return a lower '
-                              'dimensional representation of the input.')
-        if not hasattr(self.neural_net['reduction_network'], 'decode'):
-            raise SyntaxError('reduction_network must have decode method. '
-                              'The decode method should return a high '
-                              'dimensional representation of the encoding.')
+        if not hasattr(self.neural_net["reduction_network"], "encode"):
+            raise SyntaxError(
+                "reduction_network must have encode method. "
+                "The encode method should return a lower "
+                "dimensional representation of the input."
+            )
+        if not hasattr(self.neural_net["reduction_network"], "decode"):
+            raise SyntaxError(
+                "reduction_network must have decode method. "
+                "The decode method should return a high "
+                "dimensional representation of the encoding."
+            )
 
     def forward(self, x):
         """
@@ -149,8 +157,8 @@ class ReducedOrderModelSolver(SupervisedSolver):
         :return: Solver solution.
         :rtype: torch.Tensor
         """
-        reduction_network = self.neural_net['reduction_network']
-        interpolation_network = self.neural_net['interpolation_network']
+        reduction_network = self.neural_net["reduction_network"]
+        interpolation_network = self.neural_net["interpolation_network"]
         return reduction_network.decode(interpolation_network(x))
 
     def loss_data(self, input_pts, output_pts):
@@ -167,17 +175,18 @@ class ReducedOrderModelSolver(SupervisedSolver):
         :rtype: torch.Tensor
         """
         # extract networks
-        reduction_network = self.neural_net['reduction_network']
-        interpolation_network = self.neural_net['interpolation_network']
+        reduction_network = self.neural_net["reduction_network"]
+        interpolation_network = self.neural_net["interpolation_network"]
         # encoded representations loss
         encode_repr_inter_net = interpolation_network(input_pts)
         encode_repr_reduction_network = reduction_network.encode(output_pts)
-        loss_encode = self.loss(encode_repr_inter_net,
-                                encode_repr_reduction_network)
+        loss_encode = self.loss(
+            encode_repr_inter_net, encode_repr_reduction_network
+        )
         # reconstruction loss
         loss_reconstruction = self.loss(
-            reduction_network.decode(encode_repr_reduction_network),
-            output_pts)
+            reduction_network.decode(encode_repr_reduction_network), output_pts
+        )
         return loss_encode + loss_reconstruction

View File

@@ -67,9 +67,9 @@ class Trainer(pytorch_lightning.Trainer):
         pb = self._model.problem
         if hasattr(pb, "unknown_parameters"):
             for key in pb.unknown_parameters:
-                pb.unknown_parameters[key] = torch.nn.Parameter(pb.unknown_parameters[key].data.to(device))
+                pb.unknown_parameters[key] = torch.nn.Parameter(
+                    pb.unknown_parameters[key].data.to(device)
+                )
 
     def train(self, **kwargs):
         """