🎨 Format Python code with psf/black (#297)
Co-authored-by: dario-coscia <dario-coscia@users.noreply.github.com>
Committed via GitHub (parent e0429bb445, commit 9463ae4b15).
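Every hunk below is mechanical reformatting produced by the black code formatter (e.g. by running `black .` over the package); no behavior changes. The main normalizations visible in the hunks, condensed into a toy snippet (illustrative code, not taken from the codebase):

# Style normalizations black applies throughout this commit, shown on toy code.
opts = {"lr": 0.001}              # was: {'lr' : 0.001} -- double quotes, tight colons
clamp = lambda: None              # was: lambda : None
losses = [1.0, 2.0, 3.0]
mean = sum(losses) / len(losses)  # was: sum(losses)/len(losses)
neg = -mean                       # was: - mean
# Plus: trailing commas on multi-line argument lists, and long calls
# wrapped at black's default 88-character line length.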
@@ -9,11 +9,10 @@ __all__ = [
     "SupervisedSolver",
     "ReducedOrderModelSolver",
     "GAROM",
 ]
 
 from .solver import SolverInterface
 from .pinns import *
 from .supervised import SupervisedSolver
 from .rom import ReducedOrderModelSolver
 from .garom import GAROM
-
@@ -12,11 +12,12 @@ from torch.nn.modules.loss import _Loss
 
 torch.pi = torch.acos(torch.zeros(1)).item() * 2  # which is 3.1415927410125732
 
+
 class PINNInterface(SolverInterface, metaclass=ABCMeta):
     """
     Base PINN solver class. This class implements the Solver Interface
     for Physics Informed Neural Network solvers.
 
     This class can be used to
     define PINNs with multiple ``optimizers``, and/or ``models``.
     By default it takes
@@ -72,7 +73,7 @@ class PINNInterface(SolverInterface, metaclass=ABCMeta):
             self._clamp_params = self._clamp_inverse_problem_params
         else:
             self._params = None
-            self._clamp_params = lambda : None
+            self._clamp_params = lambda: None
 
         # variable used internally to store residual losses at each epoch
         # this variable save the residual at each iteration (not weighted)
@@ -107,7 +108,7 @@ class PINNInterface(SolverInterface, metaclass=ABCMeta):
         condition = self.problem.conditions[condition_name]
         pts = batch["pts"]
         # condition name is logged (if logs enabled)
         self.__logged_metric = condition_name
 
         if len(batch) == 2:
             samples = pts[condition_idx == condition_id]
@@ -160,7 +161,7 @@ class PINNInterface(SolverInterface, metaclass=ABCMeta):
         :rtype: LabelTensor
         """
         pass
 
     def compute_residual(self, samples, equation):
         """
         Compute the residual for Physics Informed learning. This function
@@ -182,7 +183,7 @@ class PINNInterface(SolverInterface, metaclass=ABCMeta):
             samples, self.forward(samples), self._params
         )
         return residual
 
     def store_log(self, loss_value):
         """
         Stores the loss value in the logger. This function should be
@@ -195,13 +196,13 @@ class PINNInterface(SolverInterface, metaclass=ABCMeta):
         :param torch.Tensor loss_value: The value of the loss.
         """
         self.log(
-            self.__logged_metric+'_loss',
+            self.__logged_metric + "_loss",
             loss_value,
             prog_bar=True,
             logger=True,
             on_epoch=True,
             on_step=False,
         )
         self.__logged_res_losses.append(loss_value)
 
     def on_train_epoch_end(self):
@@ -211,10 +212,10 @@ class PINNInterface(SolverInterface, metaclass=ABCMeta):
         """
         if self.__logged_res_losses:
             # storing mean loss
-            self.__logged_metric = 'mean'
+            self.__logged_metric = "mean"
             self.store_log(
-                sum(self.__logged_res_losses)/len(self.__logged_res_losses)
+                sum(self.__logged_res_losses) / len(self.__logged_res_losses)
             )
             # free the logged losses
             self.__logged_res_losses = []
         return super().on_train_epoch_end()
@@ -244,4 +245,4 @@ class PINNInterface(SolverInterface, metaclass=ABCMeta):
         :meth:`loss_phys` to extract the condition at which the loss is
         computed.
         """
         return self.__logged_metric
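Throughout these solvers the physics loss follows one pattern: build the residual of the governing equation, then measure it against a zero target. A minimal self-contained sketch of that pattern (illustrative names, not PINA's actual API):

import torch

def physics_loss(residual: torch.Tensor, loss_fn=torch.nn.MSELoss()):
    # The residual of the governing equation is driven to zero by
    # comparing it against a zero tensor of the same shape.
    return loss_fn(torch.zeros_like(residual), residual)

residual = torch.randn(128, 1)  # stand-in for an equation residual
print(float(physics_loss(residual)))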
@@ -97,25 +97,27 @@ class CausalPINN(PINN):
         :param dict scheduler_kwargs: LR scheduler constructor keyword args.
         :param int | float eps: The exponential decay parameter. Note that this
             value is kept fixed during the training, but can be changed by means
             of a callback, e.g. for annealing.
         """
         super().__init__(
             problem=problem,
             model=model,
             extra_features=extra_features,
             loss=loss,
             optimizer=optimizer,
             optimizer_kwargs=optimizer_kwargs,
             scheduler=scheduler,
             scheduler_kwargs=scheduler_kwargs,
         )
 
         # checking consistency
-        check_consistency(eps, (int,float))
+        check_consistency(eps, (int, float))
         self._eps = eps
         if not isinstance(self.problem, TimeDependentProblem):
-            raise ValueError('Casual PINN works only for problems'
-                             'inheritig from TimeDependentProblem.')
+            raise ValueError(
+                "Casual PINN works only for problems"
+                "inheritig from TimeDependentProblem."
+            )
 
     def loss_phys(self, samples, equation):
         """
@@ -144,14 +146,14 @@ class CausalPINN(PINN):
             )
             time_loss.append(loss_val)
         # store results
-        self.store_log(loss_value=float(sum(time_loss)/len(time_loss)))
+        self.store_log(loss_value=float(sum(time_loss) / len(time_loss)))
         # concatenate residuals
         time_loss = torch.stack(time_loss)
         # compute weights (without the gradient storing)
         with torch.no_grad():
             weights = self._compute_weights(time_loss)
         return (weights * time_loss).mean()
 
     @property
     def eps(self):
         """
@@ -205,8 +207,8 @@ class CausalPINN(PINN):
         _, idx_split = time_tensor.unique(return_counts=True)
         # splitting
         chunks = torch.split(tensor, tuple(idx_split))
         return chunks, labels  # return chunks
 
     def _compute_weights(self, loss):
         """
         Computes the weights for the physics loss based on the cumulative loss.
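For context, `_compute_weights` builds per-time-slice weights from the cumulative residual loss, with `eps` acting as the exponential decay rate. A plausible standalone sketch of that causal-weighting idea (an assumption for illustration; PINA's exact formula may differ):

import torch

def compute_causal_weights(time_loss: torch.Tensor, eps: float):
    # The weight of time slice i decays exponentially with the cumulative
    # residual loss of earlier slices, so later times only start to
    # matter once earlier dynamics are resolved.
    cumulative = torch.cumsum(time_loss, dim=0) - time_loss
    return torch.exp(-eps * cumulative)

print(compute_causal_weights(torch.tensor([0.9, 0.5, 0.1]), eps=10.0))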
@@ -117,7 +117,7 @@ class CompetitivePINN(PINNInterface):
                 optimizer_discriminator_kwargs,
             ],
             extra_features=None,  # CompetitivePINN doesn't take extra features
-            loss=loss
+            loss=loss,
         )
 
         # set automatic optimization for GANs
@@ -131,9 +131,7 @@ class CompetitivePINN(PINNInterface):
 
         # assign schedulers
         self._schedulers = [
-            scheduler_model(
-                self.optimizers[0], **scheduler_model_kwargs
-            ),
+            scheduler_model(self.optimizers[0], **scheduler_model_kwargs),
             scheduler_discriminator(
                 self.optimizers[1], **scheduler_discriminator_kwargs
             ),
@@ -141,7 +139,7 @@ class CompetitivePINN(PINNInterface):
 
         self._model = self.models[0]
         self._discriminator = self.models[1]
 
     def forward(self, x):
         r"""
         Forward pass implementation for the PINN solver. It returns the function
@@ -195,8 +193,11 @@ class CompetitivePINN(PINNInterface):
         :rtype: torch.Tensor
         """
         self.optimizer_model.zero_grad()
-        loss_val = super().loss_data(
-            input_tensor, output_tensor).as_subclass(torch.Tensor)
+        loss_val = (
+            super()
+            .loss_data(input_tensor, output_tensor)
+            .as_subclass(torch.Tensor)
+        )
         loss_val.backward()
         self.optimizer_model.step()
         return loss_val
@@ -221,7 +222,7 @@ class CompetitivePINN(PINNInterface):
         )
         return self.optimizers, self._schedulers
 
-    def on_train_batch_end(self,outputs, batch, batch_idx):
+    def on_train_batch_end(self, outputs, batch, batch_idx):
         """
         This method is called at the end of each training batch, and ovverides
         the PytorchLightining implementation for logging the checkpoints.
@@ -235,7 +236,9 @@ class CompetitivePINN(PINNInterface):
         :rtype: Any
         """
         # increase by one the counter of optimization to save loggers
-        self.trainer.fit_loop.epoch_loop.manual_optimization.optim_step_progress.total.completed += 1
+        self.trainer.fit_loop.epoch_loop.manual_optimization.optim_step_progress.total.completed += (
+            1
+        )
         return super().on_train_batch_end(outputs, batch, batch_idx)
 
     def _train_discriminator(self, samples, equation, discriminator_bets):
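Both CompetitivePINN and SAPINN train with `automatic_optimization = False`, which is why these hunks show explicit `zero_grad`/`manual_backward`/`step` calls and a manually bumped optimizer-step counter. A minimal PyTorch Lightning sketch of that pattern (class and tensor names are illustrative):

import pytorch_lightning
import torch

class ManualOptimSketch(pytorch_lightning.LightningModule):
    def __init__(self):
        super().__init__()
        self.automatic_optimization = False  # as in CompetitivePINN / SAPINN
        self.net = torch.nn.Linear(1, 1)

    def training_step(self, batch, batch_idx):
        opt = self.optimizers()
        opt.zero_grad()
        loss = self.net(batch).pow(2).mean()
        self.manual_backward(loss)  # replaces loss.backward()
        opt.step()

    def configure_optimizers(self):
        return torch.optim.Adam(self.net.parameters())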
@@ -252,13 +255,14 @@ class CompetitivePINN(PINNInterface):
         self.optimizer_discriminator.zero_grad()
         # compute residual, we detach because the weights of the generator
         # model are fixed
-        residual = self.compute_residual(samples=samples,
-                                         equation=equation).detach()
+        residual = self.compute_residual(
+            samples=samples, equation=equation
+        ).detach()
         # compute competitive residual, the minus is because we maximise
         competitive_residual = residual * discriminator_bets
-        loss_val = - self.loss(
+        loss_val = -self.loss(
             torch.zeros_like(competitive_residual, requires_grad=True),
-            competitive_residual
+            competitive_residual,
         ).as_subclass(torch.Tensor)
         # backprop
         self.manual_backward(loss_val)
@@ -283,16 +287,13 @@ class CompetitivePINN(PINNInterface):
         residual = self.compute_residual(samples=samples, equation=equation)
         # store logging
         with torch.no_grad():
-            loss_residual = self.loss(
-                torch.zeros_like(residual),
-                residual
-            )
+            loss_residual = self.loss(torch.zeros_like(residual), residual)
         # compute competitive residual, discriminator_bets are detached becase
         # we optimize only the generator model
         competitive_residual = residual * discriminator_bets.detach()
         loss_val = self.loss(
             torch.zeros_like(competitive_residual, requires_grad=True),
-            competitive_residual
+            competitive_residual,
         ).as_subclass(torch.Tensor)
         # backprop
         self.manual_backward(loss_val)
@@ -357,4 +358,4 @@ class CompetitivePINN(PINNInterface):
         :return: The scheduler for the discriminator.
         :rtype: torch.optim.lr_scheduler._LRScheduler
         """
         return self._schedulers[1]
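For context on the `detach` calls and the negated loss in the hunks above: the discriminator's "bets" weight the residual entries and are trained by gradient ascent on the weighted residual loss, while the generator minimizes the same objective. A toy sketch (not PINA code):

import torch

mse = torch.nn.MSELoss()
residual = torch.randn(64, 1, requires_grad=True)  # stand-in PDE residual
bets = torch.randn(64, 1, requires_grad=True)      # stand-in discriminator output

# Discriminator step: residual detached, loss negated (gradient ascent).
d_loss = -mse(torch.zeros(64, 1), residual.detach() * bets)
d_loss.backward()

# Generator step: bets detached, same objective minimised.
g_loss = mse(torch.zeros(64, 1), residual * bets.detach())
g_loss.backward()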
@@ -90,22 +90,23 @@ class GPINN(PINN):
         :param dict scheduler_kwargs: LR scheduler constructor keyword args.
         """
         super().__init__(
             problem=problem,
             model=model,
             extra_features=extra_features,
             loss=loss,
             optimizer=optimizer,
             optimizer_kwargs=optimizer_kwargs,
             scheduler=scheduler,
             scheduler_kwargs=scheduler_kwargs,
         )
         if not isinstance(self.problem, SpatialProblem):
-            raise ValueError('Gradient PINN computes the gradient of the '
-                             'PINN loss with respect to the spatial '
-                             'coordinates, thus the PINA problem must be '
-                             'a SpatialProblem.')
+            raise ValueError(
+                "Gradient PINN computes the gradient of the "
+                "PINN loss with respect to the spatial "
+                "coordinates, thus the PINA problem must be "
+                "a SpatialProblem."
+            )
 
-
     def loss_phys(self, samples, equation):
         """
         Computes the physics loss for the GPINN solver based on given
@@ -126,9 +127,9 @@ class GPINN(PINN):
         self.store_log(loss_value=float(loss_value))
         # gradient PINN loss
         loss_value = loss_value.reshape(-1, 1)
-        loss_value.labels = ['__LOSS']
+        loss_value.labels = ["__LOSS"]
         loss_grad = grad(loss_value, samples, d=self.problem.spatial_variables)
         g_loss_phys = self.loss(
             torch.zeros_like(loss_grad, requires_grad=True), loss_grad
         )
         return loss_value + g_loss_phys
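The hunk above shows GPINN's extra objective: the gradient of the pointwise residual loss with respect to the spatial coordinates is itself driven to zero. A self-contained sketch using plain autograd (PINA routes this through its own `grad` operator and `LabelTensor` labels):

import torch

mse = torch.nn.MSELoss()
x = torch.linspace(0, 1, 50, requires_grad=True).reshape(-1, 1)
u = torch.sin(3 * x)        # stand-in network output
pointwise = (u - x).pow(2)  # stand-in pointwise residual loss
(loss_grad,) = torch.autograd.grad(
    pointwise, x,
    grad_outputs=torch.ones_like(pointwise),
    create_graph=True,
)
g_loss_phys = mse(torch.zeros_like(loss_grad), loss_grad)
total = pointwise.mean() + g_loss_phys  # GPINN-style combined objective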
@@ -87,7 +87,7 @@ class PINN(PINNInterface):
             optimizers=[optimizer],
             optimizers_kwargs=[optimizer_kwargs],
             extra_features=extra_features,
-            loss=loss
+            loss=loss,
         )
 
         # check consistency
@@ -131,7 +131,6 @@ class PINN(PINNInterface):
         self.store_log(loss_value=float(loss_value))
         return loss_value
 
-
     def configure_optimizers(self):
         """
         Optimizer configuration for the PINN
@@ -153,7 +152,6 @@ class PINN(PINNInterface):
         )
         return self.optimizers, [self.scheduler]
 
-
     @property
     def scheduler(self):
         """
@@ -161,10 +159,9 @@ class PINN(PINNInterface):
         """
         return self._scheduler
 
-
     @property
     def neural_net(self):
         """
         Neural network for the PINN training.
         """
         return self._neural_net
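The PINN hunks above follow the standard PyTorch Lightning convention of returning optimizers and schedulers together from `configure_optimizers`. A minimal sketch of that convention, using the same `ConstantLR` defaults these solvers use (class name is illustrative):

import pytorch_lightning
import torch
from torch.optim.lr_scheduler import ConstantLR

class OptimConfigSketch(pytorch_lightning.LightningModule):
    def __init__(self):
        super().__init__()
        self.net = torch.nn.Linear(1, 1)

    def configure_optimizers(self):
        opt = torch.optim.Adam(self.net.parameters(), lr=0.001)
        sched = ConstantLR(opt, factor=1, total_iters=0)
        return [opt], [sched]  # (optimizers, schedulers)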
@@ -14,6 +14,7 @@ from pina.problem import InverseProblem
 
 from torch.optim.lr_scheduler import ConstantLR
 
+
 class Weights(torch.nn.Module):
     """
     This class aims to implements the mask model for
@@ -27,11 +28,9 @@ class Weights(torch.nn.Module):
         """
         super().__init__()
         check_consistency(func, torch.nn.Module)
-        self.sa_weights = torch.nn.Parameter(
-            torch.Tensor()
-        )
+        self.sa_weights = torch.nn.Parameter(torch.Tensor())
         self.func = func
 
     def forward(self):
         """
         Forward pass implementation for the mask module.
@@ -43,6 +42,7 @@ class Weights(torch.nn.Module):
         """
         return self.func(self.sa_weights)
 
+
 class SAPINN(PINNInterface):
     r"""
     Self Adaptive Physics Informed Neural Network (SAPINN) solver class.
@@ -106,22 +106,22 @@ class SAPINN(PINNInterface):
     DOI: `10.1016/
     j.jcp.2022.111722 <https://doi.org/10.1016/j.jcp.2022.111722>`_.
     """
 
     def __init__(
         self,
         problem,
         model,
         weights_function=torch.nn.Sigmoid(),
         extra_features=None,
         loss=torch.nn.MSELoss(),
         optimizer_model=torch.optim.Adam,
-        optimizer_model_kwargs={"lr" : 0.001},
+        optimizer_model_kwargs={"lr": 0.001},
         optimizer_weights=torch.optim.Adam,
-        optimizer_weights_kwargs={"lr" : 0.001},
+        optimizer_weights_kwargs={"lr": 0.001},
         scheduler_model=ConstantLR,
-        scheduler_model_kwargs={"factor" : 1, "total_iters" : 0},
+        scheduler_model_kwargs={"factor": 1, "total_iters": 0},
         scheduler_weights=ConstantLR,
-        scheduler_weights_kwargs={"factor" : 1, "total_iters" : 0}
+        scheduler_weights_kwargs={"factor": 1, "total_iters": 0},
     ):
         """
         :param AbstractProblem problem: The formualation of the problem.
@@ -167,19 +167,18 @@ class SAPINN(PINNInterface):
             weights_dict[condition_name] = Weights(weights_function)
         weights_dict = torch.nn.ModuleDict(weights_dict)
 
-
         super().__init__(
             models=[model, weights_dict],
             problem=problem,
             optimizers=[optimizer_model, optimizer_weights],
             optimizers_kwargs=[
                 optimizer_model_kwargs,
-                optimizer_weights_kwargs
+                optimizer_weights_kwargs,
             ],
             extra_features=extra_features,
-            loss=loss
+            loss=loss,
         )
 
         # set automatic optimization
         self.automatic_optimization = False
 
@@ -191,12 +190,8 @@ class SAPINN(PINNInterface):
 
         # assign schedulers
         self._schedulers = [
-            scheduler_model(
-                self.optimizers[0], **scheduler_model_kwargs
-            ),
-            scheduler_weights(
-                self.optimizers[1], **scheduler_weights_kwargs
-            ),
+            scheduler_model(self.optimizers[0], **scheduler_model_kwargs),
+            scheduler_weights(self.optimizers[1], **scheduler_weights_kwargs),
         ]
 
         self._model = self.models[0]
@@ -204,7 +199,7 @@ class SAPINN(PINNInterface):
 
         self._vectorial_loss = deepcopy(loss)
         self._vectorial_loss.reduction = "none"
 
     def forward(self, x):
         """
         Forward pass implementation for the PINN
@@ -219,7 +214,7 @@ class SAPINN(PINNInterface):
         :rtype: LabelTensor
         """
         return self.neural_net(x)
 
     def loss_phys(self, samples, equation):
         """
         Computes the physics loss for the SAPINN solver based on given
@@ -235,7 +230,7 @@ class SAPINN(PINNInterface):
         # train weights
         self.optimizer_weights.zero_grad()
         weighted_loss, _ = self._loss_phys(samples, equation)
-        loss_value = - weighted_loss.as_subclass(torch.Tensor)
+        loss_value = -weighted_loss.as_subclass(torch.Tensor)
         self.manual_backward(loss_value)
         self.optimizer_weights.step()
 
@@ -271,7 +266,7 @@ class SAPINN(PINNInterface):
         # train weights
         self.optimizer_weights.zero_grad()
         weighted_loss, _ = self._loss_data(input_tensor, output_tensor)
-        loss_value = - weighted_loss.as_subclass(torch.Tensor)
+        loss_value = -weighted_loss.as_subclass(torch.Tensor)
         self.manual_backward(loss_value)
         self.optimizer_weights.step()
 
@@ -291,7 +286,7 @@ class SAPINN(PINNInterface):
         # store loss without weights
         self.store_log(loss_value=float(loss))
         return loss_value
 
     def configure_optimizers(self):
         """
         Optimizer configuration for the SAPINN
@@ -312,8 +307,8 @@ class SAPINN(PINNInterface):
             }
         )
         return self.optimizers, self._schedulers
 
-    def on_train_batch_end(self,outputs, batch, batch_idx):
+    def on_train_batch_end(self, outputs, batch, batch_idx):
         """
         This method is called at the end of each training batch, and ovverides
         the PytorchLightining implementation for logging the checkpoints.
@@ -327,9 +322,11 @@ class SAPINN(PINNInterface):
         :rtype: Any
         """
         # increase by one the counter of optimization to save loggers
-        self.trainer.fit_loop.epoch_loop.manual_optimization.optim_step_progress.total.completed += 1
+        self.trainer.fit_loop.epoch_loop.manual_optimization.optim_step_progress.total.completed += (
+            1
+        )
         return super().on_train_batch_end(outputs, batch, batch_idx)
 
     def on_train_start(self):
         """
         This method is called at the start of the training for setting
@@ -343,12 +340,11 @@ class SAPINN(PINNInterface):
             self.trainer._accelerator_connector._accelerator_flag
         )
         for condition_name, tensor in self.problem.input_pts.items():
-            self.weights_dict.torchmodel[condition_name].sa_weights.data = torch.rand(
-                (tensor.shape[0], 1),
-                device = device
+            self.weights_dict.torchmodel[condition_name].sa_weights.data = (
+                torch.rand((tensor.shape[0], 1), device=device)
             )
         return super().on_train_start()
 
     def on_load_checkpoint(self, checkpoint):
         """
         Overriding the Pytorch Lightning ``on_load_checkpoint`` to handle
@@ -358,8 +354,8 @@ class SAPINN(PINNInterface):
         :param dict checkpoint: Pytorch Lightning checkpoint dict.
         """
         for condition_name, tensor in self.problem.input_pts.items():
-            self.weights_dict.torchmodel[condition_name].sa_weights.data = torch.rand(
-                (tensor.shape[0], 1)
+            self.weights_dict.torchmodel[condition_name].sa_weights.data = (
+                torch.rand((tensor.shape[0], 1))
             )
         return super().on_load_checkpoint(checkpoint)
 
@@ -370,13 +366,13 @@ class SAPINN(PINNInterface):
         :param LabelTensor samples: Input samples to evaluate the physics loss.
         :param EquationInterface equation: the governing equation representing
             the physics.
 
         :return: tuple with weighted and not weighted scalar loss
         :rtype: List[LabelTensor, LabelTensor]
         """
         residual = self.compute_residual(samples, equation)
         return self._compute_loss(residual)
 
     def _loss_data(self, input_tensor, output_tensor):
         """
         Elaboration of the loss related to data for the SAPINN solver.
@@ -384,7 +380,7 @@ class SAPINN(PINNInterface):
         :param LabelTensor input_tensor: The input to the neural networks.
         :param LabelTensor output_tensor: The true solution to compare the
             network solution.
 
         :return: tuple with weighted and not weighted scalar loss
         :rtype: List[LabelTensor, LabelTensor]
         """
@@ -396,19 +392,21 @@ class SAPINN(PINNInterface):
         Elaboration of the pointwise loss through the mask model and the
         self adaptive weights
 
         :param LabelTensor residual: the matrix of residuals that have to
             be weighted
 
         :return: tuple with weighted and not weighted loss
         :rtype List[LabelTensor, LabelTensor]
         """
-        weights = self.weights_dict.torchmodel[
-            self.current_condition_name].forward()
-        loss_value = self._vectorial_loss(torch.zeros_like(
-            residual, requires_grad=True), residual)
+        weights = self.weights_dict.torchmodel[
+            self.current_condition_name
+        ].forward()
+        loss_value = self._vectorial_loss(
+            torch.zeros_like(residual, requires_grad=True), residual
+        )
         return (
             self._vect_to_scalar(weights * loss_value),
-            self._vect_to_scalar(loss_value)
+            self._vect_to_scalar(loss_value),
         )
 
     def _vect_to_scalar(self, loss_value):
@@ -426,10 +424,11 @@ class SAPINN(PINNInterface):
         elif self.loss.reduction == "sum":
             ret = torch.sum(loss_value)
         else:
-            raise RuntimeError(f"Invalid reduction, got {self.loss.reduction} "
-                               "but expected mean or sum.")
+            raise RuntimeError(
+                f"Invalid reduction, got {self.loss.reduction} "
+                "but expected mean or sum."
+            )
         return ret
 
-
     @property
     def neural_net(self):
@@ -440,7 +439,7 @@ class SAPINN(PINNInterface):
         :rtype: torch.nn.Module
         """
         return self.models[0]
 
     @property
     def weights_dict(self):
         """
@@ -462,7 +461,7 @@ class SAPINN(PINNInterface):
         :rtype: torch.optim.lr_scheduler._LRScheduler
         """
         return self._scheduler[0]
 
     @property
     def scheduler_weights(self):
         """
@@ -482,7 +481,7 @@ class SAPINN(PINNInterface):
         :rtype: torch.optim.Optimizer
         """
         return self.optimizers[0]
 
     @property
     def optimizer_weights(self):
         """
@@ -491,4 +490,4 @@ class SAPINN(PINNInterface):
         :return: The optimizer for the mask model.
         :rtype: torch.optim.Optimizer
         """
         return self.optimizers[1]
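To summarize the SAPINN mechanics these hunks reformat: each training point carries a trainable self-adaptive weight, passed through a mask function (sigmoid by default); the model descends on the weighted loss while the weights ascend on it, which is why the loss is negated before `manual_backward`. A toy sketch (not PINA code):

import torch

mask = torch.nn.Sigmoid()
sa_weights = torch.nn.Parameter(torch.rand(128, 1))  # one weight per point
residual = torch.randn(128, 1, requires_grad=True)   # stand-in residual

pointwise = torch.nn.MSELoss(reduction="none")(
    torch.zeros_like(residual), residual
)
weighted = (mask(sa_weights) * pointwise).mean()

# The model optimizer minimises `weighted`; the weights optimizer
# minimises `-weighted`, i.e. maximises the weighted loss.
(-weighted).backward()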
@@ -4,6 +4,7 @@ import torch
 
 from pina.solvers import SupervisedSolver
 
+
 class ReducedOrderModelSolver(SupervisedSolver):
     r"""
     ReducedOrderModelSolver solver class. This class implements a
@@ -114,10 +115,13 @@ class ReducedOrderModelSolver(SupervisedSolver):
             rate scheduler.
         :param dict scheduler_kwargs: LR scheduler constructor keyword args.
         """
-        model = torch.nn.ModuleDict({
-            'reduction_network' : reduction_network,
-            'interpolation_network' : interpolation_network})
+        model = torch.nn.ModuleDict(
+            {
+                "reduction_network": reduction_network,
+                "interpolation_network": interpolation_network,
+            }
+        )
 
         super().__init__(
             model=model,
             problem=problem,
@@ -125,18 +129,22 @@ class ReducedOrderModelSolver(SupervisedSolver):
             optimizer=optimizer,
             optimizer_kwargs=optimizer_kwargs,
             scheduler=scheduler,
-            scheduler_kwargs=scheduler_kwargs
+            scheduler_kwargs=scheduler_kwargs,
         )
 
         # assert reduction object contains encode/ decode
-        if not hasattr(self.neural_net['reduction_network'], 'encode'):
-            raise SyntaxError('reduction_network must have encode method. '
-                              'The encode method should return a lower '
-                              'dimensional representation of the input.')
-        if not hasattr(self.neural_net['reduction_network'], 'decode'):
-            raise SyntaxError('reduction_network must have decode method. '
-                              'The decode method should return a high '
-                              'dimensional representation of the encoding.')
+        if not hasattr(self.neural_net["reduction_network"], "encode"):
+            raise SyntaxError(
+                "reduction_network must have encode method. "
+                "The encode method should return a lower "
+                "dimensional representation of the input."
+            )
+        if not hasattr(self.neural_net["reduction_network"], "decode"):
+            raise SyntaxError(
+                "reduction_network must have decode method. "
+                "The decode method should return a high "
+                "dimensional representation of the encoding."
+            )
 
     def forward(self, x):
         """
@@ -149,8 +157,8 @@ class ReducedOrderModelSolver(SupervisedSolver):
         :return: Solver solution.
         :rtype: torch.Tensor
         """
-        reduction_network = self.neural_net['reduction_network']
-        interpolation_network = self.neural_net['interpolation_network']
+        reduction_network = self.neural_net["reduction_network"]
+        interpolation_network = self.neural_net["interpolation_network"]
         return reduction_network.decode(interpolation_network(x))
 
     def loss_data(self, input_pts, output_pts):
@@ -167,17 +175,18 @@ class ReducedOrderModelSolver(SupervisedSolver):
         :rtype: torch.Tensor
         """
         # extract networks
-        reduction_network = self.neural_net['reduction_network']
-        interpolation_network = self.neural_net['interpolation_network']
+        reduction_network = self.neural_net["reduction_network"]
+        interpolation_network = self.neural_net["interpolation_network"]
         # encoded representations loss
         encode_repr_inter_net = interpolation_network(input_pts)
         encode_repr_reduction_network = reduction_network.encode(output_pts)
-        loss_encode = self.loss(encode_repr_inter_net,
-                                encode_repr_reduction_network)
+        loss_encode = self.loss(
+            encode_repr_inter_net, encode_repr_reduction_network
+        )
         # reconstruction loss
         loss_reconstruction = self.loss(
-            reduction_network.decode(encode_repr_reduction_network),
-            output_pts)
+            reduction_network.decode(encode_repr_reduction_network), output_pts
+        )
 
         return loss_encode + loss_reconstruction
 
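The `loss_data` reformatted above combines two terms: the interpolation network's latent prediction is matched against the encoded snapshot, and the decoded snapshot is matched against the original. A self-contained sketch with stand-in networks (PINA expects user classes exposing `encode`/`decode`; all names here are illustrative):

import torch

class Reduction(torch.nn.Module):
    def __init__(self):
        super().__init__()
        self.enc = torch.nn.Linear(10, 2)
        self.dec = torch.nn.Linear(2, 10)

    def encode(self, x):
        return self.enc(x)

    def decode(self, z):
        return self.dec(z)

reduction, interpolation = Reduction(), torch.nn.Linear(3, 2)
loss = torch.nn.MSELoss()
params, snapshots = torch.randn(32, 3), torch.randn(32, 10)

encode_interp = interpolation(params)      # latent state from parameters
encode_true = reduction.encode(snapshots)  # latent state from snapshots
loss_encode = loss(encode_interp, encode_true)
loss_reconstruction = loss(reduction.decode(encode_true), snapshots)
total = loss_encode + loss_reconstruction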
@@ -142,13 +142,13 @@ class SolverInterface(pytorch_lightning.LightningModule, metaclass=ABCMeta):
         """
         The problem formulation."""
         return self._pina_problem
 
     def on_train_start(self):
         """
         On training epoch start this function is call to do global checks for
         the different solvers.
         """
 
         # 1. Check the verison for dataloader
         dataloader = self.trainer.train_dataloader
         if sys.version_info < (3, 8):
@@ -118,7 +118,7 @@ class SupervisedSolver(SolverInterface):
         :return: The sum of the loss functions.
         :rtype: LabelTensor
         """
 
         condition_idx = batch["condition"]
 
         for condition_id in range(condition_idx.min(), condition_idx.max() + 1):
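The loop above slices a stacked batch by an integer condition index, visiting one condition at a time. A minimal sketch of that slicing:

import torch

pts = torch.randn(10, 2)
condition_idx = torch.tensor([0, 0, 0, 1, 1, 1, 1, 2, 2, 2])
for condition_id in range(condition_idx.min(), condition_idx.max() + 1):
    samples = pts[condition_idx == condition_id]  # points of this condition
    print(condition_id, samples.shape)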
@@ -162,7 +162,7 @@ class SupervisedSolver(SolverInterface):
         :rtype: torch.Tensor
         """
         return self.loss(self.forward(input_pts), output_pts)
 
     @property
     def scheduler(self):
         """
@@ -67,9 +67,9 @@ class Trainer(pytorch_lightning.Trainer):
         pb = self._model.problem
         if hasattr(pb, "unknown_parameters"):
             for key in pb.unknown_parameters:
-                pb.unknown_parameters[key] = torch.nn.Parameter(pb.unknown_parameters[key].data.to(device))
+                pb.unknown_parameters[key] = torch.nn.Parameter(
+                    pb.unknown_parameters[key].data.to(device)
+                )
 
     def train(self, **kwargs):
         """
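The Trainer hunk re-wraps each inverse-problem parameter so its data lives on the training device while remaining a leaf `torch.nn.Parameter`. A minimal sketch of the pattern:

import torch

device = "cuda" if torch.cuda.is_available() else "cpu"
p = torch.nn.Parameter(torch.randn(3))
p = torch.nn.Parameter(p.data.to(device))  # moved to device, still a leaf Parameter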