Correct codacy warnings

FilippoOlivo
2024-10-22 14:26:39 +02:00
committed by Nicola Demo
parent c9304fb9bb
commit 1bc1b3a580
15 changed files with 252 additions and 210 deletions

View File

@@ -17,15 +17,13 @@ class SolverInterface(pytorch_lightning.LightningModule, metaclass=ABCMeta):
    LightningModule methods.
    """

-    def __init__(
-        self,
-        models,
-        problem,
-        optimizers,
-        schedulers,
-        extra_features,
-        use_lt=True
-    ):
+    def __init__(self,
+                 models,
+                 problem,
+                 optimizers,
+                 schedulers,
+                 extra_features,
+                 use_lt=True):
"""
:param model: A torch neural network model instance.
:type model: torch.nn.Module
@@ -55,10 +53,11 @@ class SolverInterface(pytorch_lightning.LightningModule, metaclass=ABCMeta):
        if use_lt is True:
            for idx in range(len(models)):
                models[idx] = Network(
-                    model = models[idx],
+                    model=models[idx],
                    input_variables=problem.input_variables,
                    output_variables=problem.output_variables,
-                    extra_features=extra_features, )
+                    extra_features=extra_features,
+                )

        # Check scheduler consistency + encapsulation
        if not isinstance(schedulers, list):
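For context, the loop above replaces each raw module in-place with PINA's Network wrapper so that, when use_lt=True, inputs and outputs can be addressed by the problem's variable names. A minimal sketch of the same pattern, with toy stand-ins for the problem and the wrapper (everything not named in the diff is hypothetical):

import torch

class ToyProblem:
    # Stand-in for a PINA problem; only the two attributes the loop reads.
    input_variables = ['x', 'y']
    output_variables = ['u']

class ToyWrapper(torch.nn.Module):
    # Hypothetical stand-in for PINA's Network adapter.
    def __init__(self, model, input_variables, output_variables, extra_features=None):
        super().__init__()
        self.model = model
        self.input_variables = input_variables
        self.output_variables = output_variables

    def forward(self, x):
        return self.model(x)

problem = ToyProblem()
models = [torch.nn.Linear(2, 1), torch.nn.Linear(2, 1)]
# Mirrors the diff: each raw module is swapped for its wrapped version.
for idx in range(len(models)):
    models[idx] = ToyWrapper(
        model=models[idx],
        input_variables=problem.input_variables,
        output_variables=problem.output_variables,
        extra_features=None,
    )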
@@ -79,11 +78,9 @@ class SolverInterface(pytorch_lightning.LightningModule, metaclass=ABCMeta):
        # check length consistency optimizers
        if len_model != len_optimizer:
-            raise ValueError(
-                "You must define one optimizer for each model."
-                f"Got {len_model} models, and {len_optimizer}"
-                " optimizers."
-            )
+            raise ValueError("You must define one optimizer for each model. "
+                             f"Got {len_model} models and {len_optimizer}"
+                             " optimizers.")

        # extra features handling
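Incidentally, both the old and new forms rely on implicit concatenation of adjacent string literals, which joins them with no separator; the rewritten message above adds the trailing space (and drops a stray comma) so the sentence renders correctly. A quick illustration of the behavior:

len_model, len_optimizer = 2, 3
msg = ("You must define one optimizer for each model. "
       f"Got {len_model} models and {len_optimizer}"
       " optimizers.")
print(msg)
# You must define one optimizer for each model. Got 2 models and 3 optimizers.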
@@ -92,7 +89,6 @@ class SolverInterface(pytorch_lightning.LightningModule, metaclass=ABCMeta):
        self._pina_schedulers = schedulers
        self._pina_problem = problem

    @abstractmethod
    def forward(self, *args, **kwargs):
        pass
@@ -142,5 +138,8 @@ class SolverInterface(pytorch_lightning.LightningModule, metaclass=ABCMeta):
        TODO
        """
        for _, condition in problem.conditions.items():
-            if not set(self.accepted_condition_types).issubset(condition.condition_type):
-                raise ValueError(f'{self.__name__} support only dose not support condition {condition.condition_type}')
+            if not set(self.accepted_condition_types).issubset(
+                    condition.condition_type):
+                raise ValueError(
+                    f'{self.__name__} does not support condition {condition.condition_type}'
+                )
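The check above requires the solver's accepted condition types to be a subset of the types declared by each condition. The set logic in isolation, with toy values in place of real PINA conditions:

accepted_condition_types = ['supervised']   # as on the class below
condition_type = ['supervised', 'physics']  # toy value

# Mirrors the diff: every type the solver accepts must appear among the
# condition's declared types, otherwise a ValueError is raised.
if not set(accepted_condition_types).issubset(condition_type):
    raise ValueError(f'Solver does not support condition {condition_type}')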

View File

@@ -40,15 +40,13 @@ class SupervisedSolver(SolverInterface):
    accepted_condition_types = ['supervised']
    __name__ = 'SupervisedSolver'

-    def __init__(
-        self,
-        problem,
-        model,
-        loss=None,
-        optimizer=None,
-        scheduler=None,
-        extra_features=None
-    ):
+    def __init__(self,
+                 problem,
+                 model,
+                 loss=None,
+                 optimizer=None,
+                 scheduler=None,
+                 extra_features=None):
"""
:param AbstractProblem problem: The formualation of the problem.
:param torch.nn.Module model: The neural network model to use.
@@ -68,16 +66,13 @@ class SupervisedSolver(SolverInterface):
            optimizer = TorchOptimizer(torch.optim.Adam, lr=0.001)
        if scheduler is None:
-            scheduler = TorchScheduler(
-                torch.optim.lr_scheduler.ConstantLR)
+            scheduler = TorchScheduler(torch.optim.lr_scheduler.ConstantLR)

-        super().__init__(
-            models=model,
-            problem=problem,
-            optimizers=optimizer,
-            schedulers=scheduler,
-            extra_features=extra_features
-        )
+        super().__init__(models=model,
+                         problem=problem,
+                         optimizers=optimizer,
+                         schedulers=scheduler,
+                         extra_features=extra_features)

        # check consistency
        check_consistency(loss, (LossInterface, _Loss), subclass=False)
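With both fall-backs taken, the solver ends up with Adam at lr=0.001 and a constant learning-rate schedule. In plain-torch terms (TorchOptimizer and TorchScheduler are PINA's deferred-construction wrappers; the snippet below skips the wrappers and builds the equivalents directly, which is an assumption about what they expand to):

import torch

model = torch.nn.Linear(2, 1)  # toy model in place of the solver's network

# Assumed equivalent of TorchOptimizer(torch.optim.Adam, lr=0.001) once hooked:
optimizer = torch.optim.Adam(model.parameters(), lr=0.001)
# Assumed equivalent of TorchScheduler(torch.optim.lr_scheduler.ConstantLR):
scheduler = torch.optim.lr_scheduler.ConstantLR(optimizer)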
@@ -107,10 +102,8 @@ class SupervisedSolver(SolverInterface):
"""
self._optimizer.hook(self._model.parameters())
self._scheduler.hook(self._optimizer)
return (
[self._optimizer.optimizer_instance],
[self._scheduler.scheduler_instance]
)
return ([self._optimizer.optimizer_instance],
[self._scheduler.scheduler_instance])
def training_step(self, batch, batch_idx):
"""Solver training step.
@@ -136,8 +129,7 @@ class SupervisedSolver(SolverInterface):
            # for data driven mode
            if not hasattr(condition, "output_points"):
-                raise NotImplementedError(
-                    f"{type(self).__name__} works only in data-driven mode."
-                )
+                raise NotImplementedError(
+                    f"{type(self).__name__} works only in data-driven mode.")

            output_pts = out[condition_idx == condition_id]
            input_pts = pts[condition_idx == condition_id]
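For readers skimming the diff: condition_idx == condition_id builds a boolean mask, so the two context lines above slice out only the batch rows that belong to the current condition. The pattern in isolation, on toy tensors:

import torch

out = torch.randn(5, 1)                        # toy batch of outputs
condition_idx = torch.tensor([0, 1, 0, 1, 1])  # condition id of each row
condition_id = 1

output_pts = out[condition_idx == condition_id]  # rows of condition 1 only
print(output_pts.shape)                          # torch.Size([3, 1])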
@@ -145,9 +137,7 @@ class SupervisedSolver(SolverInterface):
            input_pts.labels = pts.labels
            output_pts.labels = out.labels

-            loss = (
-                self.loss_data(input_pts=input_pts, output_pts=output_pts)
-            )
+            loss = self.loss_data(input_pts=input_pts, output_pts=output_pts)
            loss = loss.as_subclass(torch.Tensor)

        self.log("mean_loss", float(loss), prog_bar=True, logger=True)
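Finally, as_subclass(torch.Tensor) above downcasts PINA's labelled tensor to a plain tensor before logging, keeping the data but dropping the subclass. A toy illustration with a hypothetical stand-in subclass:

import torch

class LabelTensor(torch.Tensor):
    """Hypothetical stand-in for PINA's labelled-tensor subclass."""

loss = torch.tensor(0.25).as_subclass(LabelTensor)
plain = loss.as_subclass(torch.Tensor)  # same storage, plain Tensor type
print(type(loss).__name__, '->', type(plain).__name__)  # LabelTensor -> Tensor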