Documentation for v0.1 version (#199)
* Add equations and fix typos
* Improve _code.rst
* Add the team rst and restructure index.rst
* Fix errors

---------

Co-authored-by: Dario Coscia <dariocoscia@dhcp-015.eduroam.sissa.it>
commit 8b7b61b3bd
parent 3f9305d475
committed by: Nicola Demo
@@ -1,8 +1,12 @@
__all__ = [
    'PINN',
    'GAROM',
    'SupervisedSolver',
    'SolverInterface'
]

from .garom import GAROM
from .pinn import PINN
from .supervised import SupervisedSolver
from .solver import SolverInterface
@@ -4,7 +4,7 @@ import torch
try:
    from torch.optim.lr_scheduler import LRScheduler  # torch >= 2.0
except ImportError:
    from torch.optim.lr_scheduler import _LRScheduler as LRScheduler  # torch < 2.0

from torch.optim.lr_scheduler import ConstantLR
from .solver import SolverInterface
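The try/except above is a version guard: torch 2.0 made the scheduler base class public as LRScheduler, while older releases only expose the private _LRScheduler. A minimal standalone check of the pattern, assuming only plain torch (ConstantLR, the default scheduler throughout this commit, subclasses whichever base the guard binds):

import torch

try:
    from torch.optim.lr_scheduler import LRScheduler  # torch >= 2.0
except ImportError:
    from torch.optim.lr_scheduler import _LRScheduler as LRScheduler  # torch < 2.0

# ConstantLR is a concrete scheduler, so it must satisfy the aliased base
# class regardless of which branch of the guard was taken.
opt = torch.optim.Adam(torch.nn.Linear(2, 2).parameters(), lr=1e-3)
sched = torch.optim.lr_scheduler.ConstantLR(opt, factor=1, total_iters=0)
assert isinstance(sched, LRScheduler)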
@@ -22,28 +22,36 @@ class GAROM(SolverInterface):
    .. seealso::

        **Original reference**: Coscia, D., Demo, N., & Rozza, G. (2023).
-        Generative Adversarial Reduced Order Modelling.
-        arXiv preprint arXiv:2305.15881.
+        *Generative Adversarial Reduced Order Modelling*.
+        DOI: `arXiv preprint arXiv:2305.15881.
+        <https://doi.org/10.48550/arXiv.2305.15881>`_.
    """
-    def __init__(self,
-                 problem,
-                 generator,
-                 discriminator,
-                 extra_features=None,
-                 loss = None,
-                 optimizer_generator=torch.optim.Adam,
-                 optimizer_generator_kwargs={'lr' : 0.001},
-                 optimizer_discriminator=torch.optim.Adam,
-                 optimizer_discriminator_kwargs={'lr' : 0.001},
-                 scheduler_generator=ConstantLR,
-                 scheduler_generator_kwargs={"factor": 1, "total_iters": 0},
-                 scheduler_discriminator=ConstantLR,
-                 scheduler_discriminator_kwargs={"factor": 1, "total_iters": 0},
-                 gamma = 0.3,
-                 lambda_k = 0.001,
-                 regularizer = False,
-                 ):
+    def __init__(
+            self,
+            problem,
+            generator,
+            discriminator,
+            extra_features=None,
+            loss=None,
+            optimizer_generator=torch.optim.Adam,
+            optimizer_generator_kwargs={'lr': 0.001},
+            optimizer_discriminator=torch.optim.Adam,
+            optimizer_discriminator_kwargs={'lr': 0.001},
+            scheduler_generator=ConstantLR,
+            scheduler_generator_kwargs={
+                "factor": 1,
+                "total_iters": 0
+            },
+            scheduler_discriminator=ConstantLR,
+            scheduler_discriminator_kwargs={
+                "factor": 1,
+                "total_iters": 0
+            },
+            gamma=0.3,
+            lambda_k=0.001,
+            regularizer=False,
+    ):
        """
        :param AbstractProblem problem: The formulation of the problem.
        :param torch.nn.Module generator: The neural network model to use
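The scheduler defaults above (factor=1, total_iters=0) make ConstantLR a no-op, so the learning rate stays at the optimizer's initial value unless the user overrides the kwargs. A quick standalone check with plain torch:

import torch

opt = torch.optim.Adam(torch.nn.Linear(1, 1).parameters(), lr=0.001)
sched = torch.optim.lr_scheduler.ConstantLR(opt, factor=1, total_iters=0)
for _ in range(5):
    opt.step()
    sched.step()
assert opt.param_groups[0]['lr'] == 0.001  # unchanged by the default scheduler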
@@ -77,11 +85,11 @@ class GAROM(SolverInterface):
            rate scheduler for the discriminator.
        :param dict scheduler_discriminator_kwargs: LR scheduler constructor keyword args.
        :param gamma: Ratio of expected loss for generator and discriminator, defaults to 0.3.
-        :type gamma: float, optional
+        :type gamma: float
        :param lambda_k: Learning rate for control theory optimization, defaults to 0.001.
-        :type lambda_k: float, optional
+        :type lambda_k: float
        :param regularizer: Regularization term in the GAROM loss, defaults to False.
-        :type regularizer: bool, optional
+        :type regularizer: bool

        .. warning::
            The algorithm works only for data-driven models. Hence in the ``problem`` definition
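A construction sketch, not part of this diff: it assumes `pina.solvers` as the import path (consistent with the `__init__.py` hunk above) and a data-driven `problem` instance defined elsewhere; the linear networks are placeholders sized for illustration only.

import torch
from pina.solvers import GAROM  # import path assumed from the __init__.py hunk

generator = torch.nn.Linear(8, 16)        # placeholder generator
discriminator = torch.nn.Linear(16, 8)    # placeholder discriminator
solver = GAROM(problem=problem,           # data-driven problem, defined elsewhere (assumption)
               generator=generator,
               discriminator=discriminator,
               gamma=0.3,                 # equilibrium ratio (default)
               lambda_k=0.001)            # control step size (default)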
@@ -90,22 +98,27 @@ class GAROM(SolverInterface):
        """

        if isinstance(extra_features, dict):
-            extra_features = [extra_features['generator'], extra_features['discriminator']]
+            extra_features = [
+                extra_features['generator'], extra_features['discriminator']
+            ]

-        super().__init__(models=[generator, discriminator],
-                         problem=problem,
-                         extra_features=extra_features,
-                         optimizers=[optimizer_generator, optimizer_discriminator],
-                         optimizers_kwargs=[optimizer_generator_kwargs, optimizer_discriminator_kwargs])
+        super().__init__(
+            models=[generator, discriminator],
+            problem=problem,
+            extra_features=extra_features,
+            optimizers=[optimizer_generator, optimizer_discriminator],
+            optimizers_kwargs=[
+                optimizer_generator_kwargs, optimizer_discriminator_kwargs
+            ])

        # set automatic optimization for GANs
        self.automatic_optimization = False

        # set loss
        if loss is None:
            loss = PowerLoss(p=1)

        # check consistency
        check_consistency(scheduler_generator, LRScheduler, subclass=True)
        check_consistency(scheduler_generator_kwargs, dict)
        check_consistency(scheduler_discriminator, LRScheduler, subclass=True)
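The dict branch above lets a user pass one feature set per network; a minimal standalone check of that conversion (the feature entries are dummy placeholders):

extra_features = {'generator': ['g_feat'], 'discriminator': ['d_feat']}
if isinstance(extra_features, dict):
    extra_features = [
        extra_features['generator'], extra_features['discriminator']
    ]
assert extra_features == [['g_feat'], ['d_feat']]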
@@ -134,6 +147,20 @@ class GAROM(SolverInterface):
        self.regularizer = float(regularizer)

    def forward(self, x, mc_steps=20, variance=False):
        """
        Forward step for GAROM solver.

        :param x: The input tensor.
        :type x: torch.Tensor
        :param mc_steps: Number of Monte Carlo samples to approximate the
            expected value, defaults to 20.
        :type mc_steps: int
        :param variance: Whether to also return the sample variance of the solution, defaults to False.
        :type variance: bool
        :return: The expected value of the generator distribution. If ``variance=True`` also the
            sample variance is returned.
        :rtype: torch.Tensor | tuple(torch.Tensor, torch.Tensor)
        """

        # sampling
        field_sample = [self.sample(x) for _ in range(mc_steps)]
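A standalone sketch of the Monte Carlo estimate this forward pass describes, with a noisy lambda standing in for GAROM's generator-based `sample` method (an assumption made for illustration):

import torch

def mc_expectation(sample_fn, x, mc_steps=20, variance=False):
    # stack mc_steps stochastic samples and reduce over the sample axis
    samples = torch.stack([sample_fn(x) for _ in range(mc_steps)])
    mean = samples.mean(dim=0)
    if variance:
        return mean, samples.var(dim=0)
    return mean

sample_fn = lambda x: x + 0.1 * torch.randn_like(x)  # stand-in sampler
mean, var = mc_expectation(sample_fn, torch.zeros(4), variance=True)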
@@ -147,10 +174,11 @@
            return mean, var

        return mean

    def configure_optimizers(self):
-        """Optimizer configuration for the GAROM
-        solver.
-        """
+        """
+        Optimizer configuration for the GAROM
+        solver.
+
+        :return: The optimizers and the schedulers
+        :rtype: tuple(list, list)
@@ -220,7 +248,7 @@ class GAROM(SolverInterface):
        return diff

    def training_step(self, batch, batch_idx):
-        """PINN solver training step.
+        """GAROM solver training step.

        :param batch: The batch element in the dataloader.
        :type batch: tuple
@@ -265,27 +293,27 @@
        self.log('stability_metric', float(d_loss_real + torch.abs(diff)), prog_bar=True, logger=True, on_epoch=True, on_step=False)

        return
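The `stability_metric` logged above, together with `gamma` and `lambda_k`, points to a BEGAN-style equilibrium update; the update itself is outside this hunk, so the following is a hedged reconstruction for orientation, not the library's code:

import torch

k, gamma, lambda_k = 0.0, 0.3, 0.001
d_loss_real = torch.tensor(0.8)  # discriminator loss on real snapshots (dummy value)
d_loss_fake = torch.tensor(0.5)  # discriminator loss on generated snapshots (dummy value)

diff = gamma * d_loss_real - d_loss_fake              # deviation from equilibrium
k = min(max(k + lambda_k * float(diff), 0.0), 1.0)    # clamp control variable to [0, 1]
stability_metric = float(d_loss_real + diff.abs())    # the quantity logged above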
    @property
    def generator(self):
        return self.models[0]

    @property
    def discriminator(self):
        return self.models[1]

    @property
    def optimizer_generator(self):
        return self.optimizers[0]

    @property
    def optimizer_discriminator(self):
        return self.optimizers[1]

    @property
    def scheduler_generator(self):
        return self._schedulers[0]

    @property
    def scheduler_discriminator(self):
        return self._schedulers[1]
@@ -3,7 +3,7 @@ import torch
try:
    from torch.optim.lr_scheduler import LRScheduler  # torch >= 2.0
except ImportError:
    from torch.optim.lr_scheduler import _LRScheduler as LRScheduler  # torch < 2.0

from torch.optim.lr_scheduler import ConstantLR
@@ -13,7 +13,6 @@ from ..utils import check_consistency
from ..loss import LossInterface
from torch.nn.modules.loss import _Loss

-torch.pi = torch.acos(torch.zeros(1)).item() * 2  # which is 3.1415927410125732
@@ -30,27 +29,31 @@ class PINN(SolverInterface):
        Physics-informed machine learning. Nature Reviews Physics, 3(6), 422-440.
        <https://doi.org/10.1038/s42254-021-00314-5>`_.
    """
-    def __init__(self,
-                 problem,
-                 model,
-                 extra_features=None,
-                 loss = torch.nn.MSELoss(),
-                 optimizer=torch.optim.Adam,
-                 optimizer_kwargs={'lr' : 0.001},
-                 scheduler=ConstantLR,
-                 scheduler_kwargs={"factor": 1, "total_iters": 0},
-                 ):
+    def __init__(
+            self,
+            problem,
+            model,
+            extra_features=None,
+            loss=torch.nn.MSELoss(),
+            optimizer=torch.optim.Adam,
+            optimizer_kwargs={'lr': 0.001},
+            scheduler=ConstantLR,
+            scheduler_kwargs={
+                "factor": 1,
+                "total_iters": 0
+            },
+    ):
        '''
        :param AbstractProblem problem: The formulation of the problem.
        :param torch.nn.Module model: The neural network model to use.
        :param torch.nn.Module loss: The loss function used as minimizer,
-            default torch.nn.MSELoss().
+            default :class:`torch.nn.MSELoss`.
        :param torch.nn.Module extra_features: The additional input
            features to use as augmented input.
        :param torch.optim.Optimizer optimizer: The neural network optimizer to
-            use; default is `torch.optim.Adam`.
+            use; default is :class:`torch.optim.Adam`.
        :param dict optimizer_kwargs: Optimizer constructor keyword args.
        :param float lr: The learning rate; default is 0.001.
        :param torch.optim.LRScheduler scheduler: Learning
            rate scheduler.
        :param dict scheduler_kwargs: LR scheduler constructor keyword args.
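A hedged construction sketch mirroring the defaults documented above; `problem` is assumed to be a PINA problem instance defined elsewhere, and the single-layer network is a placeholder:

import torch
from pina.solvers import PINN  # import path assumed from the __init__.py hunk

model = torch.nn.Linear(2, 1)              # placeholder network
solver = PINN(problem=problem,             # problem defined elsewhere (assumption)
              model=model,
              loss=torch.nn.MSELoss(),
              optimizer=torch.optim.Adam,
              optimizer_kwargs={'lr': 0.001})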
@@ -60,8 +63,8 @@ class PINN(SolverInterface):
                         optimizers=[optimizer],
                         optimizers_kwargs=[optimizer_kwargs],
                         extra_features=extra_features)

        # check consistency
        check_consistency(scheduler, LRScheduler, subclass=True)
        check_consistency(scheduler_kwargs, dict)
        check_consistency(loss, (LossInterface, _Loss), subclass=False)
@@ -71,14 +74,14 @@ class PINN(SolverInterface):
        self._loss = loss
        self._neural_net = self.models[0]

    def forward(self, x):
-        """Forward pass implementation for the PINN
-        solver.
+        """
+        Forward pass implementation for the PINN
+        solver.

-        :param torch.tensor x: Input data.
+        :param torch.Tensor x: Input tensor.
        :return: PINN solution.
-        :rtype: torch.tensor
+        :rtype: torch.Tensor
        """
        # extract labels
        x = x.extract(self.problem.input_variables)
@@ -89,8 +92,9 @@ class PINN(SolverInterface):
        return output

    def configure_optimizers(self):
-        """Optimizer configuration for the PINN
-        solver.
-        """
+        """
+        Optimizer configuration for the PINN
+        solver.
+
+        :return: The optimizers and the schedulers
+        :rtype: tuple(list, list)
@@ -107,7 +111,8 @@ class PINN(SolverInterface):

    def training_step(self, batch, batch_idx):
-        """PINN solver training step.
+        """
+        PINN solver training step.

        :param batch: The batch element in the dataloader.
        :type batch: tuple
@@ -159,17 +164,17 @@ class PINN(SolverInterface):
        Scheduler for the PINN training.
        """
        return self._scheduler

    @property
    def neural_net(self):
        """
        Neural network for the PINN training.
        """
        return self._neural_net

    @property
    def loss(self):
        """
        Loss for the PINN training.
        """
        return self._loss
@@ -2,30 +2,38 @@

from abc import ABCMeta, abstractmethod
from ..model.network import Network
-import pytorch_lightning as pl
+import pytorch_lightning
from ..utils import check_consistency
from ..problem import AbstractProblem
import torch


-class SolverInterface(pl.LightningModule, metaclass=ABCMeta):
-    """ Solver base class. """
+class SolverInterface(pytorch_lightning.LightningModule, metaclass=ABCMeta):
+    """
+    Solver base class. This class is a wrapper of the
+    LightningModule class, inheriting all the
+    LightningModule methods.
+    """

    def __init__(self,
                 models,
                 problem,
                 optimizers,
                 optimizers_kwargs,
                 extra_features=None):
        """
        :param models: A torch neural network model instance.
        :type models: torch.nn.Module
        :param problem: A problem definition instance.
        :type problem: AbstractProblem
-        :param list(torch.nn.Module) extra_features: the additional input
-            features to use as augmented input. If ``None`` no extra features
-            are passed. If it is a list of ``torch.nn.Module``, the extra feature
-            list is passed to all models. If it is a list of extra features' lists,
-            each single list of extra features is passed to a model.
+        :param list(torch.optim.Optimizer) optimizer: A list of neural network optimizers to
+            use.
+        :param list(dict) optimizer_kwargs: A list of optimizer constructor keyword args.
+        :param list(torch.nn.Module) extra_features: The additional input
+            features to use as augmented input. If ``None`` no extra features
+            are passed. If it is a list of :class:`torch.nn.Module`, the extra feature
+            list is passed to all models. If it is a list of extra features' lists,
+            each single list of extra features is passed to a model.
        """
        super().__init__()
@@ -52,37 +60,40 @@ class SolverInterface(pl.LightningModule, metaclass=ABCMeta):
            raise ValueError('You must define one optimizer for each model. '
                             f'Got {len_model} models, and {len_optimizer}'
                             ' optimizers.')

        # check length consistency optimizers kwargs
        if len_optimizer_kwargs != len_optimizer:
            raise ValueError('You must define one dictionary of keyword'
                             ' arguments for each optimizer. '
                             f'Got {len_optimizer} optimizers, and'
                             f' {len_optimizer_kwargs} dictionaries.')

        # extra features handling
        if extra_features is None:
            extra_features = [None] * len_model
        else:
            # if we only have a list of extra features
            if not isinstance(extra_features[0], (tuple, list)):
                extra_features = [extra_features] * len_model
            else:  # if we have a list of lists of extra features
                if len(extra_features) != len_model:
-                    raise ValueError('You passed a list of extrafeatures list with len'
-                                     f'different of models len. Expected {len_model} '
-                                     f'got {len(extra_features)}. If you want to use'
-                                     'the same list of extra features for all models, '
-                                     'just pass a list of extrafeatures and not a list '
-                                     'of list of extra features.')
+                    raise ValueError(
+                        'You passed a list of extra features lists whose length '
+                        f'differs from the number of models. Expected {len_model}, '
+                        f'got {len(extra_features)}. If you want to use '
+                        'the same list of extra features for all models, '
+                        'pass a flat list of extra features rather than a '
+                        'list of lists.')

        # assigning model and optimizers
        self._pina_models = []
        self._pina_optimizers = []

        for idx in range(len_model):
-            model_ = Network(model=models[idx], extra_features=extra_features[idx])
-            optim_ = optimizers[idx](model_.parameters(), **optimizers_kwargs[idx])
+            model_ = Network(model=models[idx],
+                             extra_features=extra_features[idx])
+            optim_ = optimizers[idx](model_.parameters(),
+                                     **optimizers_kwargs[idx])
            self._pina_models.append(model_)
            self._pina_optimizers.append(optim_)
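The branching above implements a small broadcasting rule; a standalone distillation (the helper name is hypothetical) that can be checked directly:

def broadcast_extra_features(extra_features, len_model):
    # None -> one empty slot per model
    if extra_features is None:
        return [None] * len_model
    # flat list -> replicated for every model
    if not isinstance(extra_features[0], (tuple, list)):
        return [extra_features] * len_model
    # list of lists -> must match the number of models one-to-one
    if len(extra_features) != len_model:
        raise ValueError('one extra-features list per model is required')
    return extra_features

assert broadcast_extra_features(None, 2) == [None, None]
assert broadcast_extra_features(['f'], 2) == [['f'], ['f']]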
@@ -90,9 +101,9 @@ class SolverInterface(pl.LightningModule, metaclass=ABCMeta):
        self._pina_problem = problem

    @abstractmethod
-    def forward(self):
+    def forward(self, *args, **kwargs):
        pass

    @abstractmethod
    def training_step(self):
        pass
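These two abstract methods define the contract every concrete solver (PINN, GAROM, SupervisedSolver) must fill in; a minimal standalone illustration of how ABCMeta enforces it:

from abc import ABCMeta, abstractmethod

class MiniSolver(metaclass=ABCMeta):
    @abstractmethod
    def forward(self, *args, **kwargs):
        pass

    @abstractmethod
    def training_step(self):
        pass

class Concrete(MiniSolver):
    def forward(self, x):
        return x

    def training_step(self):
        return 0.0

# MiniSolver() would raise TypeError; Concrete is instantiable.
assert Concrete().forward(3) == 3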
@@ -131,4 +142,4 @@ class SolverInterface(pl.LightningModule, metaclass=ABCMeta):
    # """
    # Set the problem formulation."""
    # check_consistency(problem, AbstractProblem, 'pina problem')
    # self._problem = problem
@@ -3,7 +3,7 @@ import torch
try:
    from torch.optim.lr_scheduler import LRScheduler  # torch >= 2.0
except ImportError:
    from torch.optim.lr_scheduler import _LRScheduler as LRScheduler  # torch < 2.0

from torch.optim.lr_scheduler import ConstantLR
@@ -19,25 +19,30 @@ class SupervisedSolver(SolverInterface):
    SupervisedSolver solver class. This class implements a SupervisedSolver,
    using a user specified ``model`` to solve a specific ``problem``.
    """
-    def __init__(self,
-                 problem,
-                 model,
-                 extra_features=None,
-                 loss = torch.nn.MSELoss(),
-                 optimizer=torch.optim.Adam,
-                 optimizer_kwargs={'lr' : 0.001},
-                 scheduler=ConstantLR,
-                 scheduler_kwargs={"factor": 1, "total_iters": 0},
-                 ):
+    def __init__(
+            self,
+            problem,
+            model,
+            extra_features=None,
+            loss=torch.nn.MSELoss(),
+            optimizer=torch.optim.Adam,
+            optimizer_kwargs={'lr': 0.001},
+            scheduler=ConstantLR,
+            scheduler_kwargs={
+                "factor": 1,
+                "total_iters": 0
+            },
+    ):
        '''
        :param AbstractProblem problem: The formulation of the problem.
        :param torch.nn.Module model: The neural network model to use.
        :param torch.nn.Module loss: The loss function used as minimizer,
-            default torch.nn.MSELoss().
+            default :class:`torch.nn.MSELoss`.
        :param torch.nn.Module extra_features: The additional input
            features to use as augmented input.
        :param torch.optim.Optimizer optimizer: The neural network optimizer to
-            use; default is `torch.optim.Adam`.
+            use; default is :class:`torch.optim.Adam`.
        :param dict optimizer_kwargs: Optimizer constructor keyword args.
        :param float lr: The learning rate; default is 0.001.
        :param torch.optim.LRScheduler scheduler: Learning
@@ -49,8 +54,8 @@ class SupervisedSolver(SolverInterface):
                         optimizers=[optimizer],
                         optimizers_kwargs=[optimizer_kwargs],
                         extra_features=extra_features)

        # check consistency
        check_consistency(scheduler, LRScheduler, subclass=True)
        check_consistency(scheduler_kwargs, dict)
        check_consistency(loss, (LossInterface, _Loss), subclass=False)
@@ -60,13 +65,12 @@ class SupervisedSolver(SolverInterface):
        self._loss = loss
        self._neural_net = self.models[0]

    def forward(self, x):
        """Forward pass implementation for the solver.

-        :param torch.tensor x: Input data.
+        :param torch.Tensor x: Input tensor.
        :return: Solver solution.
-        :rtype: torch.tensor
+        :rtype: torch.Tensor
        """
        # extract labels
        x = x.extract(self.problem.input_variables)
@@ -83,7 +87,7 @@ class SupervisedSolver(SolverInterface):
        :rtype: tuple(list, list)
        """
        return self.optimizers, [self.scheduler]

    def training_step(self, batch, batch_idx):
        """Solver training step.
@@ -105,9 +109,11 @@ class SupervisedSolver(SolverInterface):
                # data loss
                if hasattr(condition, 'output_points'):
                    input_pts, output_pts = samples
-                    loss = self.loss(self.forward(input_pts), output_pts) * condition.data_weight
+                    loss = self.loss(self.forward(input_pts),
+                                     output_pts) * condition.data_weight
                else:
-                    raise RuntimeError('Supervised solver works only in data-driven mode.')
+                    raise RuntimeError(
+                        'Supervised solver works only in data-driven mode.')

        self.log('mean_loss', float(loss), prog_bar=True, logger=True)
        return loss
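A standalone sketch of the weighted data loss computed above, with a plain linear network and random points standing in for the condition's data (all names here are placeholders):

import torch

net = torch.nn.Linear(2, 1)                     # stand-in for self.forward
loss_fn = torch.nn.MSELoss()                    # the default SupervisedSolver loss
input_pts, output_pts = torch.randn(8, 2), torch.randn(8, 1)
data_weight = 1.0                               # stand-in for condition.data_weight

loss = loss_fn(net(input_pts), output_pts) * data_weight
loss.backward()                                 # gradients flow to net parameters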
@@ -118,17 +124,17 @@ class SupervisedSolver(SolverInterface):
        Scheduler for training.
        """
        return self._scheduler

    @property
    def neural_net(self):
        """
        Neural network for training.
        """
        return self._neural_net

    @property
    def loss(self):
        """
        Loss for training.
        """
        return self._loss