Documentation for v0.1 (#199)

* Add Equations, fix typos
* Improve _code.rst
* Add the team rst and restructure index.rst
* Fix errors

---------

Co-authored-by: Dario Coscia <dariocoscia@dhcp-015.eduroam.sissa.it>
Dario Coscia
2023-11-08 14:39:00 +01:00
committed by Nicola Demo
parent 3f9305d475
commit 8b7b61b3bd
144 changed files with 2741 additions and 1766 deletions

View File

@@ -1,9 +1,5 @@
__all__ = [
'SwitchOptimizer',
'R3Refinement',
'MetricTracker'
]
__all__ = ['SwitchOptimizer', 'R3Refinement', 'MetricTracker']
from .optimizer_callbacks import SwitchOptimizer
from .adaptive_refinment_callbacks import R3Refinement
from .processing_callbacks import MetricTracker
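For reference, the three exported callbacks can then be imported in one line. A minimal sketch, assuming the package exposes this module as `pina.callbacks` (the import path is not shown in this diff):

from pina.callbacks import SwitchOptimizer, R3Refinement, MetricTracker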

View File

@@ -6,37 +6,37 @@ import torch
from ..utils import check_consistency
class R3Refinement(Callback):
"""
PINA implementation of an R3 Refinement Callback.
.. seealso::
**Original reference**: Daw, Arka, et al. "Mitigating Propagation Failures
in Physics-informed Neural Networks using
Retain-Resample-Release (R3) Sampling." (2023).
DOI: `10.48550/arXiv.2207.02338
<https://doi.org/10.48550/arXiv.2207.02338>`_
"""
def __init__(self, sample_every):
"""
R3 routine for sampling new points based on
adaptive search. The algorithm incrementally
accumulates collocation points in regions of
high PDE residuals, and releases those with
low residuals. Points are sampled uniformly
in all regions where sampling is needed.
PINA Implementation of an R3 Refinement Callback.
This callback implements the R3 (Retain-Resample-Release) routine for sampling new points based on adaptive search.
The algorithm incrementally accumulates collocation points in regions of high PDE residuals, and releases those
with low residuals. Points are sampled uniformly in all regions where sampling is needed.
.. seealso::
Original Reference: Daw, Arka, et al. *Mitigating Propagation Failures in Physics-informed Neural Networks
using Retain-Resample-Release (R3) Sampling. (2023)*.
DOI: `10.48550/arXiv.2207.02338
<https://doi.org/10.48550/arXiv.2207.02338>`_
:param int sample_every: Frequency for sampling, in epochs; the R3 routine runs every `sample_every` epochs.
:raises ValueError: If `sample_every` is not an integer.
Example:
>>> r3_callback = R3Refinement(sample_every=5)
"""
super().__init__()
# sample every
check_consistency(sample_every, int)
self._sample_every = sample_every
def _compute_residual(self, trainer):
"""
Computes the residuals for a PINN object.
@@ -63,7 +63,7 @@ class R3Refinement(Callback):
target = condition.equation.residual(pts, solver.forward(pts))
res_loss[location] = torch.abs(target).as_subclass(torch.Tensor)
tot_loss.append(torch.abs(target))
return torch.vstack(tot_loss), res_loss
def _r3_routine(self, trainer):
@@ -79,7 +79,7 @@ class R3Refinement(Callback):
# !!!!!! From now everything is performed on CPU !!!!!!
# average loss
avg = (tot_loss.mean()).to('cpu')
# points to keep
old_pts = {}
@@ -97,16 +97,18 @@ class R3Refinement(Callback):
tot_points += len(pts)
# extract new points to sample uniformly for each location
n_points = (self._tot_pop_numb - tot_points ) // len(self._sampling_locations)
remainder = (self._tot_pop_numb - tot_points ) % len(self._sampling_locations)
n_points = (self._tot_pop_numb - tot_points) // len(
self._sampling_locations)
remainder = (self._tot_pop_numb - tot_points) % len(
self._sampling_locations)
n_uniform_points = [n_points] * len(self._sampling_locations)
n_uniform_points[-1] += remainder
# sample new points
for numb_pts, loc in zip(n_uniform_points, self._sampling_locations):
trainer._model.problem.discretise_domain(numb_pts,
'random',
locations=[loc])
'random',
locations=[loc])
# adding previous population points
trainer._model.problem.add_points(old_pts)
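The freed sampling budget is split evenly across the locations, with the integer-division remainder assigned to the last one. A self-contained sketch of that arithmetic (function and location names are illustrative, not from the diff):

# Illustrative only: mirror the split performed above.
def split_budget(budget, locations):
    n_points, remainder = divmod(budget, len(locations))
    counts = [n_points] * len(locations)
    counts[-1] += remainder                 # last location absorbs the remainder
    return counts

print(split_budget(10, ['gamma1', 'gamma2', 'interior']))   # [3, 3, 4]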
@@ -114,6 +116,18 @@ class R3Refinement(Callback):
trainer._create_or_update_loader()
def on_train_start(self, trainer, _):
"""
Callback function called at the start of training.
This method extracts the locations for sampling from the problem conditions and calculates the total population.
:param trainer: The trainer object managing the training process.
:type trainer: pytorch_lightning.Trainer
:param _: Placeholder argument (not used).
:return: None
:rtype: None
"""
# extract locations for sampling
problem = trainer._model.problem
locations = []
@@ -122,7 +136,7 @@ class R3Refinement(Callback):
if hasattr(condition, 'location'):
locations.append(condition_name)
self._sampling_locations = locations
# extract total population
total_population = 0
for location in self._sampling_locations:
@@ -131,5 +145,17 @@ class R3Refinement(Callback):
self._tot_pop_numb = total_population
def on_train_epoch_end(self, trainer, __):
"""
Callback function called at the end of each training epoch.
This method triggers the R3 routine for refinement if the current epoch is a multiple of `_sample_every`.
:param trainer: The trainer object managing the training process.
:type trainer: pytorch_lightning.Trainer
:param __: Placeholder argument (not used).
:return: None
:rtype: None
"""
if trainer.current_epoch % self._sample_every == 0:
self._r3_routine(trainer)
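Taken together, `on_train_start` records the sampling locations and the total number of collocation points, while `on_train_epoch_end` re-runs the R3 routine every `sample_every` epochs. A minimal usage sketch, assuming a PINA solver `pinn` has already been built and that `Trainer` is importable from `pina` (both are assumptions, not shown in this diff):

from pina import Trainer                     # assumed import path
from pina.callbacks import R3Refinement      # assumed import path

# Resample collocation points every 5 epochs during training.
r3 = R3Refinement(sample_every=5)
trainer = Trainer(solver=pinn, callbacks=[r3], max_epochs=100)   # pinn: a pre-built PINA solver
trainer.train()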

View File

@@ -6,22 +6,30 @@ from ..utils import check_consistency
class SwitchOptimizer(Callback):
"""
PINA implementation of a Lightning Callback to switch
optimizer during training. The routine can be used to
try multiple optimizers during training, without the
need to stop training.
"""
def __init__(self, new_optimizers, new_optimizers_kwargs, epoch_switch):
"""
SwitchOptimizer is a routine for switching optimizer during training.
PINA Implementation of a Lightning Callback to switch optimizer during training.
:param torch.optim.Optimizer | list new_optimizers: The model optimizers to
switch to. It must be a single :class:`torch.optim.Optimizer`, or a list of
:class:`torch.optim.Optimizer` for multiple model solvers.
:param dict | list new_optimizers_kwargs: The keyword arguments for the new
optimizers. It must be a dict, or a list of dicts for multiple optimizers.
:param int epoch_switch: Epoch for switching optimizer.
This callback allows for switching between different optimizers during training, enabling
the exploration of multiple optimization strategies without the need to stop training.
:param new_optimizers: The model optimizers to switch to. Can be a single
:class:`torch.optim.Optimizer` or a list of them for multiple model solvers.
:type new_optimizers: torch.optim.Optimizer | list
:param new_optimizers_kwargs: The keyword arguments for the new optimizers. Can be a single dictionary
or a list of dictionaries corresponding to each optimizer.
:type new_optimizers_kwargs: dict | list
:param epoch_switch: The epoch at which to switch to the new optimizer.
:type epoch_switch: int
:raises ValueError: If `epoch_switch` is less than 1 or if there is a mismatch in the number of
optimizers and their corresponding keyword argument dictionaries.
Example:
>>> switch_callback = SwitchOptimizer(new_optimizers=[optimizer1, optimizer2],
... new_optimizers_kwargs=[{'lr': 0.001}, {'lr': 0.01}],
... epoch_switch=10)
"""
super().__init__()
@@ -44,19 +52,29 @@ class SwitchOptimizer(Callback):
' arguments for each optimizer.'
f' Got {len_optimizer} optimizers, and'
f' {len_optimizer_kwargs} dictionaries')
# save new optimizers
self._new_optimizers = new_optimizers
self._new_optimizers_kwargs = new_optimizers_kwargs
self._epoch_switch = epoch_switch
def on_train_epoch_start(self, trainer, __):
"""
Callback function to switch optimizer at the start of each training epoch.
:param trainer: The trainer object managing the training process.
:type trainer: pytorch_lightning.Trainer
:param __: Placeholder argument (not used).
:return: None
:rtype: None
"""
if trainer.current_epoch == self._epoch_switch:
optims = []
for idx, (optim, optim_kwargs) in enumerate(
zip(self._new_optimizers,
self._new_optimizers_kwargs)
):
optims.append(optim(trainer._model.models[idx].parameters(), **optim_kwargs))
zip(self._new_optimizers, self._new_optimizers_kwargs)):
optims.append(
optim(trainer._model.models[idx].parameters(),
**optim_kwargs))
trainer.optimizers = optims
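At `epoch_switch` the callback builds each new optimizer on the corresponding model's parameters and overwrites `trainer.optimizers`, so the entries of `new_optimizers` are expected to be optimizer classes rather than instances. A hedged construction sketch (the optimizer choice and learning rate are illustrative):

import torch
from pina.callbacks import SwitchOptimizer   # assumed import path

# Train with the solver's default optimizer first, then switch to LBFGS at epoch 10.
switch = SwitchOptimizer(new_optimizers=[torch.optim.LBFGS],
                         new_optimizers_kwargs=[{'lr': 0.1}],
                         epoch_switch=10)
# The callback is then passed to the trainer, e.g. Trainer(solver=pinn, callbacks=[switch]).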

View File

@@ -6,20 +6,53 @@ import copy
class MetricTracker(Callback):
"""
PINA implementation of a Lightning Callback to track relevant
metrics during training.
"""
def __init__(self):
"""
PINA Implementation of a Lightning Callback for Metric Tracking.
This class provides functionality to track relevant metrics during the training process.
:ivar _collection: A list that stores the metrics logged at the end of each training epoch.
The aggregated metrics are exposed through the `metrics` property, which returns a dictionary
of tensors with one entry per epoch.
Example:
>>> tracker = MetricTracker()
>>> # ... Perform training ...
>>> metrics = tracker.metrics
"""
self._collection = []
def on_train_epoch_end(self, trainer, __):
self._collection.append(copy.deepcopy(trainer.logged_metrics)) # track them
"""
Collect and track metrics at the end of each training epoch.
:param trainer: The trainer object managing the training process.
:type trainer: pytorch_lightning.Trainer
:param __: Placeholder argument (not used).
:return: None
:rtype: None
"""
self._collection.append(copy.deepcopy(
trainer.logged_metrics)) # track them
@property
def metrics(self):
common_keys = set.intersection(*map(set, self._collection))
v = {k: torch.stack([dic[k] for dic in self._collection]) for k in common_keys}
return v
"""
Aggregate collected metrics during training.
:return: A dictionary containing aggregated metric values.
:rtype: dict
"""
common_keys = set.intersection(*map(set, self._collection))
v = {
k: torch.stack([dic[k] for dic in self._collection])
for k in common_keys
}
return v
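The `metrics` property keeps only the keys that appear in every epoch's logged metrics and stacks each of them into a tensor with one entry per epoch. A small self-contained sketch of that aggregation, using made-up metric names and values rather than a real training run:

import torch

# Pretend these dictionaries were collected by on_train_epoch_end over three epochs.
collection = [
    {'mean_loss': torch.tensor(0.9), 'val_loss': torch.tensor(1.1)},
    {'mean_loss': torch.tensor(0.5)},
    {'mean_loss': torch.tensor(0.2), 'val_loss': torch.tensor(0.4)},
]
common_keys = set.intersection(*map(set, collection))             # {'mean_loss'}
metrics = {k: torch.stack([d[k] for d in collection]) for k in common_keys}
print(metrics)   # {'mean_loss': tensor([0.9000, 0.5000, 0.2000])}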