documentation (#79)
Documentation for operators.py, span.py, plotter.py. Co-authored-by: Dario Coscia <dariocoscia@dhcp-128.eduroam.sissa.it>
@@ -4,8 +4,11 @@ Code Documentation
 .. toctree::
     :maxdepth: 3

     PINN <pinn.rst>
     LabelTensor <label_tensor.rst>
+    Span <span.rst>
     FeedForward <fnn.rst>
     DeepONet <deeponet.rst>
-    PINN <pinn.rst>
+    ContinuousConv <convolution.rst>
+    Operators <operators.rst>
+    Plotter <plotter.rst>
docs/source/_rst/operators.rst (new file, 10 lines)
@@ -0,0 +1,10 @@
+Operators
+===========
+.. currentmodule:: pina.operators
+
+.. automodule:: pina.operators
+    :members:
+    :private-members:
+    :undoc-members:
+    :show-inheritance:
+    :noindex:
docs/source/_rst/plotter.rst (new file, 8 lines)
@@ -0,0 +1,8 @@
+Plotter
+===========
+.. currentmodule:: pina.plotter
+
+.. automodule:: pina.plotter
+    :members:
+    :show-inheritance:
+    :noindex:
docs/source/_rst/span.rst (new file, 10 lines)
@@ -0,0 +1,10 @@
+Span
+===========
+.. currentmodule:: pina.span
+
+.. automodule:: pina.span
+
+.. autoclass:: Span
+    :members:
+    :show-inheritance:
+    :noindex:
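These three new pages are thin autodoc wrappers. Below is a sketch of the Sphinx setting they rely on; the PR does not touch `docs/source/conf.py`, so this is an assumption about the existing configuration, not part of the commit:

```python
# docs/source/conf.py (assumed, not shown in this diff): the automodule /
# autoclass directives above require the autodoc extension to be enabled.
extensions = [
    'sphinx.ext.autodoc',
]
```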
pina/operators.py
@@ -6,17 +6,43 @@ from pina.label_tensor import LabelTensor


 def grad(output_, input_, components=None, d=None):
     """
-    TODO
+    Perform gradient operation. The operator works for
+    vectorial and scalar functions, with multiple input
+    coordinates.
+
+    :param output_: output of the PINN, i.e. function values.
+    :type output_: LabelTensor
+    :param input_: input of the PINN, i.e. function coordinates.
+    :type input_: LabelTensor
+    :param components: function components to apply the operator,
+        defaults to None.
+    :type components: list(str), optional
+    :param d: coordinates of function components to be differentiated,
+        defaults to None.
+    :type d: list(str), optional
     """

     def grad_scalar_output(output_, input_, d):
         """
+        Perform gradient operation for a scalar function.
+
+        :param output_: output of the PINN, i.e. function values.
+        :type output_: LabelTensor
+        :param input_: input of the PINN, i.e. function coordinates.
+        :type input_: LabelTensor
+        :param d: coordinates of function components to be differentiated,
+            defaults to None.
+        :type d: list(str), optional
+        :raises RuntimeError: a vectorial function is passed.
+        :raises RuntimeError: missing derivative labels.
+        :return: function gradients.
+        :rtype: LabelTensor
         """

         if len(output_.labels) != 1:
-            raise RuntimeError
+            raise RuntimeError('only scalar function can be differentiated')
         if not all([di in input_.labels for di in d]):
-            raise RuntimeError
+            raise RuntimeError('derivative labels missing from input tensor')

         output_fieldname = output_.labels[0]
         gradients = torch.autograd.grad(
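Since the new docstring fixes the `grad` signature, a minimal usage sketch fits here; the `LabelTensor` construction, the labels, and the field u(x, y) = x^2 + y^2 are illustrative assumptions, not content of this diff:

```python
# Hedged sketch of pina.operators.grad as documented above.
import torch
from pina.label_tensor import LabelTensor
from pina.operators import grad

# 10 sample coordinates, labelled so grad can address them by name
pts = LabelTensor(torch.rand(10, 2, requires_grad=True), ['x', 'y'])
u = LabelTensor(pts.extract(['x'])**2 + pts.extract(['y'])**2, ['u'])

# differentiate the scalar field u with respect to both coordinates
grad_u = grad(u, pts, d=['x', 'y'])   # one derivative column per coordinate
```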
@@ -67,7 +93,25 @@ def grad(output_, input_, components=None, d=None):


 def div(output_, input_, components=None, d=None):
     """
-    TODO
+    Perform divergence operation. The operator works for
+    vectorial functions, with multiple input coordinates.
+
+    :param output_: output of the PINN, i.e. function values.
+    :type output_: LabelTensor
+    :param input_: input of the PINN, i.e. function coordinates.
+    :type input_: LabelTensor
+    :param components: function components to apply the operator,
+        defaults to None.
+    :type components: list(str), optional
+    :param d: coordinates of function components to be differentiated,
+        defaults to None.
+    :type d: list(str), optional
+    :raises TypeError: div operator works only for LabelTensor.
+    :raises ValueError: div operator works only for vector fields.
+    :raises ValueError: div operator must derive all components with
+        respect to all coordinates.
+    :return: Function divergence.
+    :rtype: LabelTensor
     """
     if not isinstance(input_, LabelTensor):
         raise TypeError
@@ -79,7 +123,7 @@ def div(output_, input_, components=None, d=None):
         components = output_.labels

     if output_.shape[1] < 2 or len(components) < 2:
-        raise ValueError('div supported only for vector field')
+        raise ValueError('div supported only for vector fields')

     if len(components) != len(d):
         raise ValueError
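A call sketch consistent with the constraints the docstring spells out (vector field, all components derived with respect to all coordinates); the labels and the field itself are illustrative:

```python
# Hedged sketch of pina.operators.div as documented above.
import torch
from pina.label_tensor import LabelTensor
from pina.operators import div

pts = LabelTensor(torch.rand(10, 2, requires_grad=True), ['x', 'y'])
x, y = pts.extract(['x']), pts.extract(['y'])
field = LabelTensor(torch.cat([x * y, -y], dim=1), ['ux', 'uy'])

# len(components) == len(d), as the ValueError branch requires
div_u = div(field, pts, components=['ux', 'uy'], d=['x', 'y'])
```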
@@ -99,7 +143,28 @@ def div(output_, input_, components=None, d=None):


 def nabla(output_, input_, components=None, d=None, method='std'):
     """
-    TODO
+    Perform nabla (laplace) operation. The operator works for
+    vectorial and scalar functions, with multiple input
+    coordinates.
+
+    :param output_: output of the PINN, i.e. function values.
+    :type output_: LabelTensor
+    :param input_: input of the PINN, i.e. function coordinates.
+    :type input_: LabelTensor
+    :param components: function components to apply the operator,
+        defaults to None.
+    :type components: list(str), optional
+    :param d: coordinates of function components to be differentiated,
+        defaults to None.
+    :type d: list(str), optional
+    :param method: method used to calculate nabla, defaults to 'std'.
+        The alternative 'divgrad' applies the gradient operator first
+        and the divergence operator afterwards.
+    :type method: str, optional
+    :raises ValueError: for a vectorial field, the derivative must be
+        performed with respect to all coordinates.
+    :raises NotImplementedError: 'divgrad' not implemented as method.
+    :return: Function nabla.
+    :rtype: LabelTensor
     """
     if d is None:
         d = input_.labels
@@ -111,7 +176,7 @@ def nabla(output_, input_, components=None, d=None, method='std'):
         raise ValueError

     if method == 'divgrad':
-        raise NotImplementedError
+        raise NotImplementedError('divgrad not implemented as method')
         # TODO fix
         # grad_output = grad(output_, input_, components, d)
         # result = div(grad_output, input_, d=d)
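A sketch of the default `method='std'` path; for u(x, y) = x^2 + y^2 the exact Laplacian is the constant 4, which gives a quick sanity check (assumed API usage, as above):

```python
# Hedged sketch of pina.operators.nabla as documented above.
import torch
from pina.label_tensor import LabelTensor
from pina.operators import nabla

pts = LabelTensor(torch.rand(10, 2, requires_grad=True), ['x', 'y'])
u = LabelTensor(pts.extract(['x'])**2 + pts.extract(['y'])**2, ['u'])

lap_u = nabla(u, pts, components=['u'], d=['x', 'y'])  # ~4 at every point
```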
@@ -146,6 +211,25 @@ def nabla(output_, input_, components=None, d=None, method='std'):


 def advection(output_, input_, velocity_field, components=None, d=None):
     """
+    Perform advection operation. The operator works for
+    vectorial functions, with multiple input coordinates.
+
+    :param output_: output of the PINN, i.e. function values.
+    :type output_: LabelTensor
+    :param input_: input of the PINN, i.e. function coordinates.
+    :type input_: LabelTensor
+    :param velocity_field: field used for multiplying the gradient.
+    :type velocity_field: str
+    :param components: function components to apply the operator,
+        defaults to None.
+    :type components: list(str), optional
+    :param d: coordinates of function components to be differentiated,
+        defaults to None.
+    :type d: list(str), optional
+    :return: Function advection.
+    :rtype: LabelTensor
     """
     if d is None:
         d = input_.labels
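For reference, the operation the advection docstring describes is the standard advection term; the function body is not shown in this hunk, so the formula below is the textbook definition rather than a statement about the implementation:

```latex
(\mathbf{b} \cdot \nabla)\, u = \sum_{i} b_i \, \frac{\partial u}{\partial x_i}
```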
pina/pinn.py
@@ -19,7 +19,7 @@ class PINN(object):
                  optimizer_kwargs=None,
                  lr=0.001,
                  lr_scheduler_type=lrs.ConstantLR,
-                 lr_scheduler_kwargs={"factor" : 1, "total_iters" : 0},
+                 lr_scheduler_kwargs={"factor": 1, "total_iters": 0},
                  regularizer=0.00001,
                  batch_size=None,
                  dtype=torch.float32,
@@ -80,7 +80,8 @@ class PINN(object):
             optimizer_kwargs['lr'] = lr
         self.optimizer = optimizer(
             self.model.parameters(), weight_decay=regularizer, **optimizer_kwargs)
-        self._lr_scheduler = lr_scheduler_type(self.optimizer, **lr_scheduler_kwargs)
+        self._lr_scheduler = lr_scheduler_type(
+            self.optimizer, **lr_scheduler_kwargs)

         self.batch_size = batch_size
         self.data_set = PinaDataset(self)
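The reformatted defaults above keep the learning rate constant. The same pattern in plain torch, as a standalone sketch (the full PINN constructor takes more arguments than this hunk shows):

```python
import torch
import torch.optim.lr_scheduler as lrs

model = torch.nn.Linear(2, 1)   # stand-in for the PINN model
optimizer = torch.optim.Adam(
    model.parameters(), lr=0.001, weight_decay=0.00001)
# factor=1 and total_iters=0 make ConstantLR a no-op placeholder scheduler
scheduler = lrs.ConstantLR(optimizer, factor=1, total_iters=0)
```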
@@ -241,7 +242,9 @@ class PINN(object):
                 pts = condition.input_points.to(
                     dtype=self.dtype, device=self.device)
                 predicted = self.model(pts)
-                residuals = predicted - condition.output_points.to(device=self.device, dtype=self.dtype)  # TODO fix
+                residuals = predicted - \
+                    condition.output_points.to(
+                        device=self.device, dtype=self.dtype)  # TODO fix
                 local_loss = (
                     condition.data_weight*self._compute_norm(residuals))
                 single_loss.append(local_loss)
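The reflowed lines compute a weighted data-fitting term. In isolation (a sketch: `_compute_norm` is not shown in this diff, so an MSE-style norm is assumed):

```python
import torch

predicted = torch.rand(10, 1)   # stand-in for self.model(pts)
target = torch.rand(10, 1)      # stand-in for condition.output_points
data_weight = 1.0

residuals = predicted - target
local_loss = data_weight * residuals.pow(2).mean()
```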
pina/plotter.py
@@ -1,6 +1,6 @@
 """ Module for plotting. """
 import matplotlib
-#matplotlib.use('Qt5Agg')
+# matplotlib.use('Qt5Agg')
 import matplotlib.pyplot as plt
 import numpy as np
 import torch
@@ -12,8 +12,24 @@ from .problem import SpatialProblem, TimeDependentProblem


 class Plotter:
+    """
+    Implementation of a plotter class, for easy visualizations.
+    """

     def plot_samples(self, pinn, variables=None):
         """
+        Plot a sample of the solution.
+
+        :param pinn: the PINN object.
+        :type pinn: PINN
+        :param variables: pinn variable domains ('spatial' or 'temporal'),
+            defaults to None.
+        :type variables: str, optional
+
+        :Example:
+            >>> plotter = Plotter()
+            >>> plotter.plot_samples(pinn=pinn, variables='spatial')
         """

         if variables is None:
             variables = pinn.problem.domain.variables
@@ -51,7 +67,17 @@ class Plotter:
         plt.show()

     def _1d_plot(self, pts, pred, method, truth_solution=None, **kwargs):
-        """
+        """Plot the solution for a one-dimensional function.
+
+        :param pts: Points to plot the solution.
+        :type pts: torch.Tensor
+        :param pred: PINN solution evaluated at 'pts'.
+        :type pred: torch.Tensor
+        :param method: not used, kept for code compatibility.
+        :type method: None
+        :param truth_solution: Real solution evaluated at 'pts',
+            defaults to None.
+        :type truth_solution: torch.Tensor, optional
+        """
         fig, ax = plt.subplots(nrows=1, ncols=1, figsize=(8, 8))
@@ -67,7 +93,19 @@ class Plotter:

     def _2d_plot(self, pts, pred, v, res, method, truth_solution=None,
                  **kwargs):
-        """
+        """Plot the solution for a two-dimensional function.
+
+        :param pts: Points to plot the solution.
+        :type pts: torch.Tensor
+        :param pred: PINN solution evaluated at 'pts'.
+        :type pred: torch.Tensor
+        :param v: variables to extract from 'pts' for the plot axes.
+        :type v: list[str]
+        :param res: grid resolution along each axis.
+        :type res: int
+        :param method: matplotlib method to plot 2-dimensional data,
+            see https://matplotlib.org/stable/api/axes_api.html for
+            reference.
+        :type method: str
+        :param truth_solution: Real solution evaluated at 'pts',
+            defaults to None.
+        :type truth_solution: torch.Tensor, optional
+        """

         grids = [p_.reshape(res, res) for p_ in pts.extract(v).cpu().T]
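The `method` parameter documented above is dispatched with `getattr`, as the next hunk shows. The pattern in isolation, with illustrative data:

```python
import matplotlib.pyplot as plt
import numpy as np

xs, ys = np.meshgrid(np.linspace(0, 1, 32), np.linspace(0, 1, 32))
vals = np.sin(np.pi * xs) * np.sin(np.pi * ys)

fig, ax = plt.subplots()
cb = getattr(ax, 'contourf')(xs, ys, vals)  # equivalent to ax.contourf(...)
fig.colorbar(cb, ax=ax)
plt.show()
```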
@@ -77,9 +115,11 @@ class Plotter:
             truth_output = truth_solution(pts).float().reshape(res, res)
             fig, ax = plt.subplots(nrows=1, ncols=3, figsize=(16, 6))

-            cb = getattr(ax[0], method)(*grids, pred_output.cpu().detach(), **kwargs)
+            cb = getattr(ax[0], method)(
+                *grids, pred_output.cpu().detach(), **kwargs)
             fig.colorbar(cb, ax=ax[0])
-            cb = getattr(ax[1], method)(*grids, truth_output.cpu().detach(), **kwargs)
+            cb = getattr(ax[1], method)(
+                *grids, truth_output.cpu().detach(), **kwargs)
             fig.colorbar(cb, ax=ax[1])
             cb = getattr(ax[2], method)(*grids,
                                         (truth_output-pred_output).cpu().detach(),
@@ -87,13 +127,31 @@ class Plotter:
             fig.colorbar(cb, ax=ax[2])
         else:
             fig, ax = plt.subplots(nrows=1, ncols=1, figsize=(8, 6))
-            cb = getattr(ax, method)(*grids, pred_output.cpu().detach(), **kwargs)
+            cb = getattr(ax, method)(
+                *grids, pred_output.cpu().detach(), **kwargs)
             fig.colorbar(cb, ax=ax)


     def plot(self, pinn, components=None, fixed_variables={}, method='contourf',
              res=256, filename=None, **kwargs):
         """
+        Plot a sample of the PINN output.
+
+        :param pinn: the PINN object.
+        :type pinn: PINN
+        :param components: function components to plot, defaults to None.
+        :type components: list[str], optional
+        :param fixed_variables: function variables to keep fixed during
+            plotting, passed as a dict mapping each variable to its
+            fixed value, defaults to {}.
+        :type fixed_variables: dict, optional
+        :param method: matplotlib method to plot the solution,
+            defaults to 'contourf'.
+        :type method: str, optional
+        :param res: number of points used for plotting in each axis,
+            defaults to 256.
+        :type res: int, optional
+        :param filename: file name to save the plot, defaults to None.
+        :type filename: str, optional
+        """
         if components is None:
             components = [pinn.problem.output_variables]
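A hedged usage sketch of the `plot` signature documented above; `pinn` stands for an already trained PINN on a problem with variables x, y, t, and the component label 'u' is illustrative:

```python
plotter = Plotter()
plotter.plot(pinn, components=['u'], fixed_variables={'t': 1.0},
             method='pcolormesh', res=128, filename='solution.png')
```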
@@ -134,9 +192,14 @@ class Plotter:

     def plot_loss(self, pinn, label=None, log_scale=True):
         """
-        Plot the loss trend
+        Plot the loss function values during training.

-        TODO
+        :param pinn: the PINN object.
+        :type pinn: PINN
+        :param label: matplotlib label, defaults to None.
+        :type label: str, optional
+        :param log_scale: use of log scale in plotting, defaults to True.
+        :type log_scale: bool, optional
         """

         if not label:
pina/span.py (120 lines changed)
@@ -7,8 +7,18 @@ from .utils import torch_lhs


 class Span(Location):
+    """PINA implementation of a Hypercube domain."""
+
     def __init__(self, span_dict):
+        """
+        :param span_dict: A dictionary whose keys are strings naming the
+            pinn input variables, and whose values are lists with the
+            domain extrema.
+        :type span_dict: dict
+
+        :Example:
+            >>> spatial_domain = Span({'x': [0, 1], 'y': [0, 1]})
+        """
         self.fixed_ = {}
         self.range_ = {}
@@ -22,14 +32,46 @@ class Span(Location):

     @property
     def variables(self):
+        """Spatial variables.
+
+        :return: Spatial variables defined in '__init__()'.
+        :rtype: list[str]
+        """
         return list(self.fixed_.keys()) + list(self.range_.keys())

     def update(self, new_span):
+        """Add new dimensions to the span.
+
+        :param new_span: A new span object to merge.
+        :type new_span: Span
+
+        :Example:
+            >>> spatial_domain = Span({'x': [0, 1], 'y': [0, 1]})
+            >>> spatial_domain.variables
+            ['x', 'y']
+            >>> spatial_domain_2 = Span({'z': [3, 4], 'w': [0, 1]})
+            >>> spatial_domain.update(spatial_domain_2)
+            >>> spatial_domain.variables
+            ['x', 'y', 'z', 'w']
+        """
         self.fixed_.update(new_span.fixed_)
         self.range_.update(new_span.range_)

     def _sample_range(self, n, mode, bounds):
-        """
+        """Rescale the samples to the correct bounds.
+
+        :param n: Number of points to sample.
+        :type n: int
+        :param mode: Mode for sampling, defaults to 'random'.
+            Available modes include: random sampling, 'random';
+            latin hypercube sampling, 'latin' or 'lh';
+            chebyshev sampling, 'chebyshev'; grid sampling, 'grid'.
+        :type mode: str, optional
+        :param bounds: Bounds to rescale the samples.
+        :type bounds: torch.Tensor
+        :return: Rescaled sample points.
+        :rtype: torch.Tensor
+        """
         dim = bounds.shape[0]
         if mode in ['chebyshev', 'grid'] and dim != 1:
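What `_sample_range` does, in isolation: points are drawn in the unit hypercube and mapped onto the requested bounds (a sketch; the numbers are illustrative):

```python
import torch

unit_pts = torch.rand(4, 2)                    # raw samples in [0, 1]^2
bounds = torch.tensor([[0., 1.], [-1., 1.]])   # one [min, max] row per variable
lo, hi = bounds[:, 0], bounds[:, 1]
pts = unit_pts * (hi - lo) + lo                # rescaled to [0,1] x [-1,1]
```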
@@ -50,7 +92,55 @@ class Span(Location):
         return pts

     def sample(self, n, mode='random', variables='all'):
-        """TODO
+        """Sample routine.
+
+        :param n: Number of points to sample, see the Note below
+            for reference.
+        :type n: int
+        :param mode: Mode for sampling, defaults to 'random'.
+            Available modes include: random sampling, 'random';
+            latin hypercube sampling, 'latin' or 'lh';
+            chebyshev sampling, 'chebyshev'; grid sampling, 'grid'.
+        :type mode: str, optional
+        :param variables: pinn variable to be sampled, defaults to 'all'.
+        :type variables: str or list[str], optional
+
+        .. note::
+            The total number of points sampled in case of multiple variables
+            is not 'n', and it depends on the chosen 'mode'. If 'mode' is
+            'grid' or 'chebyshev', the points are sampled independently
+            across the variables and the results crossed together, i.e. the
+            final number of points is 'n' to the power of the number of
+            variables. If 'mode' is 'random', 'lh' or 'latin', the variables
+            are sampled all together, and the final number of points is 'n'.
+
+        .. warning::
+            The extrema values of Span are sampled only in 'grid' mode.
+
+        :Example:
+            >>> spatial_domain = Span({'x': [0, 1], 'y': [0, 1]})
+            >>> spatial_domain.sample(n=4, mode='random')
+            tensor([[0.0108, 0.7643],
+                    [0.4477, 0.8015],
+                    [0.2063, 0.8087],
+                    [0.8735, 0.6349]])
+            >>> spatial_domain.sample(n=4, mode='grid')
+            tensor([[0.0000, 0.0000],
+                    [0.3333, 0.0000],
+                    [0.6667, 0.0000],
+                    [1.0000, 0.0000],
+                    [0.0000, 0.3333],
+                    [0.3333, 0.3333],
+                    [0.6667, 0.3333],
+                    [1.0000, 0.3333],
+                    [0.0000, 0.6667],
+                    [0.3333, 0.6667],
+                    [0.6667, 0.6667],
+                    [1.0000, 0.6667],
+                    [0.0000, 1.0000],
+                    [0.3333, 1.0000],
+                    [0.6667, 1.0000],
+                    [1.0000, 1.0000]])
+        """
         def _1d_sampler(n, mode, variables):
             """Sample the variables independently and cross the results."""
@@ -81,7 +171,20 @@ class Span(Location):
             return result

         def _Nd_sampler(n, mode, variables):
-            """ Sample all the variables together """
+            """Sample all the variables together.
+
+            :param n: Number of points to sample.
+            :type n: int
+            :param mode: Mode for sampling, defaults to 'random'.
+                Available modes include: random sampling, 'random';
+                latin hypercube sampling, 'latin' or 'lh';
+                chebyshev sampling, 'chebyshev'; grid sampling, 'grid'.
+            :type mode: str, optional
+            :param variables: pinn variable to be sampled, defaults to 'all'.
+            :type variables: str or list[str], optional
+            :return: Sample points.
+            :rtype: list[torch.Tensor]
+            """
             pairs = [(k, v) for k, v in self.range_.items() if k in variables]
             keys, values = map(list, zip(*pairs))
             bounds = torch.tensor(values)
@@ -101,6 +204,15 @@ class Span(Location):
             return result

         def _single_points_sample(n, variables):
+            """Sample a single point in one dimension.
+
+            :param n: Number of points to sample.
+            :type n: int
+            :param variables: Variables to sample from.
+            :type variables: list[str]
+            :return: Sample points.
+            :rtype: list[torch.Tensor]
+            """
             tmp = []
             for variable in variables:
                 if variable in self.fixed_.keys():