Documentation for v0.1 version (#199)

* Adding equations, fixing typos
* improve _code.rst
* add the team rst and restructure index.rst
* fixing errors

---------

Co-authored-by: Dario Coscia <dariocoscia@dhcp-015.eduroam.sissa.it>
This commit is contained in:
Dario Coscia
2023-11-08 14:39:00 +01:00
committed by Nicola Demo
parent 3f9305d475
commit 8b7b61b3bd
144 changed files with 2741 additions and 1766 deletions

View File

@@ -1,6 +1,7 @@
""" Module """
""" Module for Equation. """
from .equation_interface import EquationInterface
class Equation(EquationInterface):
def __init__(self, equation):
@@ -11,7 +12,7 @@ class Equation(EquationInterface):
:param equation: A ``torch`` callable equation to
evaluate the residual.
:type equation: callable
:type equation: Callable
"""
if not callable(equation):
raise ValueError('equation must be a callable function.'
@@ -29,4 +30,4 @@ class Equation(EquationInterface):
:return: The residual evaluation of the specified equation.
:rtype: LabelTensor
"""
return self.__equation(input_, output_)
return self.__equation(input_, output_)

View File

@@ -4,7 +4,7 @@ from ..operators import grad, div, laplacian
class FixedValue(Equation):
def __init__(self, value, components=None):
"""
Fixed Value Equation class. This class can be
@@ -18,10 +18,12 @@ class FixedValue(Equation):
all the output variables are considered.
Default is ``None``.
"""
def equation(input_, output_):
if components is None:
return output_ - value
return output_.extract(components) - value
return output_.extract(components) - value
super().__init__(equation)
@@ -43,9 +45,11 @@ class FixedGradient(Equation):
which the gradient is calculated. d should be a subset
of the input labels. If ``None``, all the input variables
are considered. Default is ``None``.
"""
"""
def equation(input_, output_):
return grad(output_, input_, components=components, d=d) - value
super().__init__(equation)
@@ -67,9 +71,11 @@ class FixedFlux(Equation):
which the flux is calculated. d should be a subset
of the input labels. If ``None``, all the input variables
are considered. Default is ``None``.
"""
"""
def equation(input_, output_):
return div(output_, input_, components=components, d=d) - value
super().__init__(equation)
@@ -90,7 +96,9 @@ class Laplace(Equation):
which the flux is calculated. d should be a subset
of the input labels. If ``None``, all the input variables
are considered. Default is ``None``.
"""
"""
def equation(input_, output_):
return laplacian(output_, input_, components=components, d=d)
super().__init__(equation)

View File

@@ -1,8 +1,9 @@
""" Module """
""" Module for SystemEquation. """
import torch
from .equation import Equation
from ..utils import check_consistency
class SystemEquation(Equation):
def __init__(self, list_equation, reduction='mean'):
@@ -14,7 +15,7 @@ class SystemEquation(Equation):
A ``SystemEquation`` is specified by a list of
equations.
:param callable equation: A ``torch`` callable equation to
:param Callable equation: A ``torch`` callable equation to
evaluate the residual
:param str reduction: Specifies the reduction to apply to the output:
``none`` | ``mean`` | ``sum`` | ``callable``. ``none``: no reduction
@@ -28,7 +29,7 @@ class SystemEquation(Equation):
# equations definition
self.equations = []
for _, equation in enumerate(list_equation):
for _, equation in enumerate(list_equation):
self.equations.append(Equation(equation))
# possible reduction
@@ -39,7 +40,8 @@ class SystemEquation(Equation):
elif (reduction == 'none') or callable(reduction):
self.reduction = reduction
else:
raise NotImplementedError('Only mean and sum reductions implemented.')
raise NotImplementedError(
'Only mean and sum reductions implemented.')
def residual(self, input_, output_):
"""
@@ -52,12 +54,10 @@ class SystemEquation(Equation):
aggregated by the ``reduction`` defined in the ``__init__``.
:rtype: LabelTensor
"""
residual = torch.hstack([
equation.residual(input_, output_)
for equation in self.equations
])
residual = torch.hstack(
[equation.residual(input_, output_) for equation in self.equations])
if self.reduction == 'none':
return residual
return self.reduction(residual, dim=-1)
return self.reduction(residual, dim=-1)