Enhancing Equations
- add init file - add docs - fix bug in SystemEquation: replace torch.stack with torch.hstack - add tests
committed by Nicola Demo
parent 09f04008b5
commit b9ddea827b
@@ -0,0 +1,12 @@
__all__ = [
    'SystemEquation',
    'Equation',
    'FixedValue',
    'FixedGradient',
    'FixedFlux',
    'Laplace',
]

from .equation import *
from .equation_factory import *
from .system_equation import *
@@ -4,7 +4,29 @@ from .equation_interface import EquationInterface
class Equation(EquationInterface):

    def __init__(self, equation):
        """
        Equation class for specifying any equation in PINA.
        Each ``equation`` passed to a ``Condition`` object
        must be an ``Equation`` or ``SystemEquation``.

        :param equation: A ``torch`` callable equation to
            evaluate the residual.
        :type equation: callable
        """
        if not callable(equation):
            raise ValueError('equation must be a callable function. '
                             'Expected a callable function, got '
                             f'{equation}')
        self.__equation = equation

    def residual(self, input_, output_):
        """
        Residual computation of the equation.

        :param LabelTensor input_: Input points to evaluate the equation.
        :param LabelTensor output_: Output vector given by a model (e.g.,
            a ``FeedForward`` model).
        :return: The residual evaluation of the specified equation.
        :rtype: LabelTensor
        """
        return self.__equation(input_, output_)
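As a quick illustration of the class above, here is a minimal usage sketch of wrapping a callable residual and evaluating it. The labels, shapes and operator calls are taken from the test file added further down in this commit, so treat it as illustrative rather than as documented API:

import torch
from pina import LabelTensor
from pina.equation import Equation
from pina.operators import nabla

# residual of a Poisson-type problem: nabla(u1) - f, with a sine forcing term
def poisson(input_, output_):
    force = (torch.sin(input_.extract(['x']) * torch.pi) *
             torch.sin(input_.extract(['y']) * torch.pi))
    return nabla(output_.extract(['u1']), input_) - force

equation = Equation(poisson)

pts = LabelTensor(torch.rand(10, 2), labels=['x', 'y'])
pts.requires_grad = True
u = torch.pow(pts, 2)
u.labels = ['u1', 'u2']

# residual() simply evaluates the wrapped callable on (input_, output_)
res = equation.residual(pts, u)   # shape [10, 1], as asserted in the tests below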
@@ -6,6 +6,18 @@ from ..operators import grad, div, nabla
class FixedValue(Equation):

    def __init__(self, value, components=None):
        """
        Fixed Value Equation class. This class can be
        used to enforce a fixed value for a specific
        condition, e.g. Dirichlet boundary conditions.

        :param float value: Value to be maintained fixed.
        :param list(str) components: the name of the output
            variables on which the fixed value is enforced. It should
            be a subset of the output labels. If ``None``,
            all the output variables are considered.
            Default is ``None``.
        """
        def equation(input_, output_):
            if components is None:
                return output_ - value
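A small sketch of the Dirichlet-style use case mentioned in the docstring. Since the rest of this hunk is not shown, it assumes the inner equation is handed to super().__init__ exactly as in the sibling classes below:

import torch
from pina import LabelTensor
from pina.equation import FixedValue

# enforce u = 0 on a set of (boundary) points: the residual is output_ - 0.0
dirichlet = FixedValue(0.0)

pts = LabelTensor(torch.rand(10, 2), labels=['x', 'y'])
u = LabelTensor(torch.zeros(10, 1), labels=['u'])

res = dirichlet.residual(pts, u)   # all zeros here, i.e. the condition holds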
@@ -16,6 +28,22 @@ class FixedValue(Equation):
class FixedGradient(Equation):

    def __init__(self, value, components=None, d=None):
        """
        Fixed Gradient Equation class. This class can be
        used to enforce a fixed gradient for a specific
        condition.

        :param float value: Value to be maintained fixed.
        :param list(str) components: the name of the output
            variables to calculate the gradient for. It should
            be a subset of the output labels. If ``None``,
            all the output variables are considered.
            Default is ``None``.
        :param list(str) d: the name of the input variables on
            which the gradient is calculated. ``d`` should be a subset
            of the input labels. If ``None``, all the input variables
            are considered. Default is ``None``.
        """
        def equation(input_, output_):
            return grad(output_, input_, components=components, d=d) - value
        super().__init__(equation)
@@ -24,6 +52,22 @@ class FixedGradient(Equation):
class FixedFlux(Equation):

    def __init__(self, value, components=None, d=None):
        """
        Fixed Flux Equation class. This class can be
        used to enforce a fixed flux for a specific
        condition.

        :param float value: Value to be maintained fixed.
        :param list(str) components: the name of the output
            variables to calculate the flux for. It should
            be a subset of the output labels. If ``None``,
            all the output variables are considered.
            Default is ``None``.
        :param list(str) d: the name of the input variables on
            which the flux is calculated. ``d`` should be a subset
            of the input labels. If ``None``, all the input variables
            are considered. Default is ``None``.
        """
        def equation(input_, output_):
            return div(output_, input_, components=components, d=d) - value
        super().__init__(equation)
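A sketch contrasting the two classes above on the same kind of labelled points used in the new tests; the component and input labels are illustrative assumptions, not taken from this diff:

import torch
from pina import LabelTensor
from pina.equation import FixedGradient, FixedFlux

pts = LabelTensor(torch.rand(10, 2), labels=['x', 'y'])
pts.requires_grad = True
u = torch.pow(pts, 2)
u.labels = ['u1', 'u2']

# Neumann-like condition: du1/dx = 1, residual is grad(u1, x) - 1
neumann = FixedGradient(1.0, components=['u1'], d=['x'])
res_grad = neumann.residual(pts, u)

# zero-flux condition on the vector field (u1, u2): residual is div(u) - 0
no_flux = FixedFlux(0.0)
res_div = no_flux.residual(pts, u)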
@@ -32,6 +76,21 @@ class FixedFlux(Equation):
class Laplace(Equation):

    def __init__(self, components=None, d=None):
        """
        Laplace Equation class. This class can be
        used to enforce a Laplace equation for a specific
        condition (force term set to zero).

        :param list(str) components: the name of the output
            variables to calculate the Laplacian for. It should
            be a subset of the output labels. If ``None``,
            all the output variables are considered.
            Default is ``None``.
        :param list(str) d: the name of the input variables on
            which the Laplacian is calculated. ``d`` should be a subset
            of the input labels. If ``None``, all the input variables
            are considered. Default is ``None``.
        """
        def equation(input_, output_):
            return nabla(output_, input_, components=components, d=d)
        super().__init__(equation)
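And a short sketch of the Laplace class on a homogeneous problem. The labels are again illustrative; for u1 = x**2 the Laplacian with respect to (x, y) is the constant 2, so the residual should equal 2 at every point (up to autograd numerics):

import torch
from pina import LabelTensor
from pina.equation import Laplace

pts = LabelTensor(torch.rand(10, 2), labels=['x', 'y'])
pts.requires_grad = True
u = torch.pow(pts, 2)        # u1 = x**2, u2 = y**2
u.labels = ['u1', 'u2']

# residual of the homogeneous Laplace equation for u1 only: nabla(u1)
laplace = Laplace(components=['u1'])
res = laplace.residual(pts, u)   # expected to be 2 for every point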
@@ -1,24 +1,64 @@
 """ Module """
 import torch
 from .equation import Equation
+from ..utils import check_consistency


 class SystemEquation(Equation):

-    def __init__(self, list_equation):
-        if not isinstance(list_equation, list):
-            raise TypeError('list_equation must be a list of functions')
-
-        self.equations = []
-        for i, equation in enumerate(list_equation):
-            if not callable(equation):
-                raise TypeError('list_equation must be a list of functions')
-            self.equations.append(Equation(equation))
+    def __init__(self, list_equation, reduction='mean'):
+        """
+        System of Equation class for specifying any system
+        of equations in PINA.
+        Each ``equation`` passed to a ``Condition`` object
+        must be an ``Equation`` or ``SystemEquation``.
+        A ``SystemEquation`` is specified by a list of
+        equations.
+
+        :param list(callable) list_equation: A list of ``torch`` callable
+            equations to evaluate the residual.
+        :param str reduction: Specifies the reduction to apply to the output:
+            ``'none'`` | ``'mean'`` | ``'sum'``. ``'none'``: no reduction
+            is applied, ``'mean'``: the sum of the output is divided
+            by the number of elements in the output, ``'sum'``: the output
+            is summed. Default: ``'mean'``.
+        """
+        check_consistency([list_equation], list)
+        check_consistency(reduction, str)
+
+        # equations definition
+        self.equations = []
+        for _, equation in enumerate(list_equation):
+            self.equations.append(Equation(equation))
+
+        # possible reduction
+        if reduction == 'mean':
+            self.reduction = torch.mean
+        elif reduction == 'sum':
+            self.reduction = torch.sum
+        elif reduction == 'none':
+            self.reduction = reduction
+        else:
+            raise NotImplementedError(
+                'Only mean and sum reductions implemented.')

     def residual(self, input_, output_):
-        return torch.mean(
-            torch.stack([
-                equation.residual(input_, output_)
-                for equation in self.equations
-            ]),
-            dim=0)
+        """
+        Residual computation of the equation.
+
+        :param LabelTensor input_: Input points to evaluate the equation.
+        :param LabelTensor output_: Output vector given by a model (e.g.,
+            a ``FeedForward`` model).
+        :return: The residual evaluation of the specified equation,
+            aggregated by the ``reduction`` defined in the ``__init__``.
+        :rtype: LabelTensor
+        """
+        residual = torch.hstack([
+            equation.residual(input_, output_)
+            for equation in self.equations
+        ])
+
+        if self.reduction == 'none':
+            return residual
+
+        return self.reduction(residual, dim=-1)
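A sketch, built from the new test file below, of why the stack -> hstack change in residual() matters: the per-equation residuals can have different numbers of columns ([10, 2] and [10, 1] here), so torch.stack would fail, while torch.hstack concatenates them into a single [10, 3] tensor that the chosen reduction then collapses column-wise:

import torch
from pina import LabelTensor
from pina.equation import SystemEquation
from pina.operators import grad, nabla

def eq1(input_, output_):
    u_grad = grad(output_, input_)
    u1_xx = grad(u_grad, input_, components=['du1dx'], d=['x'])
    u2_xy = grad(u_grad, input_, components=['du2dx'], d=['y'])
    return torch.hstack([u1_xx, u2_xy])        # residual of shape [10, 2]

def eq2(input_, output_):
    force_term = (torch.sin(input_.extract(['x']) * torch.pi) *
                  torch.sin(input_.extract(['y']) * torch.pi))
    return nabla(output_.extract(['u1']), input_) - force_term   # [10, 1]

pts = LabelTensor(torch.rand(10, 2), labels=['x', 'y'])
pts.requires_grad = True
u = torch.pow(pts, 2)
u.labels = ['u1', 'u2']

system = SystemEquation([eq1, eq2], reduction='none')
print(system.residual(pts, u).shape)   # torch.Size([10, 3])

system = SystemEquation([eq1, eq2])    # default 'mean' collapses the columns
print(system.residual(pts, u).shape)   # torch.Size([10])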
43 tests/test_equations/test_equation.py Normal file
@@ -0,0 +1,43 @@
from pina.equation import Equation
from pina.operators import grad, nabla
from pina import LabelTensor
import torch
import pytest


def eq1(input_, output_):
    u_grad = grad(output_, input_)
    u1_xx = grad(u_grad, input_, components=['du1dx'], d=['x'])
    u2_xy = grad(u_grad, input_, components=['du2dx'], d=['y'])
    return torch.hstack([u1_xx, u2_xy])


def eq2(input_, output_):
    force_term = (torch.sin(input_.extract(['x']) * torch.pi) *
                  torch.sin(input_.extract(['y']) * torch.pi))
    nabla_u = nabla(output_.extract(['u1']), input_)
    return nabla_u - force_term


def foo():
    pass


def test_constructor():
    Equation(eq1)
    Equation(eq2)
    with pytest.raises(ValueError):
        Equation([1, 2, 4])
    with pytest.raises(ValueError):
        Equation(foo())


def test_residual():
    eq_1 = Equation(eq1)
    eq_2 = Equation(eq2)

    pts = LabelTensor(torch.rand(10, 2), labels=['x', 'y'])
    pts.requires_grad = True
    u = torch.pow(pts, 2)
    u.labels = ['u1', 'u2']

    eq_1_res = eq_1.residual(pts, u)
    eq_2_res = eq_2.residual(pts, u)

    assert eq_1_res.shape == torch.Size([10, 2])
    assert eq_2_res.shape == torch.Size([10, 1])
47 tests/test_equations/test_systemequation.py Normal file
@@ -0,0 +1,47 @@
from pina.equation import SystemEquation
from pina.operators import grad, nabla
from pina import LabelTensor
import torch
import pytest


def eq1(input_, output_):
    u_grad = grad(output_, input_)
    u1_xx = grad(u_grad, input_, components=['du1dx'], d=['x'])
    u2_xy = grad(u_grad, input_, components=['du2dx'], d=['y'])
    return torch.hstack([u1_xx, u2_xy])


def eq2(input_, output_):
    force_term = (torch.sin(input_.extract(['x']) * torch.pi) *
                  torch.sin(input_.extract(['y']) * torch.pi))
    nabla_u = nabla(output_.extract(['u1']), input_)
    return nabla_u - force_term


def foo():
    pass


def test_constructor():
    SystemEquation([eq1, eq2])
    SystemEquation([eq1, eq2], reduction='sum')
    with pytest.raises(NotImplementedError):
        SystemEquation([eq1, eq2], reduction='foo')
    with pytest.raises(ValueError):
        SystemEquation(foo)


def test_residual():

    pts = LabelTensor(torch.rand(10, 2), labels=['x', 'y'])
    pts.requires_grad = True
    u = torch.pow(pts, 2)
    u.labels = ['u1', 'u2']

    eq_1 = SystemEquation([eq1, eq2])
    res = eq_1.residual(pts, u)
    assert res.shape == torch.Size([10])

    eq_1 = SystemEquation([eq1, eq2], reduction='sum')
    res = eq_1.residual(pts, u)
    assert res.shape == torch.Size([10])

    eq_1 = SystemEquation([eq1, eq2], reduction='none')
    res = eq_1.residual(pts, u)
    assert res.shape == torch.Size([10, 3])