Update Laplace class and add unit tests (#645)

Giovanni Canali
2025-09-22 15:05:28 +02:00
committed by GitHub
parent 4a6e73fa54
commit 4e37468460
15 changed files with 673 additions and 157 deletions

View File

@@ -14,6 +14,30 @@ Equation Factory
:members:
:show-inheritance:
.. autoclass:: FixedLaplacian
:members:
:show-inheritance:
.. autoclass:: Laplace
:members:
:show-inheritance:
.. autoclass:: Advection
:members:
:show-inheritance:
.. autoclass:: AllenCahn
:members:
:show-inheritance:
.. autoclass:: DiffusionReaction
:members:
:show-inheritance:
.. autoclass:: Helmholtz
:members:
:show-inheritance:
.. autoclass:: Poisson
:members:
:show-inheritance:

View File

@@ -6,9 +6,26 @@ __all__ = [
"FixedValue",
"FixedGradient",
"FixedFlux",
"FixedLaplacian",
"Laplace",
"Advection",
"AllenCahn",
"DiffusionReaction",
"Helmholtz",
"Poisson",
]
from .equation import Equation
from .equation_factory import FixedFlux, FixedGradient, Laplace, FixedValue
from .equation_factory import (
FixedFlux,
FixedGradient,
FixedLaplacian,
FixedValue,
Laplace,
Advection,
AllenCahn,
DiffusionReaction,
Helmholtz,
Poisson,
)
from .system_equation import SystemEquation

View File

@@ -1,7 +1,10 @@
"""Module for defining various general equations."""
from typing import Callable
import torch
from .equation import Equation
from ..operator import grad, div, laplacian
from ..utils import check_consistency
class FixedValue(Equation):
@@ -110,9 +113,53 @@ class FixedFlux(Equation):
super().__init__(equation)
class Laplace(Equation):
class FixedLaplacian(Equation):
"""
Equation to enforce a fixed laplacian for a specific condition.
"""
def __init__(self, value, components=None, d=None):
"""
Initialization of the :class:`FixedLaplacian` class.
:param float value: The fixed value to be enforced on the laplacian.
:param list[str] components: The name of the output variables for which
the fixed laplacian condition is applied. It should be a subset of
the output labels. If ``None``, all output variables are considered.
Default is ``None``.
:param list[str] d: The name of the input variables on which the
laplacian is computed. It should be a subset of the input labels.
If ``None``, all the input variables are considered.
Default is ``None``.
"""
def equation(input_, output_):
"""
Definition of the equation to enforce a fixed laplacian.
:param LabelTensor input_: Input points where the equation is
evaluated.
:param LabelTensor output_: Output tensor, typically produced by a
:class:`torch.nn.Module` instance.
:return: The computed residual of the equation.
:rtype: LabelTensor
"""
return (
laplacian(output_, input_, components=components, d=d) - value
)
super().__init__(equation)
class Laplace(FixedLaplacian):
r"""
Equation to enforce a null laplacian for a specific condition.
The equation is defined as follows:
.. math::
\Delta u = 0
"""
def __init__(self, components=None, d=None):
@@ -128,18 +175,277 @@ class Laplace(Equation):
If ``None``, all the input variables are considered.
Default is ``None``.
"""
super().__init__(0.0, components=components, d=d)
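As a quick orientation, a minimal usage sketch of the refactored classes; the LabelTensor fixture and the residual call mirror the unit tests added further below, and the final assertion just restates that Laplace now delegates to FixedLaplacian with a zero value.

import torch
from pina import LabelTensor
from pina.equation import FixedLaplacian, Laplace

# Sample points and a quadratic field, as in the test fixtures below
pts = LabelTensor(torch.rand(10, 3, requires_grad=True), labels=["x", "y", "t"])
u = torch.pow(pts, 2)
u.labels = ["u", "v", "w"]

# Laplace(...) is now equivalent to FixedLaplacian(0.0, ...)
fixed = FixedLaplacian(value=0.0, components=["u"], d=["x", "y"])
null = Laplace(components=["u"], d=["x", "y"])
assert torch.allclose(fixed.residual(pts, u), null.residual(pts, u))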
class Advection(Equation):
r"""
Implementation of the N-dimensional advection equation with constant
velocity parameter. The equation is defined as follows:
.. math::
\frac{\partial u}{\partial t} + c \cdot \nabla u = 0
Here, :math:`c` is the advection velocity parameter.
"""
def __init__(self, c):
"""
Initialization of the :class:`Advection` class.
:param c: The advection velocity. If a scalar is provided, the same
velocity is applied to all spatial dimensions. If a list is
provided, it must contain one value per spatial dimension.
:type c: float | int | List[float] | List[int]
:raises ValueError: If ``c`` is an empty list.
"""
# Check consistency
check_consistency(c, (float, int, list))
if isinstance(c, list):
    if len(c) < 1:
        raise ValueError("'c' cannot be an empty list.")
    for ci in c:
        check_consistency(ci, (float, int))
else:
c = [c]
# Store advection velocity parameter
self.c = torch.tensor(c).unsqueeze(0)
def equation(input_, output_):
"""
Definition of the equation to enforce a null laplacian.
Implementation of the advection equation.
:param LabelTensor input_: Input points where the equation is
evaluated.
:param LabelTensor output_: Output tensor, eventually produced by a
:class:`torch.nn.Module` instance.
:return: The computed residual of the equation.
:param LabelTensor input_: The input data of the problem.
:param LabelTensor output_: The output data of the problem.
:return: The residual of the advection equation.
:rtype: LabelTensor
:raises ValueError: If the ``input_`` labels do not contain the time
variable 't'.
:raises ValueError: If ``c`` is a list and its length is not
consistent with the number of spatial dimensions.
"""
return laplacian(output_, input_, components=components, d=d)
# Store labels
input_lbl = input_.labels
spatial_d = [di for di in input_lbl if di != "t"]
# Ensure time is passed as input
if "t" not in input_lbl:
raise ValueError(
"The ``input_`` labels must contain the time 't' variable."
)
# Ensure consistency of c length
if self.c.shape[1] != (len(input_lbl) - 1) and self.c.shape[1] > 1:
raise ValueError(
"If 'c' is passed as a list, its length must be equal to "
"the number of spatial dimensions."
)
# Repeat c to ensure consistent shape for advection
self.c = self.c.repeat(output_.shape[0], 1)
if self.c.shape[1] != (len(input_lbl) - 1):
self.c = self.c.repeat(1, len(input_lbl) - 1)
# Add a dimension to c for the following operations
self.c = self.c.unsqueeze(-1)
# Compute the time derivative and the spatial gradient
time_der = grad(output_, input_, components=None, d="t")
grads = grad(output_=output_, input_=input_, d=spatial_d)
# Reshape and transpose
tmp = grads.reshape(*output_.shape, len(spatial_d))
tmp = tmp.transpose(-1, -2)
# Compute advection term
adv = (tmp * self.c).sum(dim=tmp.tensor.ndim - 2)
return time_der + adv
super().__init__(equation)
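A similar sketch for the new Advection class, again reusing the test fixtures; note that the inputs must carry a 't' label and that a list velocity must provide one entry per spatial dimension.

import torch
from pina import LabelTensor
from pina.equation import Advection

pts = LabelTensor(torch.rand(10, 3, requires_grad=True), labels=["x", "y", "t"])
u = torch.pow(pts, 2)
u.labels = ["u", "v", "w"]

# A scalar velocity is broadcast to every spatial dimension ("x" and "y")
assert Advection(c=1.0).residual(pts, u).shape == u.shape
# A list velocity must match the number of spatial dimensions
assert Advection(c=[1.0, 2.5]).residual(pts, u).shape == u.shape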
class AllenCahn(Equation):
r"""
Implementation of the N-dimensional Allen-Cahn equation, defined as follows:
.. math::
\frac{\partial u}{\partial t} - \alpha \Delta u + \beta(u^3 - u) = 0
Here, :math:`\alpha` is the diffusion coefficient and :math:`\beta` the reaction coefficient.
"""
def __init__(self, alpha, beta):
"""
Initialization of the :class:`AllenCahn` class.
:param alpha: The diffusion coefficient.
:type alpha: float | int
:param beta: The reaction coefficient.
:type beta: float | int
"""
check_consistency(alpha, (float, int))
check_consistency(beta, (float, int))
self.alpha = alpha
self.beta = beta
def equation(input_, output_):
"""
Implementation of the Allen-Cahn equation.
:param LabelTensor input_: The input data of the problem.
:param LabelTensor output_: The output data of the problem.
:return: The residual of the Allen-Cahn equation.
:rtype: LabelTensor
:raises ValueError: If the ``input_`` labels do not contain the time
variable 't'.
"""
# Ensure time is passed as input
if "t" not in input_.labels:
raise ValueError(
"The ``input_`` labels must contain the time 't' variable."
)
# Compute the time derivative and the spatial laplacian
u_t = grad(output_, input_, d=["t"])
u_xx = laplacian(
output_, input_, d=[di for di in input_.labels if di != "t"]
)
return u_t - self.alpha * u_xx + self.beta * (output_**3 - output_)
super().__init__(equation)
class DiffusionReaction(Equation):
r"""
Implementation of the N-dimensional Diffusion-Reaction equation,
defined as follows:
.. math::
\frac{\partial u}{\partial t} - \alpha \Delta u - f = 0
Here, :math:`\alpha` is the diffusion coefficient, while :math:`f` is the
forcing term.
"""
def __init__(self, alpha, forcing_term):
"""
Initialization of the :class:`DiffusionReaction` class.
:param alpha: The diffusion coefficient.
:type alpha: float | int
:param Callable forcing_term: The forcing field function, taking as
input the points on which evaluation is required.
"""
check_consistency(alpha, (float, int))
check_consistency(forcing_term, (Callable))
self.alpha = alpha
self.forcing_term = forcing_term
def equation(input_, output_):
"""
Implementation of the Diffusion-Reaction equation.
:param LabelTensor input_: The input data of the problem.
:param LabelTensor output_: The output data of the problem.
:return: The residual of the Diffusion-Reaction equation.
:rtype: LabelTensor
:raises ValueError: If the ``input_`` labels do not contain the time
variable 't'.
"""
# Ensure time is passed as input
if "t" not in input_.labels:
raise ValueError(
"The ``input_`` labels must contain the time 't' variable."
)
# Compute the time derivative and the spatial laplacian
u_t = grad(output_, input_, d=["t"])
u_xx = laplacian(
output_, input_, d=[di for di in input_.labels if di != "t"]
)
return u_t - self.alpha * u_xx - self.forcing_term(input_)
super().__init__(equation)
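The time-dependent equations follow the same pattern; a sketch for AllenCahn and DiffusionReaction with the same fixture, where the forcing term is any callable evaluated on the input points (the lambda is the one used in the tests below) and the inputs must again include the 't' label.

import torch
from pina import LabelTensor
from pina.equation import AllenCahn, DiffusionReaction

pts = LabelTensor(torch.rand(10, 3, requires_grad=True), labels=["x", "y", "t"])
u = torch.pow(pts, 2)
u.labels = ["u", "v", "w"]

# Allen-Cahn with explicit diffusion and reaction coefficients
assert AllenCahn(alpha=1e-4, beta=5).residual(pts, u).shape == u.shape
# Diffusion-reaction with a callable forcing term
dr = DiffusionReaction(alpha=1e-4, forcing_term=lambda x: torch.sin(x))
assert dr.residual(pts, u).shape == u.shape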
class Helmholtz(Equation):
r"""
Implementation of the Helmholtz equation, defined as follows:
.. math::
\Delta u + k u - f = 0
Here, :math:`k` is a parameter of the equation, while :math:`f` is the
forcing term.
"""
def __init__(self, k, forcing_term):
"""
Initialization of the :class:`Helmholtz` class.
:param k: The parameter of the equation.
:type k: float | int
:param Callable forcing_term: The forcing field function, taking as
input the points on which evaluation is required.
"""
check_consistency(k, (int, float))
check_consistency(forcing_term, (Callable))
self.k = k
self.forcing_term = forcing_term
def equation(input_, output_):
"""
Implementation of the Helmholtz equation.
:param LabelTensor input_: The input data of the problem.
:param LabelTensor output_: The output data of the problem.
:return: The residual of the Helmholtz equation.
:rtype: LabelTensor
"""
lap = laplacian(output_, input_)
return lap + self.k * output_ - self.forcing_term(input_)
super().__init__(equation)
class Poisson(Equation):
r"""
Implementation of the Poisson equation, defined as follows:
.. math::
\Delta u - f = 0
Here, :math:`f` is the forcing term.
"""
def __init__(self, forcing_term):
"""
Initialization of the :class:`Poisson` class.
:param Callable forcing_term: The forcing field function, taking as
input the points on which evaluation is required.
"""
check_consistency(forcing_term, (Callable))
self.forcing_term = forcing_term
def equation(input_, output_):
"""
Implementation of the Poisson equation.
:param LabelTensor input_: The input data of the problem.
:param LabelTensor output_: The output data of the problem.
:return: The residual of the Poisson equation.
:rtype: LabelTensor
"""
lap = laplacian(output_, input_)
return lap - self.forcing_term(input_)
super().__init__(equation)
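Rounding off the factory additions, the steady equations take the laplacian over all input variables and do not require a time label; a sketch for Helmholtz and Poisson with the same fixture and the forcing term used in the tests.

import torch
from pina import LabelTensor
from pina.equation import Helmholtz, Poisson

pts = LabelTensor(torch.rand(10, 3, requires_grad=True), labels=["x", "y", "t"])
u = torch.pow(pts, 2)
u.labels = ["u", "v", "w"]

# Helmholtz residual: laplacian(u) + k * u - f; Poisson residual: laplacian(u) - f
assert Helmholtz(k=1.0, forcing_term=lambda x: torch.sin(x)).residual(pts, u).shape == u.shape
assert Poisson(forcing_term=lambda x: torch.sin(x)).residual(pts, u).shape == u.shape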

View File

@@ -2,42 +2,10 @@
import torch
from ... import Condition
from ...operator import grad
from ...equation import Equation
from ...domain import CartesianDomain
from ...utils import check_consistency
from ...problem import SpatialProblem, TimeDependentProblem
class AdvectionEquation(Equation):
"""
Implementation of the advection equation.
"""
def __init__(self, c):
"""
Initialization of the :class:`AdvectionEquation`.
:param c: The advection velocity parameter.
:type c: float | int
"""
self.c = c
check_consistency(self.c, (float, int))
def equation(input_, output_):
"""
Implementation of the advection equation.
:param LabelTensor input_: Input data of the problem.
:param LabelTensor output_: Output data of the problem.
:return: The residual of the advection equation.
:rtype: LabelTensor
"""
u_x = grad(output_, input_, components=["u"], d=["x"])
u_t = grad(output_, input_, components=["u"], d=["t"])
return u_t + self.c * u_x
super().__init__(equation)
from ...equation import Equation, Advection
from ...utils import check_consistency
from ...domain import CartesianDomain
def initial_condition(input_, output_):
@@ -89,13 +57,10 @@ class AdvectionProblem(SpatialProblem, TimeDependentProblem):
:type c: float | int
"""
super().__init__()
check_consistency(c, (float, int))
self.c = c
check_consistency(self.c, (float, int))
self.conditions["D"] = Condition(
domain="D", equation=AdvectionEquation(self.c)
)
self.conditions["D"] = Condition(domain="D", equation=Advection(self.c))
def solution(self, pts):
"""

View File

@@ -2,32 +2,18 @@
import torch
from ... import Condition
from ...equation import Equation
from ...domain import CartesianDomain
from ...operator import grad, laplacian
from ...problem import SpatialProblem, TimeDependentProblem
def allen_cahn_equation(input_, output_):
"""
Implementation of the Allen Cahn equation.
:param LabelTensor input_: Input data of the problem.
:param LabelTensor output_: Output data of the problem.
:return: The residual of the Allen Cahn equation.
:rtype: LabelTensor
"""
u_t = grad(output_, input_, components=["u"], d=["t"])
u_xx = laplacian(output_, input_, components=["u"], d=["x"])
return u_t - 0.0001 * u_xx + 5 * output_**3 - 5 * output_
from ...equation import Equation, AllenCahn
from ...utils import check_consistency
from ...domain import CartesianDomain
def initial_condition(input_, output_):
"""
Definition of the initial condition of the Allen Cahn problem.
:param LabelTensor input_: Input data of the problem.
:param LabelTensor output_: Output data of the problem.
:param LabelTensor input_: The input data of the problem.
:param LabelTensor output_: The output data of the problem.
:return: The residual of the initial condition.
:rtype: LabelTensor
"""
@@ -64,6 +50,25 @@ class AllenCahnProblem(TimeDependentProblem, SpatialProblem):
}
conditions = {
"D": Condition(domain="D", equation=Equation(allen_cahn_equation)),
"t0": Condition(domain="t0", equation=Equation(initial_condition)),
}
def __init__(self, alpha=1e-4, beta=5):
"""
Initialization of the :class:`AllenCahnProblem`.
:param alpha: The diffusion coefficient.
:type alpha: float | int
:param beta: The reaction coefficient.
:type beta: float | int
"""
super().__init__()
check_consistency(alpha, (float, int))
check_consistency(beta, (float, int))
self.alpha = alpha
self.beta = beta
self.conditions["D"] = Condition(
domain="D",
equation=AllenCahn(alpha=self.alpha, beta=self.beta),
)
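A minimal sketch of the refactored constructor in use, mirroring the updated unit test: alpha and beta now flow straight into the shared AllenCahn equation.

from pina.problem.zoo import AllenCahnProblem

# Defaults (alpha=1e-4, beta=5) reproduce the previously hard-coded equation
problem = AllenCahnProblem(alpha=1e-4, beta=5)
problem.discretise_domain(n=10, mode="random", domains="all")
assert problem.are_all_domains_discretised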

View File

@@ -2,40 +2,18 @@
import torch
from ... import Condition
from ...domain import CartesianDomain
from ...operator import grad, laplacian
from ...equation import Equation, FixedValue
from ...equation import Equation, FixedValue, DiffusionReaction
from ...problem import SpatialProblem, TimeDependentProblem
def diffusion_reaction(input_, output_):
"""
Implementation of the diffusion-reaction equation.
:param LabelTensor input_: Input data of the problem.
:param LabelTensor output_: Output data of the problem.
:return: The residual of the diffusion-reaction equation.
:rtype: LabelTensor
"""
x = input_.extract("x")
t = input_.extract("t")
u_t = grad(output_, input_, components=["u"], d=["t"])
u_xx = laplacian(output_, input_, components=["u"], d=["x"])
r = torch.exp(-t) * (
1.5 * torch.sin(2 * x)
+ (8 / 3) * torch.sin(3 * x)
+ (15 / 4) * torch.sin(4 * x)
+ (63 / 8) * torch.sin(8 * x)
)
return u_t - u_xx - r
from ...utils import check_consistency
from ...domain import CartesianDomain
def initial_condition(input_, output_):
"""
Definition of the initial condition of the diffusion-reaction problem.
:param LabelTensor input_: Input data of the problem.
:param LabelTensor output_: Output data of the problem.
:param LabelTensor input_: The input data of the problem.
:param LabelTensor output_: The output data of the problem.
:return: The residual of the initial condition.
:rtype: LabelTensor
"""
@@ -76,12 +54,43 @@ class DiffusionReactionProblem(TimeDependentProblem, SpatialProblem):
}
conditions = {
"D": Condition(domain="D", equation=Equation(diffusion_reaction)),
"g1": Condition(domain="g1", equation=FixedValue(0.0)),
"g2": Condition(domain="g2", equation=FixedValue(0.0)),
"t0": Condition(domain="t0", equation=Equation(initial_condition)),
}
def __init__(self, alpha=1e-4):
"""
Initialization of the :class:`DiffusionReactionProblem`.
:param alpha: The diffusion coefficient.
:type alpha: float | int
"""
super().__init__()
check_consistency(alpha, (float, int))
self.alpha = alpha
def forcing_term(input_):
"""
Implementation of the forcing term.
"""
# Extract spatial and temporal variables
spatial_d = [di for di in input_.labels if di != "t"]
x = input_.extract(spatial_d)
t = input_.extract("t")
return torch.exp(-t) * (
1.5 * torch.sin(2 * x)
+ (8 / 3) * torch.sin(3 * x)
+ (15 / 4) * torch.sin(4 * x)
+ (63 / 8) * torch.sin(8 * x)
)
self.conditions["D"] = Condition(
domain="D",
equation=DiffusionReaction(self.alpha, forcing_term),
)
def solution(self, pts):
"""
Implementation of the analytical solution of the diffusion-reaction

View File

@@ -2,46 +2,10 @@
import torch
from ... import Condition
from ...operator import laplacian
from ...equation import FixedValue, Helmholtz
from ...utils import check_consistency
from ...domain import CartesianDomain
from ...problem import SpatialProblem
from ...utils import check_consistency
from ...equation import Equation, FixedValue
class HelmholtzEquation(Equation):
"""
Implementation of the Helmholtz equation.
"""
def __init__(self, alpha):
"""
Initialization of the :class:`HelmholtzEquation` class.
:param alpha: Parameter of the forcing term.
:type alpha: float | int
"""
self.alpha = alpha
check_consistency(alpha, (int, float))
def equation(input_, output_):
"""
Implementation of the Helmholtz equation.
:param LabelTensor input_: Input data of the problem.
:param LabelTensor output_: Output data of the problem.
:return: The residual of the Helmholtz equation.
:rtype: LabelTensor
"""
lap = laplacian(output_, input_, components=["u"], d=["x", "y"])
q = (
(1 - 2 * (self.alpha * torch.pi) ** 2)
* torch.sin(self.alpha * torch.pi * input_.extract("x"))
* torch.sin(self.alpha * torch.pi * input_.extract("y"))
)
return lap + output_ - q
super().__init__(equation)
class HelmholtzProblem(SpatialProblem):
@@ -88,8 +52,19 @@ class HelmholtzProblem(SpatialProblem):
self.alpha = alpha
check_consistency(alpha, (int, float))
def forcing_term(input_):
"""
Implementation of the forcing term.
"""
return (
(1 - 2 * (self.alpha * torch.pi) ** 2)
* torch.sin(self.alpha * torch.pi * input_.extract("x"))
* torch.sin(self.alpha * torch.pi * input_.extract("y"))
)
self.conditions["D"] = Condition(
domain="D", equation=HelmholtzEquation(self.alpha)
domain="D",
equation=Helmholtz(self.alpha, forcing_term),
)
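Same story for the Helmholtz problem, which now builds its forcing term internally and hands it to the shared Helmholtz equation; a minimal sketch, assuming HelmholtzProblem is exported from pina.problem.zoo like the other zoo problems.

from pina.problem.zoo import HelmholtzProblem

problem = HelmholtzProblem(alpha=3.0)
problem.discretise_domain(n=10, mode="random", domains="all")
assert problem.are_all_domains_discretised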
def solution(self, pts):

View File

@@ -1,29 +1,25 @@
"""Formulation of the Poisson problem in a square domain."""
import torch
from ... import Condition
from ...operator import laplacian
from ...equation import FixedValue, Poisson
from ...problem import SpatialProblem
from ...domain import CartesianDomain
from ...equation import Equation, FixedValue
from ... import Condition
def laplace_equation(input_, output_):
def forcing_term(input_):
"""
Implementation of the laplace equation.
Implementation of the forcing term of the Poisson problem.
:param LabelTensor input_: Input data of the problem.
:param LabelTensor output_: Output data of the problem.
:return: The residual of the laplace equation.
:param LabelTensor input_: The points where the forcing term is evaluated.
:return: The forcing term of the Poisson problem.
:rtype: LabelTensor
"""
force_term = (
return (
torch.sin(input_.extract(["x"]) * torch.pi)
* torch.sin(input_.extract(["y"]) * torch.pi)
* (2 * torch.pi**2)
)
delta_u = laplacian(output_, input_, components=["u"], d=["x", "y"])
return delta_u - force_term
class Poisson2DSquareProblem(SpatialProblem):
@@ -51,14 +47,14 @@ class Poisson2DSquareProblem(SpatialProblem):
"g2": Condition(domain="g2", equation=FixedValue(0.0)),
"g3": Condition(domain="g3", equation=FixedValue(0.0)),
"g4": Condition(domain="g4", equation=FixedValue(0.0)),
"D": Condition(domain="D", equation=Equation(laplace_equation)),
"D": Condition(domain="D", equation=Poisson(forcing_term=forcing_term)),
}
def solution(self, pts):
"""
Implementation of the analytical solution of the Poisson problem.
:param LabelTensor pts: Points where the solution is evaluated.
:param LabelTensor pts: The points where the solution is evaluated.
:return: The analytical solution of the Poisson problem.
:rtype: LabelTensor
"""

View File

@@ -0,0 +1,197 @@
from pina.equation import (
FixedValue,
FixedGradient,
FixedFlux,
FixedLaplacian,
Advection,
AllenCahn,
DiffusionReaction,
Helmholtz,
Poisson,
)
from pina import LabelTensor
import torch
import pytest
# Define input and output values
pts = LabelTensor(torch.rand(10, 3, requires_grad=True), labels=["x", "y", "t"])
u = torch.pow(pts, 2)
u.labels = ["u", "v", "w"]
@pytest.mark.parametrize("value", [0, 10, -7.5])
@pytest.mark.parametrize("components", [None, "u", ["u", "w"]])
def test_fixed_value(value, components):
# Constructor
equation = FixedValue(value=value, components=components)
# Residual
residual = equation.residual(pts, u)
len_c = len(components) if components is not None else u.shape[1]
assert residual.shape == (pts.shape[0], len_c)
@pytest.mark.parametrize("value", [0, 10, -7.5])
@pytest.mark.parametrize("components", [None, "u", ["u", "w"]])
@pytest.mark.parametrize("d", [None, "x", ["x", "y"]])
def test_fixed_gradient(value, components, d):
# Constructor
equation = FixedGradient(value=value, components=components, d=d)
# Residual
residual = equation.residual(pts, u)
len_c = len(components) if components is not None else u.shape[1]
len_d = len(d) if d is not None else pts.shape[1]
assert residual.shape == (pts.shape[0], len_c * len_d)
@pytest.mark.parametrize("value", [0, 10, -7.5])
@pytest.mark.parametrize("components", [None, "u", ["u", "w"]])
@pytest.mark.parametrize("d", [None, "x", ["x", "y"]])
def test_fixed_flux(value, components, d):
# Divergence requires components and d to be of the same length
len_c = len(components) if components is not None else u.shape[1]
len_d = len(d) if d is not None else pts.shape[1]
if len_c != len_d:
return
# Constructor
equation = FixedFlux(value=value, components=components, d=d)
# Residual
residual = equation.residual(pts, u)
assert residual.shape == (pts.shape[0], 1)
@pytest.mark.parametrize("value", [0, 10, -7.5])
@pytest.mark.parametrize("components", [None, "u", ["u", "w"]])
@pytest.mark.parametrize("d", [None, "x", ["x", "y"]])
def test_fixed_laplacian(value, components, d):
# Constructor
equation = FixedLaplacian(value=value, components=components, d=d)
# Residual
residual = equation.residual(pts, u)
len_c = len(components) if components is not None else u.shape[1]
assert residual.shape == (pts.shape[0], len_c)
@pytest.mark.parametrize("c", [1.0, 10, [1, 2.5]])
def test_advection_equation(c):
# Constructor
equation = Advection(c)
# Should fail if c is an empty list
with pytest.raises(ValueError):
Advection([])
# Should fail if c is not a float, int, or list
with pytest.raises(ValueError):
Advection("invalid")
# Residual
residual = equation.residual(pts, u)
assert residual.shape == u.shape
# Should fail if the input has no 't' label
with pytest.raises(ValueError):
residual = equation.residual(pts["x", "y"], u)
# Should fail if c is a list and its length != spatial dimension
with pytest.raises(ValueError):
    Advection([1, 2, 3]).residual(pts, u)
@pytest.mark.parametrize("alpha", [1.0, 10, -7.5])
@pytest.mark.parametrize("beta", [1.0, 10, -7.5])
def test_allen_cahn_equation(alpha, beta):
# Constructor
equation = AllenCahn(alpha=alpha, beta=beta)
# Should fail if alpha is not a float or int
with pytest.raises(ValueError):
AllenCahn(alpha="invalid", beta=beta)
# Should fail if beta is not a float or int
with pytest.raises(ValueError):
AllenCahn(alpha=alpha, beta="invalid")
# Residual
residual = equation.residual(pts, u)
assert residual.shape == u.shape
# Should fail if the input has no 't' label
with pytest.raises(ValueError):
residual = equation.residual(pts["x", "y"], u)
@pytest.mark.parametrize("alpha", [1.0, 10, -7.5])
@pytest.mark.parametrize(
"forcing_term", [lambda x: torch.sin(x), lambda x: torch.exp(x)]
)
def test_diffusion_reaction_equation(alpha, forcing_term):
# Constructor
equation = DiffusionReaction(alpha=alpha, forcing_term=forcing_term)
# Should fail if alpha is not a float or int
with pytest.raises(ValueError):
DiffusionReaction(alpha="invalid", forcing_term=forcing_term)
# Should fail if forcing_term is not a callable
with pytest.raises(ValueError):
DiffusionReaction(alpha=alpha, forcing_term="invalid")
# Residual
residual = equation.residual(pts, u)
assert residual.shape == u.shape
# Should fail if the input has no 't' label
with pytest.raises(ValueError):
residual = equation.residual(pts["x", "y"], u)
@pytest.mark.parametrize("k", [1.0, 10, -7.5])
@pytest.mark.parametrize(
"forcing_term", [lambda x: torch.sin(x), lambda x: torch.exp(x)]
)
def test_helmholtz_equation(k, forcing_term):
# Constructor
equation = Helmholtz(k=k, forcing_term=forcing_term)
# Should fail if k is not a float or int
with pytest.raises(ValueError):
Helmholtz(k="invalid", forcing_term=forcing_term)
# Should fail if forcing_term is not a callable
with pytest.raises(ValueError):
Helmholtz(k=k, forcing_term="invalid")
# Residual
residual = equation.residual(pts, u)
assert residual.shape == u.shape
@pytest.mark.parametrize(
"forcing_term", [lambda x: torch.sin(x), lambda x: torch.exp(x)]
)
def test_poisson_equation(forcing_term):
# Constructor
equation = Poisson(forcing_term=forcing_term)
# Should fail if forcing_term is not a callable
with pytest.raises(ValueError):
Poisson(forcing_term="invalid")
# Residual
residual = equation.residual(pts, u)
assert residual.shape == u.shape

View File

@@ -5,7 +5,7 @@ from pina.problem import SpatialProblem, TimeDependentProblem
@pytest.mark.parametrize("c", [1.5, 3])
def test_constructor(c):
print(f"Testing with c = {c} (type: {type(c)})")
problem = AdvectionProblem(c=c)
problem.discretise_domain(n=10, mode="random", domains="all")
assert problem.are_all_domains_discretised
@@ -14,5 +14,6 @@ def test_constructor(c):
assert hasattr(problem, "conditions")
assert isinstance(problem.conditions, dict)
# Should fail if c is not a float or int
with pytest.raises(ValueError):
AdvectionProblem(c="a")
AdvectionProblem(c="invalid")

View File

@@ -1,12 +1,24 @@
import pytest
from pina.problem.zoo import AllenCahnProblem
from pina.problem import SpatialProblem, TimeDependentProblem
def test_constructor():
problem = AllenCahnProblem()
@pytest.mark.parametrize("alpha", [0.1, 1])
@pytest.mark.parametrize("beta", [0.1, 1])
def test_constructor(alpha, beta):
problem = AllenCahnProblem(alpha=alpha, beta=beta)
problem.discretise_domain(n=10, mode="random", domains="all")
assert problem.are_all_domains_discretised
assert isinstance(problem, SpatialProblem)
assert isinstance(problem, TimeDependentProblem)
assert hasattr(problem, "conditions")
assert isinstance(problem.conditions, dict)
# Should fail if alpha is not a float or int
with pytest.raises(ValueError):
AllenCahnProblem(alpha="invalid", beta=beta)
# Should fail if beta is not a float or int
with pytest.raises(ValueError):
AllenCahnProblem(alpha=alpha, beta="invalid")

View File

@@ -1,12 +1,19 @@
import pytest
from pina.problem.zoo import DiffusionReactionProblem
from pina.problem import TimeDependentProblem, SpatialProblem
def test_constructor():
problem = DiffusionReactionProblem()
@pytest.mark.parametrize("alpha", [0.1, 1])
def test_constructor(alpha):
problem = DiffusionReactionProblem(alpha=alpha)
problem.discretise_domain(n=10, mode="random", domains="all")
assert problem.are_all_domains_discretised
assert isinstance(problem, TimeDependentProblem)
assert isinstance(problem, SpatialProblem)
assert hasattr(problem, "conditions")
assert isinstance(problem.conditions, dict)
# Should fail if alpha is not a float or int
with pytest.raises(ValueError):
problem = DiffusionReactionProblem(alpha="invalid")

View File

@@ -5,6 +5,7 @@ from pina.problem import SpatialProblem
@pytest.mark.parametrize("alpha", [1.5, 3])
def test_constructor(alpha):
problem = HelmholtzProblem(alpha=alpha)
problem.discretise_domain(n=10, mode="random", domains="all")
assert problem.are_all_domains_discretised
@@ -13,4 +14,4 @@ def test_constructor(alpha):
assert isinstance(problem.conditions, dict)
with pytest.raises(ValueError):
HelmholtzProblem(alpha="a")
HelmholtzProblem(alpha="invalid")

View File

@@ -1,6 +1,6 @@
import pytest
from pina.problem.zoo import InversePoisson2DSquareProblem
from pina.problem import InverseProblem, SpatialProblem
import pytest
@pytest.mark.parametrize("load", [True, False])

View File

@@ -3,6 +3,7 @@ from pina.problem import SpatialProblem
def test_constructor():
problem = Poisson2DSquareProblem()
problem.discretise_domain(n=10, mode="random", domains="all")
assert problem.are_all_domains_discretised