refactor
pina/graph.py (new file, +118)
@@ -0,0 +1,118 @@
""" Module for Graph class """

import logging

from torch_geometric.nn import radius_graph
from torch_geometric.data import Data
import torch


class Graph:
    """
    PINA Graph managing the PyG Data class.
    """

    def __init__(self, data):
        self.data = data

    @staticmethod
    def _build_triangulation(**kwargs):
        logging.debug("Creating graph with triangulation mode.")

        # check for mandatory arguments
        if "nodes_coordinates" not in kwargs:
            raise ValueError("Nodes coordinates must be provided in the kwargs.")
        if "nodes_data" not in kwargs:
            raise ValueError("Nodes data must be provided in the kwargs.")
        if "triangles" not in kwargs:
            raise ValueError("Triangles must be provided in the kwargs.")

        nodes_coordinates = kwargs["nodes_coordinates"]
        nodes_data = kwargs["nodes_data"]
        triangles = kwargs["triangles"]

        def less_first(a, b):
            # sort each edge so that (a, b) and (b, a) coincide
            return [a, b] if a < b else [b, a]

        list_of_edges = []
        for triangle in triangles:
            for e1, e2 in [[0, 1], [1, 2], [2, 0]]:
                list_of_edges.append(less_first(triangle[e1], triangle[e2]))

        # remove duplicate edges; edge_index must be integer-typed for PyG
        array_of_edges = torch.unique(
            torch.tensor(list_of_edges, dtype=torch.long), dim=0
        )
        array_of_edges = array_of_edges.t().contiguous()

        return Data(
            x=nodes_data,
            pos=nodes_coordinates.T,
            edge_index=array_of_edges,
        )

    @staticmethod
    def _build_radius(**kwargs):
        logging.debug("Creating graph with radius mode.")

        # check for mandatory arguments
        if "nodes_coordinates" not in kwargs:
            raise ValueError("Nodes coordinates must be provided in the kwargs.")
        if "nodes_data" not in kwargs:
            raise ValueError("Nodes data must be provided in the kwargs.")
        if "radius" not in kwargs:
            raise ValueError("Radius must be provided in the kwargs.")

        nodes_coordinates = kwargs["nodes_coordinates"]
        nodes_data = kwargs["nodes_data"]
        radius = kwargs["radius"]

        edges_data = kwargs.get("edge_data", None)
        loop = kwargs.get("loop", False)
        batch = kwargs.get("batch", None)

        logging.debug(f"radius: {radius}, loop: {loop}, batch: {batch}")

        edge_index = radius_graph(
            x=nodes_coordinates.tensor,
            r=radius,
            loop=loop,
            batch=batch,
        )

        logging.debug("edge_index computed")
        return Data(
            x=nodes_data,
            pos=nodes_coordinates,
            edge_index=edge_index,
            edge_attr=edges_data,
        )

    @staticmethod
    def build(mode, **kwargs):
        """
        Constructor for the `Graph` class.
        """
        if mode == "radius":
            graph = Graph._build_radius(**kwargs)
        elif mode == "triangulation":
            graph = Graph._build_triangulation(**kwargs)
        else:
            raise ValueError(f"Mode {mode} not recognized")

        return Graph(graph)

    def __repr__(self):
        return f"Graph(data={self.data})"
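For orientation, a minimal usage sketch of the new class (not part of the commit; the labels and the 0.2 radius are illustrative, and `nodes_coordinates` is assumed to be a PINA `LabelTensor`, whose `.tensor` attribute `_build_radius` reads):

import torch
from pina import LabelTensor
from pina.graph import Graph

# hypothetical example data: 100 random nodes in 2D, one scalar feature each
coords = LabelTensor(torch.rand(100, 2), labels=['x', 'y'])
features = torch.rand(100, 1)
g = Graph.build('radius', nodes_coordinates=coords,
                nodes_data=features, radius=0.2)
print(g.data.edge_index.shape)  # torch.Size([2, num_edges])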
@@ -428,3 +428,7 @@ class LabelTensor(torch.Tensor):
         lt = super().requires_grad_(mode)
         lt.labels = self.labels
         return lt
+
+    @property
+    def dtype(self):
+        return super().dtype
pina/loss.py (deleted, -209)
@@ -1,209 +0,0 @@
""" Module for Loss class """

from abc import ABCMeta, abstractmethod
from torch.nn.modules.loss import _Loss
import torch

from .utils import check_consistency

__all__ = ["LossInterface", "LpLoss", "PowerLoss"]


class LossInterface(_Loss, metaclass=ABCMeta):
    """
    The abstract ``LossInterface`` class. All classes defining a PINA loss
    should inherit from this class.
    """

    def __init__(self, reduction="mean"):
        """
        :param str reduction: Specifies the reduction to apply to the output:
            ``none`` | ``mean`` | ``sum``. When ``none``: no reduction will
            be applied; ``mean``: the sum of the output will be divided by
            the number of elements in the output; ``sum``: the output will
            be summed. Note: ``size_average`` and ``reduce`` are in the
            process of being deprecated, and in the meantime, specifying
            either of those two args will override ``reduction``.
            Default: ``mean``.
        """
        super().__init__(reduction=reduction, size_average=None, reduce=None)

    @abstractmethod
    def forward(self, input, target):
        """Forward method for loss function.

        :param torch.Tensor input: Input tensor from real data.
        :param torch.Tensor target: Model tensor output.
        :return: Loss evaluation.
        :rtype: torch.Tensor
        """
        pass

    def _reduction(self, loss):
        """Helper function applying the reduction chosen at construction.

        :param loss: Loss tensor for each element.
        :type loss: torch.Tensor
        :return: Reduced loss.
        :rtype: torch.Tensor
        """
        if self.reduction == "none":
            ret = loss
        elif self.reduction == "mean":
            ret = torch.mean(loss, keepdim=True, dim=-1)
        elif self.reduction == "sum":
            ret = torch.sum(loss, keepdim=True, dim=-1)
        else:
            raise ValueError(self.reduction + " is not valid")
        return ret


class LpLoss(LossInterface):
    r"""
    The Lp loss implementation class. Creates a criterion that measures
    the Lp error between each element in the input :math:`x` and
    target :math:`y`.

    The unreduced (i.e. with ``reduction`` set to ``none``) loss can
    be described as:

    .. math::
        \ell(x, y) = L = \{l_1, \dots, l_N\}^\top, \quad
        l_n = \left[ \sum_{i=1}^{D} \left| x_n^i - y_n^i \right|^p \right].

    If ``relative`` is set to ``True``:

    .. math::
        \ell(x, y) = L = \{l_1, \dots, l_N\}^\top, \quad
        l_n = \frac{\sum_{i=1}^{D} |x_n^i - y_n^i|^p}{\sum_{i=1}^{D} |y_n^i|^p},

    where :math:`N` is the batch size. If ``reduction`` is not ``none``
    (default ``mean``), then:

    .. math::
        \ell(x, y) =
        \begin{cases}
            \operatorname{mean}(L), & \text{if reduction} = \text{`mean';}\\
            \operatorname{sum}(L),  & \text{if reduction} = \text{`sum'.}
        \end{cases}

    :math:`x` and :math:`y` are tensors of arbitrary shapes with a total
    of :math:`n` elements each. The sum operation still operates over all
    the elements, and divides by :math:`n`; the division by :math:`n` can
    be avoided by setting ``reduction`` to ``sum``.
    """

    def __init__(self, p=2, reduction="mean", relative=False):
        """
        :param int p: Degree of the Lp norm. It specifies the type of norm
            to be computed. See the `list of possible orders in torch.linalg.norm
            <https://pytorch.org/docs/stable/generated/torch.linalg.norm.html#torch.linalg.norm>`_
            for the possible degrees. Default 2 (Euclidean norm).
        :param str reduction: Specifies the reduction to apply to the output:
            ``none`` | ``mean`` | ``sum``. ``none``: no reduction will be
            applied; ``mean``: the sum of the output will be divided by the
            number of elements in the output; ``sum``: the output will be
            summed.
        :param bool relative: Specifies if the relative error should be computed.
        """
        super().__init__(reduction=reduction)

        # check consistency
        check_consistency(p, (str, int, float))
        check_consistency(relative, bool)

        self.p = p
        self.relative = relative

    def forward(self, input, target):
        """Forward method for loss function.

        :param torch.Tensor input: Input tensor from real data.
        :param torch.Tensor target: Model tensor output.
        :return: Loss evaluation.
        :rtype: torch.Tensor
        """
        loss = torch.linalg.norm(input - target, ord=self.p, dim=-1)
        if self.relative:
            loss = loss / torch.linalg.norm(input, ord=self.p, dim=-1)
        return self._reduction(loss)


class PowerLoss(LossInterface):
    r"""
    The PowerLoss implementation class. Creates a criterion that measures
    the error between each element in the input :math:`x` and target
    :math:`y`, raised to a given power.

    The unreduced (i.e. with ``reduction`` set to ``none``) loss can
    be described as:

    .. math::
        \ell(x, y) = L = \{l_1, \dots, l_N\}^\top, \quad
        l_n = \frac{1}{D} \left[ \sum_{i=1}^{D} \left| x_n^i - y_n^i \right|^p \right].

    If ``relative`` is set to ``True``:

    .. math::
        \ell(x, y) = L = \{l_1, \dots, l_N\}^\top, \quad
        l_n = \frac{\sum_{i=1}^{D} |x_n^i - y_n^i|^p}{\sum_{i=1}^{D} |y_n^i|^p},

    where :math:`N` is the batch size. If ``reduction`` is not ``none``
    (default ``mean``), then:

    .. math::
        \ell(x, y) =
        \begin{cases}
            \operatorname{mean}(L), & \text{if reduction} = \text{`mean';}\\
            \operatorname{sum}(L),  & \text{if reduction} = \text{`sum'.}
        \end{cases}

    :math:`x` and :math:`y` are tensors of arbitrary shapes with a total
    of :math:`n` elements each. The sum operation still operates over all
    the elements, and divides by :math:`n`; the division by :math:`n` can
    be avoided by setting ``reduction`` to ``sum``.
    """

    def __init__(self, p=2, reduction="mean", relative=False):
        """
        :param int p: Degree of the Lp norm. It specifies the type of norm
            to be computed. See the `list of possible orders in torch.linalg.norm
            <https://pytorch.org/docs/stable/generated/torch.linalg.norm.html#torch.linalg.norm>`_
            for the possible degrees. Default 2 (Euclidean norm).
        :param str reduction: Specifies the reduction to apply to the output:
            ``none`` | ``mean`` | ``sum``. When ``none``: no reduction will
            be applied; ``mean``: the sum of the output will be divided by
            the number of elements in the output; ``sum``: the output will
            be summed.
        :param bool relative: Specifies if the relative error should be computed.
        """
        super().__init__(reduction=reduction)

        # check consistency
        check_consistency(p, (str, int, float))
        check_consistency(relative, bool)

        self.p = p
        self.relative = relative

    def forward(self, input, target):
        """Forward method for loss function.

        :param torch.Tensor input: Input tensor from real data.
        :param torch.Tensor target: Model tensor output.
        :return: Loss evaluation.
        :rtype: torch.Tensor
        """
        loss = torch.abs(input - target).pow(self.p).mean(-1)
        if self.relative:
            loss = loss / torch.abs(input).pow(self.p).mean(-1)
        return self._reduction(loss)
pina/loss/__init__.py (new file, +9)
@@ -0,0 +1,9 @@
__all__ = [
    'LossInterface',
    'LpLoss',
    'PowerLoss',
    'WeightningInterface',
]

from .loss_interface import LossInterface
from .power_loss import PowerLoss
from .lp_loss import LpLoss
from .weightning_interface import WeightningInterface
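With this layout, user code can import the losses either from the package root or from the concrete submodules; a quick sketch:

from pina.loss import LpLoss, PowerLoss   # via the package __init__
from pina.loss.lp_loss import LpLoss      # equivalent explicit submodule import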
pina/loss/loss_interface.py (new file, +61)
@@ -0,0 +1,61 @@
""" Module for Loss Interface """

from abc import ABCMeta, abstractmethod
from torch.nn.modules.loss import _Loss
import torch


class LossInterface(_Loss, metaclass=ABCMeta):
    """
    The abstract ``LossInterface`` class. All classes defining a PINA loss
    should inherit from this class.
    """

    def __init__(self, reduction="mean"):
        """
        :param str reduction: Specifies the reduction to apply to the output:
            ``none`` | ``mean`` | ``sum``. When ``none``: no reduction will
            be applied; ``mean``: the sum of the output will be divided by
            the number of elements in the output; ``sum``: the output will
            be summed. Note: ``size_average`` and ``reduce`` are in the
            process of being deprecated, and in the meantime, specifying
            either of those two args will override ``reduction``.
            Default: ``mean``.
        """
        super().__init__(reduction=reduction, size_average=None, reduce=None)

    @abstractmethod
    def forward(self, input, target):
        """Forward method for loss function.

        :param torch.Tensor input: Input tensor from real data.
        :param torch.Tensor target: Model tensor output.
        :return: Loss evaluation.
        :rtype: torch.Tensor
        """
        pass

    def _reduction(self, loss):
        """Helper function applying the reduction chosen at construction.

        :param loss: Loss tensor for each element.
        :type loss: torch.Tensor
        :return: Reduced loss.
        :rtype: torch.Tensor
        """
        if self.reduction == "none":
            ret = loss
        elif self.reduction == "mean":
            ret = torch.mean(loss, keepdim=True, dim=-1)
        elif self.reduction == "sum":
            ret = torch.sum(loss, keepdim=True, dim=-1)
        else:
            raise ValueError(self.reduction + " is not valid")
        return ret
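Any concrete loss only needs to produce a per-sample tensor and delegate to ``_reduction``. A minimal hypothetical subclass, not part of the commit, just to show the contract:

import torch
from pina.loss.loss_interface import LossInterface

class L1Loss(LossInterface):
    """Hypothetical example: mean absolute error per sample."""
    def forward(self, input, target):
        loss = torch.abs(input - target).mean(-1)  # per-sample MAE
        return self._reduction(loss)               # applies none / mean / sum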
pina/loss/lp_loss.py (new file, +78)
@@ -0,0 +1,78 @@
""" Module for LpLoss class """

import torch

from ..utils import check_consistency
from .loss_interface import LossInterface


class LpLoss(LossInterface):
    r"""
    The Lp loss implementation class. Creates a criterion that measures
    the Lp error between each element in the input :math:`x` and
    target :math:`y`.

    The unreduced (i.e. with ``reduction`` set to ``none``) loss can
    be described as:

    .. math::
        \ell(x, y) = L = \{l_1, \dots, l_N\}^\top, \quad
        l_n = \left[ \sum_{i=1}^{D} \left| x_n^i - y_n^i \right|^p \right].

    If ``relative`` is set to ``True``:

    .. math::
        \ell(x, y) = L = \{l_1, \dots, l_N\}^\top, \quad
        l_n = \frac{\sum_{i=1}^{D} |x_n^i - y_n^i|^p}{\sum_{i=1}^{D} |y_n^i|^p},

    where :math:`N` is the batch size. If ``reduction`` is not ``none``
    (default ``mean``), then:

    .. math::
        \ell(x, y) =
        \begin{cases}
            \operatorname{mean}(L), & \text{if reduction} = \text{`mean';}\\
            \operatorname{sum}(L),  & \text{if reduction} = \text{`sum'.}
        \end{cases}

    :math:`x` and :math:`y` are tensors of arbitrary shapes with a total
    of :math:`n` elements each. The sum operation still operates over all
    the elements, and divides by :math:`n`; the division by :math:`n` can
    be avoided by setting ``reduction`` to ``sum``.
    """

    def __init__(self, p=2, reduction="mean", relative=False):
        """
        :param int p: Degree of the Lp norm. It specifies the type of norm
            to be computed. See the `list of possible orders in torch.linalg.norm
            <https://pytorch.org/docs/stable/generated/torch.linalg.norm.html#torch.linalg.norm>`_
            for the possible degrees. Default 2 (Euclidean norm).
        :param str reduction: Specifies the reduction to apply to the output:
            ``none`` | ``mean`` | ``sum``. ``none``: no reduction will be
            applied; ``mean``: the sum of the output will be divided by the
            number of elements in the output; ``sum``: the output will be
            summed.
        :param bool relative: Specifies if the relative error should be computed.
        """
        super().__init__(reduction=reduction)

        # check consistency
        check_consistency(p, (str, int, float))
        check_consistency(relative, bool)

        self.p = p
        self.relative = relative

    def forward(self, input, target):
        """Forward method for loss function.

        :param torch.Tensor input: Input tensor from real data.
        :param torch.Tensor target: Model tensor output.
        :return: Loss evaluation.
        :rtype: torch.Tensor
        """
        loss = torch.linalg.norm(input - target, ord=self.p, dim=-1)
        if self.relative:
            loss = loss / torch.linalg.norm(input, ord=self.p, dim=-1)
        return self._reduction(loss)
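A quick numeric check, using the same tensors as the tests touched by this commit (``mean`` and ``sum`` keep the last dimension because ``_reduction`` uses ``keepdim=True``):

import torch
from pina.loss.lp_loss import LpLoss

input = torch.tensor([[3.], [1.], [-8.]])
target = torch.tensor([[6.], [4.], [2.]])
print(LpLoss(p=2, reduction='none')(input, target))  # tensor([ 3.,  3., 10.])
print(LpLoss(p=2, reduction='mean')(input, target))  # tensor([5.3333]) = 16 / 3
print(LpLoss(p=2, reduction='sum')(input, target))   # tensor([16.])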
pina/loss/power_loss.py (new file, +79)
@@ -0,0 +1,79 @@
""" Module for PowerLoss class """

import torch

from ..utils import check_consistency
from .loss_interface import LossInterface


class PowerLoss(LossInterface):
    r"""
    The PowerLoss implementation class. Creates a criterion that measures
    the error between each element in the input :math:`x` and target
    :math:`y`, raised to a given power.

    The unreduced (i.e. with ``reduction`` set to ``none``) loss can
    be described as:

    .. math::
        \ell(x, y) = L = \{l_1, \dots, l_N\}^\top, \quad
        l_n = \frac{1}{D} \left[ \sum_{i=1}^{D} \left| x_n^i - y_n^i \right|^p \right].

    If ``relative`` is set to ``True``:

    .. math::
        \ell(x, y) = L = \{l_1, \dots, l_N\}^\top, \quad
        l_n = \frac{\sum_{i=1}^{D} |x_n^i - y_n^i|^p}{\sum_{i=1}^{D} |y_n^i|^p},

    where :math:`N` is the batch size. If ``reduction`` is not ``none``
    (default ``mean``), then:

    .. math::
        \ell(x, y) =
        \begin{cases}
            \operatorname{mean}(L), & \text{if reduction} = \text{`mean';}\\
            \operatorname{sum}(L),  & \text{if reduction} = \text{`sum'.}
        \end{cases}

    :math:`x` and :math:`y` are tensors of arbitrary shapes with a total
    of :math:`n` elements each. The sum operation still operates over all
    the elements, and divides by :math:`n`; the division by :math:`n` can
    be avoided by setting ``reduction`` to ``sum``.
    """

    def __init__(self, p=2, reduction="mean", relative=False):
        """
        :param int p: Degree of the Lp norm. It specifies the type of norm
            to be computed. See the `list of possible orders in torch.linalg.norm
            <https://pytorch.org/docs/stable/generated/torch.linalg.norm.html#torch.linalg.norm>`_
            for the possible degrees. Default 2 (Euclidean norm).
        :param str reduction: Specifies the reduction to apply to the output:
            ``none`` | ``mean`` | ``sum``. When ``none``: no reduction will
            be applied; ``mean``: the sum of the output will be divided by
            the number of elements in the output; ``sum``: the output will
            be summed.
        :param bool relative: Specifies if the relative error should be computed.
        """
        super().__init__(reduction=reduction)

        # check consistency
        check_consistency(p, (str, int, float))
        check_consistency(relative, bool)

        self.p = p
        self.relative = relative

    def forward(self, input, target):
        """Forward method for loss function.

        :param torch.Tensor input: Input tensor from real data.
        :param torch.Tensor target: Model tensor output.
        :return: Loss evaluation.
        :rtype: torch.Tensor
        """
        loss = torch.abs(input - target).pow(self.p).mean(-1)
        if self.relative:
            loss = loss / torch.abs(input).pow(self.p).mean(-1)
        return self._reduction(loss)
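For comparison with ``LpLoss`` above: on the same data, ``PowerLoss`` averages the powered errors over the feature dimension instead of taking a norm, so with ``p=2`` it reduces to the MSE:

import torch
from pina.loss.power_loss import PowerLoss

input = torch.tensor([[3.], [1.], [-8.]])
target = torch.tensor([[6.], [4.], [2.]])
print(PowerLoss(p=2, reduction='none')(input, target))  # tensor([  9.,   9., 100.])
print(PowerLoss(p=2)(input, target))                    # tensor([39.3333]) = 118 / 3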
pina/loss/weighted_aggregation.py (new file, +35)
@@ -0,0 +1,35 @@
""" Module for Weighted Aggregation """

from .weightning_interface import WeightningInterface


class WeightedAggregation(WeightningInterface):
    """
    TODO
    """

    def __init__(self, aggr='mean', weights=None):
        self.aggr = aggr
        self.weights = weights

    def aggregate(self, losses):
        """
        Aggregate the losses.

        :param dict(torch.Tensor) losses: The dictionary of losses.
        :return: The losses aggregation. It should be a scalar Tensor.
        :rtype: torch.Tensor
        """
        if self.weights:
            weighted_losses = {
                condition: self.weights[condition] * losses[condition]
                for condition in losses
            }
        else:
            weighted_losses = losses

        if self.aggr == 'mean':
            return sum(weighted_losses.values()) / len(weighted_losses)
        elif self.aggr == 'sum':
            return sum(weighted_losses.values())
        else:
            raise ValueError(self.aggr + " is not valid for aggregation.")
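A usage sketch (the condition names and weight values are illustrative, not from the commit):

import torch
from pina.loss.weighted_aggregation import WeightedAggregation

losses = {'data': torch.tensor(0.5), 'physics': torch.tensor(2.0)}
aggr = WeightedAggregation(aggr='sum', weights={'data': 1.0, 'physics': 0.1})
print(aggr.aggregate(losses))  # 1.0 * 0.5 + 0.1 * 2.0 = tensor(0.7000)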
pina/loss/weightning_interface.py (new file, +24)
@@ -0,0 +1,24 @@
""" Module for Weightning Interface """

from abc import ABCMeta, abstractmethod


class WeightningInterface(metaclass=ABCMeta):
    """
    The ``WeightningInterface`` class. TODO
    """

    @abstractmethod
    def __init__(self, *args, **kwargs):
        pass

    @abstractmethod
    def aggregate(self, losses):
        """
        Aggregate the losses.

        :param dict(torch.Tensor) losses: The dictionary of losses.
        :return: The losses aggregation. It should be a scalar Tensor.
        :rtype: torch.Tensor
        """
        pass
pina/model/layers/messa_passing.py (new file, +11)
@@ -0,0 +1,11 @@
""" Module for Averaging Neural Operator Layer class. """

from torch import nn, mean
from torch_geometric.nn import MessagePassing, InstanceNorm, radius_graph

from pina.utils import check_consistency


class MessagePassingBlock(nn.Module):
    pass  # stub: no implementation committed yet
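The committed file stops at the class declaration. For context, a minimal sketch of what a PyG message-passing block can look like; everything here (the name ``AveragingBlock``, the linear layer, the ``mean`` aggregation) is an illustrative assumption, not this commit's implementation:

import torch
from torch_geometric.nn import MessagePassing

class AveragingBlock(MessagePassing):
    # Hypothetical sketch of a message-passing layer, not part of the commit.
    def __init__(self, channels):
        super().__init__(aggr='mean')  # average incoming messages per node
        self.lin = torch.nn.Linear(2 * channels, channels)

    def forward(self, x, edge_index):
        # x: (num_nodes, channels), edge_index: (2, num_edges)
        return self.propagate(edge_index, x=x)

    def message(self, x_i, x_j):
        # combine receiver (x_i) and sender (x_j) features per edge
        return self.lin(torch.cat([x_i, x_j], dim=-1))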
@@ -6,7 +6,7 @@ import torch
 from ...solvers.solver import SolverInterface
 from pina.utils import check_consistency
-from pina.loss import LossInterface
+from pina.loss.loss_interface import LossInterface
 from pina.problem import InverseProblem
 from torch.nn.modules.loss import _Loss
@@ -8,7 +8,7 @@ from ..optim import Optimizer, Scheduler, TorchOptimizer, TorchScheduler
 from .solver import SolverInterface
 from ..label_tensor import LabelTensor
 from ..utils import check_consistency
-from ..loss import LossInterface
+from ..loss.loss_interface import LossInterface


 class SupervisedSolver(SolverInterface):
@@ -172,10 +172,6 @@ class SupervisedSolver(SolverInterface):
         :return: The residual loss averaged on the input coordinates
         :rtype: torch.Tensor
         """
-        print(input_pts)
-        print(output_pts)
-        print(self.loss)
-        print(self.forward(input_pts))
         return self.loss(self.forward(input_pts), output_pts)

     @property
@@ -1,7 +1,7 @@
 import torch
 import pytest

-from pina.loss import *
+from pina.loss.lp_loss import LpLoss
+from pina.loss.power_loss import PowerLoss

 input = torch.tensor([[3.], [1.], [-8.]])
 target = torch.tensor([[6.], [4.], [2.]])
@@ -1,7 +1,7 @@
 import torch
 import pytest

-from pina.loss import PowerLoss
+from pina.loss.power_loss import PowerLoss

 input = torch.tensor([[3.], [1.], [-8.]])
 target = torch.tensor([[6.], [4.], [2.]])
@@ -10,7 +10,7 @@ from pina.trainer import Trainer
 from pina.model import FeedForward
 from pina.equation.equation import Equation
 from pina.equation.equation_factory import FixedValue
-from pina.loss import LpLoss
+from pina.loss.lp_loss import LpLoss

@@ -10,7 +10,7 @@ from pina.trainer import Trainer
 from pina.model import FeedForward
 from pina.equation.equation import Equation
 from pina.equation.equation_factory import FixedValue
-from pina.loss import LpLoss
+from pina.loss.lp_loss import LpLoss


 def laplace_equation(input_, output_):
@@ -9,7 +9,7 @@ from pina.trainer import Trainer
 from pina.model import FeedForward
 from pina.equation.equation import Equation
 from pina.equation.equation_factory import FixedValue
-from pina.loss import LpLoss
+from pina.loss.lp_loss import LpLoss


 def laplace_equation(input_, output_):
@@ -9,7 +9,7 @@ from pina.trainer import Trainer
 from pina.model import FeedForward
 from pina.equation.equation import Equation
 from pina.equation.equation_factory import FixedValue
-from pina.loss import LpLoss
+from pina.loss.lp_loss import LpLoss


 def laplace_equation(input_, output_):
@@ -6,7 +6,7 @@ from pina import Condition, LabelTensor
 from pina.solvers import ReducedOrderModelSolver
 from pina.trainer import Trainer
 from pina.model import FeedForward
-from pina.loss import LpLoss
+from pina.loss.lp_loss import LpLoss


 class NeuralOperatorProblem(AbstractProblem):
@@ -10,7 +10,7 @@ from pina.trainer import Trainer
 from pina.model import FeedForward
 from pina.equation.equation import Equation
 from pina.equation.equation_factory import FixedValue
-from pina.loss import LpLoss
+from pina.loss.lp_loss import LpLoss


 def laplace_equation(input_, output_):
@@ -5,14 +5,17 @@ from pina import Condition, LabelTensor
 from pina.solvers import SupervisedSolver
 from pina.trainer import Trainer
 from pina.model import FeedForward
-from pina.loss import LpLoss
+from pina.loss.lp_loss import LpLoss


 class NeuralOperatorProblem(AbstractProblem):
     input_variables = ['u_0', 'u_1']
     output_variables = ['u']
     domains = {
-        'pts': LabelTensor(torch.rand(100, 2), labels={1: {'name': 'space', 'dof': ['u_0', 'u_1']}})
+        'pts': LabelTensor(
+            torch.rand(100, 2),
+            labels={1: {'name': 'space', 'dof': ['u_0', 'u_1']}}
+        )
     }
     conditions = {
         'data' : Condition(
@@ -56,9 +59,51 @@ def test_constructor():
     # SupervisedSolver(problem=problem, model=model_extra_feats, extra_features=extra_feats)


+class AutoSolver(SupervisedSolver):
+
+    def forward(self, input):
+        from pina.graph import Graph
+        if not isinstance(input, Graph):
+            input = Graph.build(
+                'radius',
+                nodes_coordinates=input,
+                nodes_data=torch.rand(input.shape),
+                radius=0.2,
+            )
+        g = self.model[0](input.data, edge_index=input.data.edge_index)
+        g.labels = {1: {'name': 'output', 'dof': ['u']}}
+        return g
+
+
+class GraphModel(torch.nn.Module):
+    def __init__(self, in_channels, out_channels):
+        from torch_geometric.nn import GCNConv
+        super().__init__()
+        self.conv1 = GCNConv(in_channels, 16)
+        self.conv2 = GCNConv(16, out_channels)
+
+    def forward(self, data, edge_index):
+        x = data.x
+        x = self.conv1(x, edge_index)
+        x = x.relu()
+        x = self.conv2(x, edge_index)
+        return x
+
+
+def test_graph():
+    solver = AutoSolver(problem=problem, model=GraphModel(2, 1), loss=LpLoss())
+    trainer = Trainer(solver=solver, max_epochs=30, accelerator='cpu', batch_size=20)
+    trainer.train()
+
+
 def test_train_cpu():
     solver = SupervisedSolver(problem = problem, model=model, loss=LpLoss())
-    trainer = Trainer(solver=solver, max_epochs=3, accelerator='cpu', batch_size=20)
+    trainer = Trainer(solver=solver, max_epochs=300, accelerator='cpu', batch_size=20)
     trainer.train()
tutorials/tutorial10/tutorial.py (vendored)
@@ -242,7 +242,7 @@ plot_trajectory(coords=initial_cond_test[sample_number].extract(['x', 't']),
 # In[8]:


-from pina.loss import PowerLoss
+from pina.loss.power_loss import PowerLoss

 error_metric = PowerLoss(p=2)  # we use the MSE loss
tutorials/tutorial5/tutorial.py (vendored)
@@ -116,7 +116,7 @@ trainer.train()
 # In[19]:


-from pina.loss import LpLoss
+from pina.loss.lp_loss import LpLoss

 # make the metric
 metric_err = LpLoss(relative=True)