refact

118 pina/graph.py Normal file
@@ -0,0 +1,118 @@
""" Module for Graph class """

import logging

import torch
from torch_geometric.nn import radius_graph
from torch_geometric.data import Data


class Graph:
    """
    PINA Graph managing the PyG Data class.
    """

    def __init__(self, data):
        self.data = data

    @staticmethod
    def _build_triangulation(**kwargs):
        logging.debug("Creating graph with triangulation mode.")

        # check for mandatory arguments
        if "nodes_coordinates" not in kwargs:
            raise ValueError("Nodes coordinates must be provided in the kwargs.")
        if "nodes_data" not in kwargs:
            raise ValueError("Nodes data must be provided in the kwargs.")
        if "triangles" not in kwargs:
            raise ValueError("Triangles must be provided in the kwargs.")

        nodes_coordinates = kwargs["nodes_coordinates"]
        nodes_data = kwargs["nodes_data"]
        triangles = kwargs["triangles"]

        def less_first(a, b):
            # order the two node indices so that (a, b) and (b, a)
            # map to the same edge
            return [a, b] if a < b else [b, a]

        list_of_edges = []

        # each triangle contributes its three edges
        for triangle in triangles:
            for e1, e2 in [[0, 1], [1, 2], [2, 0]]:
                list_of_edges.append(less_first(triangle[e1], triangle[e2]))

        # remove duplicates and cast to the integer (2, num_edges) layout
        # expected by PyG for edge_index
        array_of_edges = torch.unique(
            torch.tensor(list_of_edges, dtype=torch.long), dim=0
        )
        array_of_edges = array_of_edges.t().contiguous()
        logging.debug(array_of_edges)

        # list_of_lengths = []
        # for p1, p2 in array_of_edges:
        #     x1, y1 = tri.points[p1]
        #     x2, y2 = tri.points[p2]
        #     list_of_lengths.append((x1-x2)**2 + (y1-y2)**2)
        # array_of_lengths = np.sqrt(np.array(list_of_lengths))
        # return array_of_edges, array_of_lengths

        return Data(
            x=nodes_data,
            pos=nodes_coordinates.T,
            edge_index=array_of_edges,
        )

    @staticmethod
    def _build_radius(**kwargs):
        logging.debug("Creating graph with radius mode.")

        # check for mandatory arguments
        if "nodes_coordinates" not in kwargs:
            raise ValueError("Nodes coordinates must be provided in the kwargs.")
        if "nodes_data" not in kwargs:
            raise ValueError("Nodes data must be provided in the kwargs.")
        if "radius" not in kwargs:
            raise ValueError("Radius must be provided in the kwargs.")

        nodes_coordinates = kwargs["nodes_coordinates"]
        nodes_data = kwargs["nodes_data"]
        radius = kwargs["radius"]

        edges_data = kwargs.get("edge_data", None)
        loop = kwargs.get("loop", False)
        batch = kwargs.get("batch", None)

        logging.debug(f"radius: {radius}, loop: {loop}, batch: {batch}")

        edge_index = radius_graph(
            x=nodes_coordinates.tensor,
            r=radius,
            loop=loop,
            batch=batch,
        )

        logging.debug("edge_index computed")
        return Data(
            x=nodes_data,
            pos=nodes_coordinates,
            edge_index=edge_index,
            edge_attr=edges_data,
        )

    @staticmethod
    def build(mode, **kwargs):
        """
        Constructor for the `Graph` class.
        """
        if mode == "radius":
            graph = Graph._build_radius(**kwargs)
        elif mode == "triangulation":
            graph = Graph._build_triangulation(**kwargs)
        else:
            raise ValueError(f"Mode {mode} not recognized")

        return Graph(graph)

    def __repr__(self):
        return f"Graph(data={self.data})"
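Illustrative usage sketch, not part of the commit: how the build factory above might be called. The coordinates and features are made-up placeholders, and radius mode assumes nodes_coordinates is a PINA LabelTensor, since _build_radius reads nodes_coordinates.tensor.

import torch
from pina.label_tensor import LabelTensor
from pina.graph import Graph

# hypothetical point cloud: 100 nodes in 2D, one scalar feature per node
coords = LabelTensor(torch.rand(100, 2), ['x', 'y'])
features = torch.rand(100, 1)

# connect every pair of nodes closer than 0.1
graph = Graph.build(
    mode="radius",
    nodes_coordinates=coords,
    nodes_data=features,
    radius=0.1,
)
print(graph.data.edge_index.shape)  # (2, num_edges)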
@@ -427,4 +427,8 @@ class LabelTensor(torch.Tensor):
    def requires_grad_(self, mode=True):
        lt = super().requires_grad_(mode)
        lt.labels = self.labels
        return lt

    @property
    def dtype(self):
        return super().dtype
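Illustrative check, not part of the diff: the requires_grad_ override above returns a LabelTensor with its labels preserved. The tensor and labels below are placeholders, assuming LabelTensor is built from a tensor and a list of column labels as elsewhere in PINA.

import torch
from pina.label_tensor import LabelTensor

lt = LabelTensor(torch.rand(4, 2), ['x', 'y'])
lt = lt.requires_grad_(True)
print(lt.labels, lt.requires_grad)  # labels preserved after enabling autograd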
209 pina/loss.py
@@ -1,209 +0,0 @@
""" Module for Loss class """

from abc import ABCMeta, abstractmethod
from torch.nn.modules.loss import _Loss
import torch
from .utils import check_consistency

__all__ = ["LossInterface", "LpLoss", "PowerLoss"]


class LossInterface(_Loss, metaclass=ABCMeta):
    """
    The abstract ``LossInterface`` class. Every class defining a PINA loss
    should inherit from this class.
    """

    def __init__(self, reduction="mean"):
        """
        :param str reduction: Specifies the reduction to apply to the output:
            ``none`` | ``mean`` | ``sum``. When ``none``: no reduction
            will be applied, ``mean``: the sum of the output will be divided
            by the number of elements in the output, ``sum``: the output will
            be summed. Note: ``size_average`` and ``reduce`` are in the
            process of being deprecated; in the meantime, specifying either of
            those two args will override ``reduction``. Default: ``mean``.
        """
        super().__init__(reduction=reduction, size_average=None, reduce=None)

    @abstractmethod
    def forward(self, input, target):
        """Forward method for loss function.

        :param torch.Tensor input: Input tensor from real data.
        :param torch.Tensor target: Model tensor output.
        :return: Loss evaluation.
        :rtype: torch.Tensor
        """
        pass

    def _reduction(self, loss):
        """Simple helper function applying the reduction chosen at
        construction (``none`` | ``mean`` | ``sum``) to the element-wise loss.

        :param loss: Loss tensor for each element.
        :type loss: torch.Tensor
        :return: Reduced loss.
        :rtype: torch.Tensor
        """
        if self.reduction == "none":
            ret = loss
        elif self.reduction == "mean":
            ret = torch.mean(loss, keepdim=True, dim=-1)
        elif self.reduction == "sum":
            ret = torch.sum(loss, keepdim=True, dim=-1)
        else:
            raise ValueError(self.reduction + " is not valid")
        return ret


class LpLoss(LossInterface):
    r"""
    The Lp loss implementation class. Creates a criterion that measures
    the Lp error between each element in the input :math:`x` and
    target :math:`y`.

    The unreduced (i.e. with ``reduction`` set to ``none``) loss can
    be described as:

    .. math::
        \ell(x, y) = L = \{l_1,\dots,l_N\}^\top, \quad
        l_n = \left[\sum_{i=1}^{D} \left| x_n^i - y_n^i \right|^p \right],

    If ``relative`` is set to ``True``:

    .. math::
        \ell(x, y) = L = \{l_1,\dots,l_N\}^\top, \quad
        l_n = \frac{ [\sum_{i=1}^{D} | x_n^i - y_n^i|^p] }{[\sum_{i=1}^{D}|y_n^i|^p]},

    where :math:`N` is the batch size. If ``reduction`` is not ``none``
    (default ``mean``), then:

    .. math::
        \ell(x, y) =
        \begin{cases}
            \operatorname{mean}(L), & \text{if reduction} = \text{`mean';}\\
            \operatorname{sum}(L), & \text{if reduction} = \text{`sum'.}
        \end{cases}

    :math:`x` and :math:`y` are tensors of arbitrary shapes with a total
    of :math:`n` elements each.

    The sum operation still operates over all the elements, and divides by :math:`n`.

    The division by :math:`n` can be avoided if one sets ``reduction`` to ``sum``.
    """

    def __init__(self, p=2, reduction="mean", relative=False):
        """
        :param int p: Degree of the Lp norm. It specifies the type of norm to
            be computed. See the `list of possible orders in torch.linalg
            <https://pytorch.org/docs/stable/generated/torch.linalg.norm.html#torch.linalg.norm>`_
            for the possible degrees. Default 2 (Euclidean norm).
        :param str reduction: Specifies the reduction to apply to the output:
            ``none`` | ``mean`` | ``sum``. ``none``: no reduction
            will be applied, ``mean``: the sum of the output will be divided
            by the number of elements in the output, ``sum``: the output will
            be summed.
        :param bool relative: Specifies if the relative error should be computed.
        """
        super().__init__(reduction=reduction)

        # check consistency
        check_consistency(p, (str, int, float))
        check_consistency(relative, bool)

        self.p = p
        self.relative = relative

    def forward(self, input, target):
        """Forward method for loss function.

        :param torch.Tensor input: Input tensor from real data.
        :param torch.Tensor target: Model tensor output.
        :return: Loss evaluation.
        :rtype: torch.Tensor
        """
        loss = torch.linalg.norm((input - target), ord=self.p, dim=-1)
        if self.relative:
            loss = loss / torch.linalg.norm(input, ord=self.p, dim=-1)
        return self._reduction(loss)


class PowerLoss(LossInterface):
    r"""
    The PowerLoss implementation class. Creates a criterion that measures
    the error between each element in the input :math:`x` and
    target :math:`y`, raised to a given power.

    The unreduced (i.e. with ``reduction`` set to ``none``) loss can
    be described as:

    .. math::
        \ell(x, y) = L = \{l_1,\dots,l_N\}^\top, \quad
        l_n = \frac{1}{D}\left[\sum_{i=1}^{D} \left| x_n^i - y_n^i \right|^p \right],

    If ``relative`` is set to ``True``:

    .. math::
        \ell(x, y) = L = \{l_1,\dots,l_N\}^\top, \quad
        l_n = \frac{ \sum_{i=1}^{D} | x_n^i - y_n^i|^p }{\sum_{i=1}^{D}|y_n^i|^p},

    where :math:`N` is the batch size. If ``reduction`` is not ``none``
    (default ``mean``), then:

    .. math::
        \ell(x, y) =
        \begin{cases}
            \operatorname{mean}(L), & \text{if reduction} = \text{`mean';}\\
            \operatorname{sum}(L), & \text{if reduction} = \text{`sum'.}
        \end{cases}

    :math:`x` and :math:`y` are tensors of arbitrary shapes with a total
    of :math:`n` elements each.

    The sum operation still operates over all the elements, and divides by :math:`n`.

    The division by :math:`n` can be avoided if one sets ``reduction`` to ``sum``.
    """

    def __init__(self, p=2, reduction="mean", relative=False):
        """
        :param int p: Degree of the Lp norm. It specifies the type of norm to
            be computed. See the `list of possible orders in torch.linalg
            <https://pytorch.org/docs/stable/generated/torch.linalg.norm.html#torch.linalg.norm>`_
            for the possible degrees. Default 2 (Euclidean norm).
        :param str reduction: Specifies the reduction to apply to the output:
            ``none`` | ``mean`` | ``sum``. When ``none``: no reduction
            will be applied, ``mean``: the sum of the output will be divided
            by the number of elements in the output, ``sum``: the output will
            be summed.
        :param bool relative: Specifies if the relative error should be computed.
        """
        super().__init__(reduction=reduction)

        # check consistency
        check_consistency(p, (str, int, float))
        self.p = p
        check_consistency(relative, bool)
        self.relative = relative

    def forward(self, input, target):
        """Forward method for loss function.

        :param torch.Tensor input: Input tensor from real data.
        :param torch.Tensor target: Model tensor output.
        :return: Loss evaluation.
        :rtype: torch.Tensor
        """
        loss = torch.abs((input - target)).pow(self.p).mean(-1)
        if self.relative:
            loss = loss / torch.abs(input).pow(self.p).mean(-1)
        return self._reduction(loss)
9 pina/loss/__init__.py Normal file
@@ -0,0 +1,9 @@
__all__ = [
    'LossInterface',
    'LpLoss',
    'PowerLoss',
    'WeightningInterface',
]

from .loss_interface import LossInterface
from .power_loss import PowerLoss
from .lp_loss import LpLoss
from .weightning_interface import WeightningInterface
61 pina/loss/loss_interface.py Normal file
@@ -0,0 +1,61 @@
""" Module for Loss Interface """

from abc import ABCMeta, abstractmethod
from torch.nn.modules.loss import _Loss
import torch


class LossInterface(_Loss, metaclass=ABCMeta):
    """
    The abstract ``LossInterface`` class. Every class defining a PINA loss
    should inherit from this class.
    """

    def __init__(self, reduction="mean"):
        """
        :param str reduction: Specifies the reduction to apply to the output:
            ``none`` | ``mean`` | ``sum``. When ``none``: no reduction
            will be applied, ``mean``: the sum of the output will be divided
            by the number of elements in the output, ``sum``: the output will
            be summed. Note: ``size_average`` and ``reduce`` are in the
            process of being deprecated; in the meantime, specifying either of
            those two args will override ``reduction``. Default: ``mean``.
        """
        super().__init__(reduction=reduction, size_average=None, reduce=None)

    @abstractmethod
    def forward(self, input, target):
        """Forward method for loss function.

        :param torch.Tensor input: Input tensor from real data.
        :param torch.Tensor target: Model tensor output.
        :return: Loss evaluation.
        :rtype: torch.Tensor
        """
        pass

    def _reduction(self, loss):
        """Simple helper function applying the reduction chosen at
        construction (``none`` | ``mean`` | ``sum``) to the element-wise loss.

        :param loss: Loss tensor for each element.
        :type loss: torch.Tensor
        :return: Reduced loss.
        :rtype: torch.Tensor
        """
        if self.reduction == "none":
            ret = loss
        elif self.reduction == "mean":
            ret = torch.mean(loss, keepdim=True, dim=-1)
        elif self.reduction == "sum":
            ret = torch.sum(loss, keepdim=True, dim=-1)
        else:
            raise ValueError(self.reduction + " is not valid")
        return ret
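Illustrative sketch, not part of the commit: a custom loss could subclass the interface above by implementing forward and delegating the reduction to _reduction. The L1Loss name below is hypothetical.

import torch
from pina.loss.loss_interface import LossInterface


class L1Loss(LossInterface):
    """Hypothetical mean absolute error built on LossInterface."""

    def forward(self, input, target):
        # per-sample absolute error averaged over the last dimension,
        # then reduced according to self.reduction
        loss = torch.abs(input - target).mean(-1)
        return self._reduction(loss)


loss = L1Loss(reduction="mean")
print(loss(torch.rand(10, 3), torch.rand(10, 3)))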
78 pina/loss/lp_loss.py Normal file
@@ -0,0 +1,78 @@
""" Module for LpLoss class """

import torch

from ..utils import check_consistency
from .loss_interface import LossInterface


class LpLoss(LossInterface):
    r"""
    The Lp loss implementation class. Creates a criterion that measures
    the Lp error between each element in the input :math:`x` and
    target :math:`y`.

    The unreduced (i.e. with ``reduction`` set to ``none``) loss can
    be described as:

    .. math::
        \ell(x, y) = L = \{l_1,\dots,l_N\}^\top, \quad
        l_n = \left[\sum_{i=1}^{D} \left| x_n^i - y_n^i \right|^p \right],

    If ``relative`` is set to ``True``:

    .. math::
        \ell(x, y) = L = \{l_1,\dots,l_N\}^\top, \quad
        l_n = \frac{ [\sum_{i=1}^{D} | x_n^i - y_n^i|^p] }{[\sum_{i=1}^{D}|y_n^i|^p]},

    where :math:`N` is the batch size. If ``reduction`` is not ``none``
    (default ``mean``), then:

    .. math::
        \ell(x, y) =
        \begin{cases}
            \operatorname{mean}(L), & \text{if reduction} = \text{`mean';}\\
            \operatorname{sum}(L), & \text{if reduction} = \text{`sum'.}
        \end{cases}

    :math:`x` and :math:`y` are tensors of arbitrary shapes with a total
    of :math:`n` elements each.

    The sum operation still operates over all the elements, and divides by :math:`n`.

    The division by :math:`n` can be avoided if one sets ``reduction`` to ``sum``.
    """

    def __init__(self, p=2, reduction="mean", relative=False):
        """
        :param int p: Degree of the Lp norm. It specifies the type of norm to
            be computed. See the `list of possible orders in torch.linalg
            <https://pytorch.org/docs/stable/generated/torch.linalg.norm.html#torch.linalg.norm>`_
            for the possible degrees. Default 2 (Euclidean norm).
        :param str reduction: Specifies the reduction to apply to the output:
            ``none`` | ``mean`` | ``sum``. ``none``: no reduction
            will be applied, ``mean``: the sum of the output will be divided
            by the number of elements in the output, ``sum``: the output will
            be summed.
        :param bool relative: Specifies if the relative error should be computed.
        """
        super().__init__(reduction=reduction)

        # check consistency
        check_consistency(p, (str, int, float))
        check_consistency(relative, bool)

        self.p = p
        self.relative = relative

    def forward(self, input, target):
        """Forward method for loss function.

        :param torch.Tensor input: Input tensor from real data.
        :param torch.Tensor target: Model tensor output.
        :return: Loss evaluation.
        :rtype: torch.Tensor
        """
        loss = torch.linalg.norm((input - target), ord=self.p, dim=-1)
        if self.relative:
            loss = loss / torch.linalg.norm(input, ord=self.p, dim=-1)
        return self._reduction(loss)
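Illustrative usage, not part of the commit: exercising LpLoss as defined above on plain tensors, using the package export added in pina/loss/__init__.py.

import torch
from pina.loss import LpLoss

pred = torch.tensor([[1.0, 2.0], [3.0, 4.0]])
true = torch.tensor([[1.0, 1.0], [3.0, 3.0]])

# Euclidean (p=2) norm of the per-sample error, averaged over the batch
loss = LpLoss(p=2, reduction="mean")
print(loss(pred, true))  # tensor([1.])

# relative version divides by the norm of the input
rel_loss = LpLoss(p=2, relative=True)
print(rel_loss(pred, true))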
79 pina/loss/power_loss.py Normal file
@@ -0,0 +1,79 @@
""" Module for PowerLoss class """

import torch

from ..utils import check_consistency
from .loss_interface import LossInterface


class PowerLoss(LossInterface):
    r"""
    The PowerLoss implementation class. Creates a criterion that measures
    the error between each element in the input :math:`x` and
    target :math:`y`, raised to a given power.

    The unreduced (i.e. with ``reduction`` set to ``none``) loss can
    be described as:

    .. math::
        \ell(x, y) = L = \{l_1,\dots,l_N\}^\top, \quad
        l_n = \frac{1}{D}\left[\sum_{i=1}^{D} \left| x_n^i - y_n^i \right|^p \right],

    If ``relative`` is set to ``True``:

    .. math::
        \ell(x, y) = L = \{l_1,\dots,l_N\}^\top, \quad
        l_n = \frac{ \sum_{i=1}^{D} | x_n^i - y_n^i|^p }{\sum_{i=1}^{D}|y_n^i|^p},

    where :math:`N` is the batch size. If ``reduction`` is not ``none``
    (default ``mean``), then:

    .. math::
        \ell(x, y) =
        \begin{cases}
            \operatorname{mean}(L), & \text{if reduction} = \text{`mean';}\\
            \operatorname{sum}(L), & \text{if reduction} = \text{`sum'.}
        \end{cases}

    :math:`x` and :math:`y` are tensors of arbitrary shapes with a total
    of :math:`n` elements each.

    The sum operation still operates over all the elements, and divides by :math:`n`.

    The division by :math:`n` can be avoided if one sets ``reduction`` to ``sum``.
    """

    def __init__(self, p=2, reduction="mean", relative=False):
        """
        :param int p: Degree of the Lp norm. It specifies the type of norm to
            be computed. See the `list of possible orders in torch.linalg
            <https://pytorch.org/docs/stable/generated/torch.linalg.norm.html#torch.linalg.norm>`_
            for the possible degrees. Default 2 (Euclidean norm).
        :param str reduction: Specifies the reduction to apply to the output:
            ``none`` | ``mean`` | ``sum``. When ``none``: no reduction
            will be applied, ``mean``: the sum of the output will be divided
            by the number of elements in the output, ``sum``: the output will
            be summed.
        :param bool relative: Specifies if the relative error should be computed.
        """
        super().__init__(reduction=reduction)

        # check consistency
        check_consistency(p, (str, int, float))
        check_consistency(relative, bool)

        self.p = p
        self.relative = relative

    def forward(self, input, target):
        """Forward method for loss function.

        :param torch.Tensor input: Input tensor from real data.
        :param torch.Tensor target: Model tensor output.
        :return: Loss evaluation.
        :rtype: torch.Tensor
        """
        loss = torch.abs((input - target)).pow(self.p).mean(-1)
        if self.relative:
            loss = loss / torch.abs(input).pow(self.p).mean(-1)
        return self._reduction(loss)
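Illustrative usage, not part of the commit: unlike LpLoss, PowerLoss averages the p-th power of the error instead of taking a norm, as the forward above shows.

import torch
from pina.loss import PowerLoss

pred = torch.tensor([[1.0, 2.0], [3.0, 4.0]])
true = torch.tensor([[1.0, 1.0], [3.0, 3.0]])

# mean of |error|^2 over the feature dimension, then over the batch
loss = PowerLoss(p=2, reduction="mean")
print(loss(pred, true))  # tensor([0.5000])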
35 pina/loss/weighted_aggregation.py Normal file
@@ -0,0 +1,35 @@
""" Module for Weighted Aggregation class """

from .weightning_interface import WeightningInterface


class WeightedAggregation(WeightningInterface):
    """
    TODO
    """

    def __init__(self, aggr='mean', weights=None):
        self.aggr = aggr
        self.weights = weights

    def aggregate(self, losses):
        """
        Aggregate the losses.

        :param dict(torch.Tensor) losses: The dictionary of losses.
        :return: The losses aggregation. It should be a scalar Tensor.
        :rtype: torch.Tensor
        """
        if self.weights:
            weighted_losses = {
                condition: self.weights[condition] * losses[condition]
                for condition in losses
            }
        else:
            weighted_losses = losses

        if self.aggr == 'mean':
            return sum(weighted_losses.values()) / len(weighted_losses)
        elif self.aggr == 'sum':
            return sum(weighted_losses.values())
        else:
            raise ValueError(self.aggr + " is not valid for aggregation.")
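Illustrative usage, not part of the commit: combining per-condition losses with the aggregator above. The condition names and loss values are made up.

import torch
from pina.loss.weighted_aggregation import WeightedAggregation

losses = {
    "physics": torch.tensor(0.5),
    "boundary": torch.tensor(1.0),
}

# weigh the boundary term twice as much, then average over the conditions
aggr = WeightedAggregation(aggr='mean', weights={"physics": 1.0, "boundary": 2.0})
total = aggr.aggregate(losses)  # (1.0*0.5 + 2.0*1.0) / 2 = 1.25
print(total)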
24 pina/loss/weightning_interface.py Normal file
@@ -0,0 +1,24 @@
""" Module for Weighting Interface """

from abc import ABCMeta, abstractmethod


class WeightningInterface(metaclass=ABCMeta):
    """
    The abstract ``WeightningInterface`` class. TODO
    """

    @abstractmethod
    def __init__(self, *args, **kwargs):
        pass

    @abstractmethod
    def aggregate(self, losses):
        """
        Aggregate the losses.

        :param dict(torch.Tensor) losses: The dictionary of losses.
        :return: The losses aggregation. It should be a scalar Tensor.
        :rtype: torch.Tensor
        """
        pass
11 pina/model/layers/messa_passing.py Normal file
@@ -0,0 +1,11 @@
""" Module for Message Passing Block class. """

from torch import nn, mean
from torch_geometric.nn import MessagePassing, InstanceNorm, radius_graph

from pina.utils import check_consistency


class MessagePassingBlock(nn.Module):
@@ -6,7 +6,7 @@ import torch

 from ...solvers.solver import SolverInterface
 from pina.utils import check_consistency
-from pina.loss import LossInterface
+from pina.loss.loss_interface import LossInterface
 from pina.problem import InverseProblem
 from torch.nn.modules.loss import _Loss
@@ -8,7 +8,7 @@ from ..optim import Optimizer, Scheduler, TorchOptimizer, TorchScheduler
 from .solver import SolverInterface
 from ..label_tensor import LabelTensor
 from ..utils import check_consistency
-from ..loss import LossInterface
+from ..loss.loss_interface import LossInterface


 class SupervisedSolver(SolverInterface):
@@ -172,10 +172,6 @@ class SupervisedSolver(SolverInterface):
         :return: The residual loss averaged on the input coordinates
         :rtype: torch.Tensor
         """
-        print(input_pts)
-        print(output_pts)
-        print(self.loss)
-        print(self.forward(input_pts))
         return self.loss(self.forward(input_pts), output_pts)

     @property