Fix Codacy Warnings (#477)
Co-authored-by: Dario Coscia <dariocos99@gmail.com>
Committed by: Nicola Demo
Parent: e3790e049a
Commit: 4177bfbb50
@@ -1,3 +1,7 @@
+"""
+Module containing the building blocks for models.
+"""
+
 __all__ = [
     "ContinuousConvBlock",
     "ResidualBlock",
@@ -1,6 +1,7 @@
 """Module for Averaging Neural Operator Layer class."""
 
-from torch import nn, mean
+import torch
+from torch import nn
 from ...utils import check_consistency
 
 
@@ -64,4 +65,4 @@ class AVNOBlock(nn.Module):
         :return: The output tensor obtained from Average Neural Operator Block.
         :rtype: torch.Tensor
         """
-        return self._func(self._nn(x) + mean(x, dim=1, keepdim=True))
+        return self._func(self._nn(x) + torch.mean(x, dim=1, keepdim=True))
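The patched line only makes the ``mean`` call explicit through the ``torch`` namespace. A minimal sketch of the computation, assuming ``self._nn`` is a linear layer and ``self._func`` an activation (both assumptions, since their definitions sit outside this hunk):

    import torch
    from torch import nn

    x = torch.rand(8, 100, 32)   # [batch, points, hidden]
    linear = nn.Linear(32, 32)   # stands in for self._nn
    func = nn.GELU()             # stands in for self._func

    # The kernel integral is approximated by the mean over the point
    # axis, broadcast back onto every point.
    out = func(linear(x) + torch.mean(x, dim=1, keepdim=True))
    print(out.shape)             # torch.Size([8, 100, 32])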
@@ -75,34 +75,29 @@ class BaseContinuousConv(torch.nn.Module, metaclass=ABCMeta):
         """
         super().__init__()
 
-        if isinstance(input_numb_field, int):
-            self._input_numb_field = input_numb_field
-        else:
+        if not isinstance(input_numb_field, int):
             raise ValueError("input_numb_field must be int.")
+        self._input_numb_field = input_numb_field
 
-        if isinstance(output_numb_field, int):
-            self._output_numb_field = output_numb_field
-        else:
+        if not isinstance(output_numb_field, int):
             raise ValueError("output_numb_field must be int.")
+        self._output_numb_field = output_numb_field
 
-        if isinstance(filter_dim, (tuple, list)):
-            vect = filter_dim
-        else:
+        if not isinstance(filter_dim, (tuple, list)):
             raise ValueError("filter_dim must be tuple or list.")
+        vect = filter_dim
         vect = torch.tensor(vect)
         self.register_buffer("_dim", vect, persistent=False)
 
-        if isinstance(stride, dict):
-            self._stride = Stride(stride)
-        else:
+        if not isinstance(stride, dict):
             raise ValueError("stride must be dictionary.")
+        self._stride = Stride(stride)
 
         self._net = model
 
-        if isinstance(optimize, bool):
-            self._optimize = optimize
-        else:
+        if not isinstance(optimize, bool):
             raise ValueError("optimize must be bool.")
+        self._optimize = optimize
 
         # choosing how to initialize based on optimization
         if self._optimize:
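The rewrites above all apply the same guard-clause pattern (Codacy's no-else-raise rule): validate and raise first, then assign at the base indentation. In isolation, with hypothetical names:

    class Filter:
        def set_width(self, width):
            # Reject bad input up front; the happy path stays unindented.
            if not isinstance(width, int):
                raise ValueError("width must be int.")
            self._width = width

    f = Filter()
    f.set_width(3)        # ok
    # f.set_width("3")    # would raise ValueError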
@@ -119,13 +114,18 @@ class BaseContinuousConv(torch.nn.Module, metaclass=ABCMeta):
 
         if no_overlap:
-            self.transpose = self.transpose_no_overlap
-        else:
-            self.transpose = self.transpose_overlap
+            raise NotImplementedError
+        self.transpose = self.transpose_overlap
 
 
 class DefaultKernel(torch.nn.Module):
+    """
+    TODO
+    """
 
     def __init__(self, input_dim, output_dim):
+        """
+        TODO
+        """
         super().__init__()
         assert isinstance(input_dim, int)
         assert isinstance(output_dim, int)
@@ -138,44 +138,66 @@ class BaseContinuousConv(torch.nn.Module, metaclass=ABCMeta):
         )
 
     def forward(self, x):
+        """
+        TODO
+        """
         return self._model(x)
 
     @property
     def net(self):
+        """
+        TODO
+        """
         return self._net
 
     @property
     def stride(self):
+        """
+        TODO
+        """
         return self._stride
 
     @property
     def filter_dim(self):
+        """
+        TODO
+        """
         return self._dim
 
     @property
     def input_numb_field(self):
+        """
+        TODO
+        """
         return self._input_numb_field
 
     @property
     def output_numb_field(self):
+        """
+        TODO
+        """
         return self._output_numb_field
 
-    @property
     @abstractmethod
     def forward(self, X):
-        pass
+        """
+        TODO
+        """
 
-    @property
     @abstractmethod
     def transpose_overlap(self, X):
-        pass
+        """
+        TODO
+        """
 
-    @property
     @abstractmethod
     def transpose_no_overlap(self, X):
-        pass
+        """
+        TODO
+        """
 
-    @property
     @abstractmethod
-    def _initialize_convolution(self, X, type):
-        pass
+    def _initialize_convolution(self, X, type_):
+        """
+        TODO
+        """
@@ -1,9 +1,9 @@
 """Module for Continuous Convolution class"""
 
+import torch
 from .convolution import BaseContinuousConv
 from .utils_convolution import check_point, map_points_
 from .integral import Integral
-import torch
 
 
 class ContinuousConvBlock(BaseContinuousConv):
@@ -27,8 +27,9 @@ class ContinuousConvBlock(BaseContinuousConv):
     .. seealso::
 
         **Original reference**: Coscia, D., Meneghetti, L., Demo, N. et al.
-        *A continuous convolutional trainable filter for modelling unstructured data*.
-        Comput Mech 72, 253–265 (2023). DOI `<https://doi.org/10.1007/s00466-023-02291-1>`_
+        *A continuous convolutional trainable filter for modelling
+        unstructured data*. Comput Mech 72, 253–265 (2023).
+        DOI `<https://doi.org/10.1007/s00466-023-02291-1>`_
 
     """
@@ -45,7 +46,8 @@ class ContinuousConvBlock(BaseContinuousConv):
         """
         :param input_numb_field: Number of fields :math:`N_{in}` in the input.
         :type input_numb_field: int
-        :param output_numb_field: Number of fields :math:`N_{out}` in the output.
+        :param output_numb_field: Number of fields :math:`N_{out}` in the
+            output.
         :type output_numb_field: int
         :param filter_dim: Dimension of the filter.
         :type filter_dim: tuple(int) | list(int)
@@ -134,6 +136,11 @@ class ContinuousConvBlock(BaseContinuousConv):
         # stride for continuous convolution overridden
         self._stride = self._stride._stride_discrete
 
+        # Define variables
+        self._index = None
+        self._grid = None
+        self._grid_transpose = None
+
     def _spawn_networks(self, model):
         """
         Private method to create a collection of kernels
@@ -152,7 +159,7 @@ class ContinuousConvBlock(BaseContinuousConv):
         else:
             if not isinstance(model, object):
                 raise ValueError(
-                    "Expected a python class inheriting" " from torch.nn.Module"
+                    "Expected a python class inheriting from torch.nn.Module"
                 )
 
         for _ in range(self._input_numb_field * self._output_numb_field):
@@ -271,7 +278,7 @@ class ContinuousConvBlock(BaseContinuousConv):
             # save on tmp
             self._grid_transpose = tmp
 
-    def _make_grid(self, X, type):
+    def _make_grid(self, X, type_):
         """
         Private method to create convolution grid.
 
@@ -283,14 +290,15 @@ class ContinuousConvBlock(BaseContinuousConv):
 
         """
         # choose the type of convolution
-        if type == "forward":
-            return self._make_grid_forward(X)
-        elif type == "inverse":
+        if type_ == "forward":
+            self._make_grid_forward(X)
+            return
+        if type_ == "inverse":
             self._make_grid_transpose(X)
-        else:
-            raise TypeError
+            return
+        raise TypeError
 
-    def _initialize_convolution(self, X, type="forward"):
+    def _initialize_convolution(self, X, type_="forward"):
         """
         Private method to initialize the convolution.
         The convolution is initialized by setting a grid and
@@ -304,7 +312,7 @@ class ContinuousConvBlock(BaseContinuousConv):
         """
 
         # variable for the convolution
-        self._make_grid(X, type)
+        self._make_grid(X, type_)
 
         # calculate the index
         self._find_index(X)
@@ -321,7 +329,7 @@ class ContinuousConvBlock(BaseContinuousConv):
 
         # initialize convolution
         if self.training:  # we choose what to do based on optimization
-            self._choose_initialization(X, type="forward")
+            self._choose_initialization(X, type_="forward")
 
         else:  # we always initialize on testing
             self._initialize_convolution(X, "forward")
@@ -383,12 +391,14 @@ class ContinuousConvBlock(BaseContinuousConv):
         :type integral: torch.tensor
         :param X: Input data. Expect tensor of shape
             :math:`[B, N_{in}, M, D]` where :math:`B` is the batch_size,
-            :math`N_{in}`is the number of input fields, :math:`M` the number of points
+            :math:`N_{in}` is the number of input fields, :math:`M` the number
+            of points
             in the mesh, :math:`D` the dimension of the problem.
         :type X: torch.Tensor
         :return: Feed forward transpose convolution. Tensor of shape
             :math:`[B, N_{out}, M, D]` where :math:`B` is the batch_size,
-            :math`N_{out}`is the number of input fields, :math:`M` the number of points
+            :math:`N_{out}` is the number of output fields, :math:`M` the number
+            of points
             in the mesh, :math:`D` the dimension of the problem.
         :rtype: torch.Tensor
@@ -399,7 +409,7 @@ class ContinuousConvBlock(BaseContinuousConv):
 
         # initialize convolution
         if self.training:  # we choose what to do based on optimization
-            self._choose_initialization(X, type="inverse")
+            self._choose_initialization(X, type_="inverse")
 
         else:  # we always initialize on testing
             self._initialize_convolution(X, "inverse")
@@ -466,12 +476,14 @@ class ContinuousConvBlock(BaseContinuousConv):
         :type integral: torch.tensor
         :param X: Input data. Expect tensor of shape
             :math:`[B, N_{in}, M, D]` where :math:`B` is the batch_size,
-            :math`N_{in}`is the number of input fields, :math:`M` the number of points
+            :math:`N_{in}` is the number of input fields, :math:`M` the number
+            of points
             in the mesh, :math:`D` the dimension of the problem.
         :type X: torch.Tensor
         :return: Feed forward transpose convolution. Tensor of shape
             :math:`[B, N_{out}, M, D]` where :math:`B` is the batch_size,
-            :math`N_{out}`is the number of input fields, :math:`M` the number of points
+            :math:`N_{out}` is the number of output fields, :math:`M` the number
+            of points
             in the mesh, :math:`D` the dimension of the problem.
         :rtype: torch.Tensor
@@ -481,7 +493,7 @@ class ContinuousConvBlock(BaseContinuousConv):
 
         # initialize convolution
         if self.training:  # we choose what to do based on optimization
-            self._choose_initialization(X, type="inverse")
+            self._choose_initialization(X, type_="inverse")
 
         else:  # we always initialize on testing
             self._initialize_convolution(X, "inverse")
@@ -491,7 +503,7 @@ class ContinuousConvBlock(BaseContinuousConv):
         conv_transposed = self._grid_transpose.clone().detach()
 
         # list to iterate for calculating nn output
-        tmp = [i for i in range(self._output_numb_field)]
+        tmp = list(range(self._output_numb_field))
         iterate_conv = [
             item for item in tmp for _ in range(self._input_numb_field)
         ]
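Both spellings build the same list; ``list(range(n))`` simply avoids the needless comprehension flagged by Codacy:

    n = 3
    assert [i for i in range(n)] == list(range(n)) == [0, 1, 2]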
@@ -2,7 +2,6 @@
 
 import torch
 from pina.utils import check_consistency
-from typing import Union, Sequence
 
 
 class PeriodicBoundaryEmbedding(torch.nn.Module):
@@ -18,8 +17,9 @@ class PeriodicBoundaryEmbedding(torch.nn.Module):
         u(\mathbf{x}) = u(\mathbf{x} + n \mathbf{L})\;\;
         \forall n\in\mathbb{N}.
 
-    The :meth:`PeriodicBoundaryEmbedding` augments the input such that the periodic conditons
-    is guarantee. The input is augmented by the following formula:
+    The :meth:`PeriodicBoundaryEmbedding` augments the input such that the
+    periodic conditions are guaranteed. The input is augmented by the
+    following formula:
 
     .. math::
         \mathbf{x} \rightarrow \tilde{\mathbf{x}} = \left[1,
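The formula above embeds each periodic coordinate with one Fourier pair, so any network applied to the embedding is exactly periodic. A minimal sketch of that augmentation for a single coordinate with period L (the class itself supports per-coordinate periods, which this sketch omits):

    import torch

    def periodic_embedding(x, L):
        # x -> [1, cos(2*pi*x/L), sin(2*pi*x/L)]: every component is
        # L-periodic, so the downstream network output is too.
        omega = 2 * torch.pi / L
        return torch.cat(
            [torch.ones_like(x), torch.cos(omega * x), torch.sin(omega * x)],
            dim=-1,
        )

    x = torch.linspace(0, 3, 7).unsqueeze(-1)
    assert torch.allclose(periodic_embedding(x, 1.0),
                          periodic_embedding(x + 1.0, 1.0), atol=1e-5)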
@@ -135,13 +135,13 @@ class PeriodicBoundaryEmbedding(torch.nn.Module):
         if isinstance(indeces[0], str):
             try:
                 return x.extract(indeces)
-            except AttributeError:
+            except AttributeError as e:
                 raise RuntimeError(
                     "Not possible to extract input variables from tensor."
                     " Ensure that the passed tensor is a LabelTensor or"
                     " pass list of integers to extract variables. For"
                     " more information refer to warning in the documentation."
-                )
+                ) from e
         elif isinstance(indeces[0], int):
             return x[..., indeces]
         else:
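Chaining with ``from e`` (the pylint ``raise-missing-from`` fix) keeps the original ``AttributeError`` attached to the ``RuntimeError`` as ``__cause__``, so the traceback shows both. A self-contained check of that behavior:

    try:
        try:
            object().extract(["x"])  # hypothetical call; raises AttributeError
        except AttributeError as e:
            raise RuntimeError("extraction failed") from e
    except RuntimeError as err:
        assert isinstance(err.__cause__, AttributeError)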
@@ -159,11 +159,14 @@ class PeriodicBoundaryEmbedding(torch.nn.Module):
 
 
 class FourierFeatureEmbedding(torch.nn.Module):
+    """
+    Fourier Feature Embedding class for encoding input features
+    using random Fourier features.
+    """
 
     def __init__(self, input_dimension, output_dimension, sigma):
         r"""
-        Fourier Feature Embedding class for encoding input features
-        using random Fourier features.This class applies a Fourier
-        transformation to the input features,
+        This class applies a Fourier transformation to the input features,
         which can help in learning high-frequency variations in data.
         If multiple sigma are provided, the class
         supports multiscale feature embedding, creating embeddings for
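A minimal sketch of random Fourier features in the sense of the docstring above. Drawing the projection matrix from N(0, sigma^2) follows the standard construction and is an assumption here, since the actual initialization is outside this hunk:

    import torch

    def fourier_embedding(x, out_dim, sigma):
        # Random projection B ~ N(0, sigma^2); larger sigma favors
        # higher-frequency content in the embedding.
        B = torch.randn(x.shape[-1], out_dim // 2) * sigma
        proj = 2 * torch.pi * x @ B
        return torch.cat([torch.cos(proj), torch.sin(proj)], dim=-1)

    x = torch.rand(16, 2)
    print(fourier_embedding(x, 64, sigma=1.0).shape)  # torch.Size([16, 64])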
@@ -1,8 +1,12 @@
+"""
+Module for Fourier Block implementation.
+"""
+
 import torch
-import torch.nn as nn
+from torch import nn
 from ...utils import check_consistency
 
-from . import (
+from .spectral import (
     SpectralConvBlock1D,
     SpectralConvBlock2D,
     SpectralConvBlock3D,
@@ -17,9 +21,9 @@ class FourierBlock1D(nn.Module):
 
     .. seealso::
 
-        **Original reference**: Li, Z., Kovachki, N., Azizzadenesheli, K., Liu, B.,
-        Bhattacharya, K., Stuart, A., & Anandkumar, A. (2020). *Fourier neural operator for
-        parametric partial differential equations*.
+        **Original reference**: Li, Z., Kovachki, N., Azizzadenesheli, K.,
+        Liu, B., Bhattacharya, K., Stuart, A., & Anandkumar, A. (2020). *Fourier
+        neural operator for parametric partial differential equations*.
         DOI: `arXiv preprint arXiv:2010.08895.
         <https://arxiv.org/abs/2010.08895>`_
@@ -32,24 +36,26 @@ class FourierBlock1D(nn.Module):
         n_modes,
         activation=torch.nn.Tanh,
     ):
-        super().__init__()
         """
         PINA implementation of Fourier block one dimension. The module computes
         the spectral convolution of the input with a linear kernel in the
         fourier space, and then it maps the input back to the physical
         space. The output is then added to a Linear transformation of the
         input in the physical space. Finally an activation function is
         applied to the output.
 
         The block expects an input of size ``[batch, input_numb_fields, N]``
         and returns an output of size ``[batch, output_numb_fields, N]``.
 
         :param int input_numb_fields: The number of channels for the input.
         :param int output_numb_fields: The number of channels for the output.
-        :param list | tuple n_modes: Number of modes to select for each dimension.
-            It must be at most equal to the ``floor(N/2)+1``.
+        :param list | tuple n_modes: Number of modes to select for each
+            dimension. It must be at most equal to the ``floor(N/2)+1``.
         :param torch.nn.Module activation: The activation function.
         """
+        super().__init__()
 
         # check type consistency
         check_consistency(activation(), nn.Module)
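The docstring describes the standard Fourier-layer recipe: a spectral convolution on a truncated set of modes plus a pointwise linear skip path, then an activation. A rough, self-contained sketch of that forward logic under those assumptions (class and layer names here are hypothetical, not PINA's):

    import torch
    from torch import nn

    class ToyFourierBlock1D(nn.Module):
        def __init__(self, channels, n_modes):
            super().__init__()
            self.n_modes = n_modes
            # Learnable complex kernel acting on the retained Fourier modes.
            self.weight = nn.Parameter(
                torch.randn(channels, channels, n_modes, dtype=torch.cfloat)
            )
            self.linear = nn.Conv1d(channels, channels, kernel_size=1)
            self.activation = nn.Tanh()

        def forward(self, x):                      # [batch, channels, N]
            x_ft = torch.fft.rfft(x)               # [batch, channels, N//2+1]
            out_ft = torch.zeros_like(x_ft)
            # Keep only the first n_modes frequencies (n_modes <= N//2+1).
            out_ft[..., : self.n_modes] = torch.einsum(
                "bim,iom->bom", x_ft[..., : self.n_modes], self.weight
            )
            spectral = torch.fft.irfft(out_ft, n=x.size(-1))
            # Spectral path plus pointwise linear skip, then activation.
            return self.activation(spectral + self.linear(x))

    x = torch.rand(4, 8, 64)
    print(ToyFourierBlock1D(8, n_modes=16)(x).shape)  # torch.Size([4, 8, 64])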
@@ -109,13 +115,15 @@ class FourierBlock2D(nn.Module):
         input in the physical space. Finally an activation function is
         applied to the output.
 
-        The block expects an input of size ``[batch, input_numb_fields, Nx, Ny]``
-        and returns an output of size ``[batch, output_numb_fields, Nx, Ny]``.
+        The block expects an input of size
+        ``[batch, input_numb_fields, Nx, Ny]`` and returns an output of size
+        ``[batch, output_numb_fields, Nx, Ny]``.
 
         :param int input_numb_fields: The number of channels for the input.
         :param int output_numb_fields: The number of channels for the output.
-        :param list | tuple n_modes: Number of modes to select for each dimension.
-            It must be at most equal to the ``floor(Nx/2)+1`` and ``floor(Ny/2)+1``.
+        :param list | tuple n_modes: Number of modes to select for each
+            dimension. It must be at most equal to the ``floor(Nx/2)+1``
+            and ``floor(Ny/2)+1``.
         :param torch.nn.Module activation: The activation function.
         """
         super().__init__()
@@ -172,21 +180,22 @@ class FourierBlock3D(nn.Module):
         activation=torch.nn.Tanh,
     ):
         """
-        PINA implementation of Fourier block three dimensions. The module computes
-        the spectral convolution of the input with a linear kernel in the
-        fourier space, and then it maps the input back to the physical
+        PINA implementation of Fourier block three dimensions. The module
+        computes the spectral convolution of the input with a linear kernel in
+        the fourier space, and then it maps the input back to the physical
         space. The output is then added to a Linear transformation of the
         input in the physical space. Finally an activation function is
         applied to the output.
 
-        The block expects an input of size ``[batch, input_numb_fields, Nx, Ny, Nz]``
-        and returns an output of size ``[batch, output_numb_fields, Nx, Ny, Nz]``.
+        The block expects an input of size
+        ``[batch, input_numb_fields, Nx, Ny, Nz]`` and returns an output of size
+        ``[batch, output_numb_fields, Nx, Ny, Nz]``.
 
         :param int input_numb_fields: The number of channels for the input.
         :param int output_numb_fields: The number of channels for the output.
-        :param list | tuple n_modes: Number of modes to select for each dimension.
-            It must be at most equal to the ``floor(Nx/2)+1``, ``floor(Ny/2)+1``
-            and ``floor(Nz/2)+1``.
+        :param list | tuple n_modes: Number of modes to select for each
+            dimension. It must be at most equal to the ``floor(Nx/2)+1``,
+            ``floor(Ny/2)+1`` and ``floor(Nz/2)+1``.
         :param torch.nn.Module activation: The activation function.
         """
         super().__init__()
@@ -1,10 +1,14 @@
+"""
+Module containing the Graph Integral Layer class.
+"""
+
 import torch
 from torch_geometric.nn import MessagePassing
 
 
 class GNOBlock(MessagePassing):
     """
-    TODO: Add documentation
+    Graph Neural Operator (GNO) Block using PyG MessagePassing.
     """
 
     def __init__(
@@ -18,21 +22,21 @@ class GNOBlock(MessagePassing):
         external_func=None,
     ):
         """
-        Initialize the Graph Integral Layer, inheriting from the MessagePassing class of PyTorch Geometric.
+        Initialize the GNOBlock.
 
-        :param width: The width of the hidden representation of the nodes features
-        :type width: int
-        :param edges_features: The number of edge features.
-        :type edges_features: int
-        :param n_layers: The number of layers in the Feed Forward Neural Network used to compute the representation of the edges features.
-        :type n_layers: int
+        :param width: Hidden dimension of node features.
+        :param edges_features: Number of edge features.
+        :param n_layers: Number of layers in edge transformation MLP.
         """
-        from pina.model import FeedForward
-
-        super(GNOBlock, self).__init__(aggr="mean")
+        from ...model.feed_forward import FeedForward
+
+        super().__init__(aggr="mean")  # Uses PyG's default aggregation
         self.width = width
 
         if layers is None and inner_size is None:
             inner_size = width
 
         self.dense = FeedForward(
             input_dimensions=edges_features,
             output_dimensions=width**2,
@@ -41,48 +45,50 @@ class GNOBlock(MessagePassing):
             inner_size=inner_size,
             func=internal_func,
         )
 
         self.W = torch.nn.Linear(width, width)
         self.func = external_func()
 
-    def message(self, x_j, edge_attr):
+    def message_and_aggregate(self, edge_index, x, edge_attr):
         """
-        This function computes the message passed between the nodes of the graph. Overwrite the default message function defined in the MessagePassing class.
+        Combines message and aggregation.
 
-        :param x_j: The node features of the neighboring.
-        :type x_j: torch.Tensor
-        :param edge_attr: The edge features.
-        :type edge_attr: torch.Tensor
-        :return: The message passed between the nodes of the graph.
-        :rtype: torch.Tensor
+        :param edge_index: COO format edge indices.
+        :param x: Node feature matrix [num_nodes, width].
+        :param edge_attr: Edge features [num_edges, edge_dim].
+        :return: Aggregated messages.
         """
-        x = self.dense(edge_attr).view(-1, self.width, self.width)
-        return torch.einsum("bij,bj->bi", x, x_j)
+        # Edge features are transformed into a matrix of shape
+        # [num_edges, width, width]
+        x_ = self.dense(edge_attr).view(-1, self.width, self.width)
+        # Messages are computed as the product of the edge features
+        messages = torch.einsum("bij,bj->bi", x_, x[edge_index[0]])
+        # Aggregation is performed using the mean (set in the constructor)
+        return self.aggregate(messages, edge_index[1])
 
+    def edge_update(self, edge_attr):
+        """
+        Updates edge features.
+        """
+        return edge_attr
 
     def update(self, aggr_out, x):
         """
-        This function updates the node features of the graph. Overwrite the default update function defined in the MessagePassing class.
+        Updates node features.
 
-        :param aggr_out: The aggregated messages.
-        :type aggr_out: torch.Tensor
-        :param x: The node features.
-        :type x: torch.Tensor
-        :return: The updated node features.
-        :rtype: torch.Tensor
+        :param aggr_out: Aggregated messages.
+        :param x: Node feature matrix.
+        :return: Updated node features.
         """
-        aggr_out = aggr_out + self.W(x)
-        return aggr_out
+        return aggr_out + self.W(x)
 
     def forward(self, x, edge_index, edge_attr):
         """
-        The forward pass of the Graph Integral Layer.
+        Forward pass of the GNOBlock.
 
         :param x: Node features.
-        :type x: torch.Tensor
-        :param edge_index: Edge index.
-        :type edge_index: torch.Tensor
+        :param edge_index: Edge indices.
         :param edge_attr: Edge features.
-        :type edge_attr: torch.Tensor
-        :return: Output of a single iteration over the Graph Integral Layer.
-        :rtype: torch.Tensor
+        :return: Updated node features.
         """
         return self.func(self.propagate(edge_index, x=x, edge_attr=edge_attr))
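In the rewritten ``message_and_aggregate``, each edge feature vector is mapped by the ``dense`` MLP to a ``width x width`` kernel matrix that multiplies the source-node features. A shape-level sketch of just the einsum step, with toy tensors standing in for the MLP output and the gathered node features:

    import torch

    num_edges, width = 5, 3
    kernels = torch.rand(num_edges, width, width)  # dense(edge_attr).view(...)
    x_src = torch.rand(num_edges, width)           # x[edge_index[0]]

    # One matrix-vector product per edge:
    # messages[e] = kernels[e] @ x_src[e]
    messages = torch.einsum("bij,bj->bi", kernels, x_src)
    assert messages.shape == (num_edges, width)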
@@ -1,10 +1,18 @@
+"""
+Module for performing integral for continuous convolution
+"""
+
 import torch
 
 
-class Integral(object):
+class Integral:
+    """
+    Integral class for continuous convolution
+    """
 
     def __init__(self, param):
-        """Integral class for continous convolution
+        """
+        Initialize the integral class
 
         :param param: type of continuous convolution
         :type param: string
@@ -2,8 +2,7 @@
 
 import torch
 
-from pina.utils import check_consistency
-import pina.model as pm  # avoid circular import
+from ...utils import check_consistency
 
 
 class LowRankBlock(torch.nn.Module):
@@ -78,9 +77,10 @@ class LowRankBlock(torch.nn.Module):
             basis function network.
         """
         super().__init__()
+        from ..feed_forward import FeedForward
 
         # Assignment (check consistency inside FeedForward)
-        self._basis = pm.FeedForward(
+        self._basis = FeedForward(
            input_dimensions=input_dimensions,
            output_dimensions=2 * rank * embedding_dimenion,
            inner_size=inner_size,
@@ -1,10 +1,6 @@
 """Module for Base Continuous Convolution class."""
 
-from abc import ABCMeta, abstractmethod
 import torch
-from .stride import Stride
-from .utils_convolution import optimizing
-import warnings
 
 
 class PODBlock(torch.nn.Module):
@@ -15,7 +11,8 @@ class PODBlock(torch.nn.Module):
     The layer is not trainable.
 
     .. note::
-        All the POD modes are stored in memory, avoiding to recompute them when the rank changes but increasing the memory usage.
+        All the POD modes are stored in memory, avoiding to recompute them when
+        the rank changes but increasing the memory usage.
     """
 
     def __init__(self, rank, scale_coefficients=True):
@@ -51,7 +48,8 @@ class PODBlock(torch.nn.Module):
     @property
     def basis(self):
         """
-        The POD basis. It is a matrix whose columns are the first `self.rank` POD modes.
+        The POD basis. It is a matrix whose columns are the first `self.rank`
+        POD modes.
 
         :rtype: torch.Tensor
         """
@@ -69,7 +67,7 @@ class PODBlock(torch.nn.Module):
         :rtype: dict
         """
         if self._scaler is None:
-            return
+            return None
 
         return {
             "mean": self._scaler["mean"][: self.rank],
@@ -115,7 +113,8 @@ class PODBlock(torch.nn.Module):
 
     def _fit_pod(self, X, randomized):
         """
-        Private method that computes the POD basis of the given tensor and stores it in the private member `_basis`.
+        Private method that computes the POD basis of the given tensor and
+        stores it in the private member `_basis`.
 
         :param torch.Tensor X: The tensor to be reduced.
         """
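A minimal sketch of what a POD fit along these lines can do, assuming snapshots are stored row-wise in ``X`` (an assumption; the randomized branch and the coefficient scaling are omitted):

    import torch

    def fit_pod(X, rank):
        # X: [n_snapshots, n_dof]; columns of the result are POD modes,
        # obtained from the SVD of the snapshot matrix.
        U, S, Vh = torch.linalg.svd(X.T, full_matrices=False)
        return U[:, :rank]

    X = torch.rand(20, 100)
    basis = fit_pod(X, rank=5)
    print(basis.shape)          # torch.Size([100, 5])
    coeffs = X @ basis          # project snapshots onto the modes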
@@ -1,5 +1,9 @@
+"""
+TODO: Add title.
+"""
+
 import torch
-import torch.nn as nn
+from torch import nn
 from ...utils import check_consistency
 
 
@@ -35,7 +39,8 @@ class ResidualBlock(nn.Module):
             (first block).
         :param bool spectral_norm: Apply spectral normalization to feedforward
             layers, defaults to False.
-        :param torch.nn.Module activation: Cctivation function after first block.
+        :param torch.nn.Module activation: Activation function after first
+            block.
 
         """
         super().__init__()
@@ -81,19 +86,17 @@ class ResidualBlock(nn.Module):
         return nn.utils.spectral_norm(x) if self._spectral_norm else x
 
 
-import torch
-import torch.nn as nn
-
-
 class EnhancedLinear(torch.nn.Module):
     """
     A wrapper class for enhancing a linear layer with activation and/or dropout.
 
     :param layer: The linear layer to be enhanced.
     :type layer: torch.nn.Module
-    :param activation: The activation function to be applied after the linear layer.
+    :param activation: The activation function to be applied after the linear
+        layer.
     :type activation: torch.nn.Module
-    :param dropout: The dropout probability to be applied after the activation (if provided).
+    :param dropout: The dropout probability to be applied after the activation
+        (if provided).
     :type dropout: float
 
     :Example:
@@ -110,9 +113,11 @@ class EnhancedLinear(torch.nn.Module):
 
         :param layer: The linear layer to be enhanced.
         :type layer: torch.nn.Module
-        :param activation: The activation function to be applied after the linear layer.
+        :param activation: The activation function to be applied after the
+            linear layer.
         :type activation: torch.nn.Module
-        :param dropout: The dropout probability to be applied after the activation (if provided).
+        :param dropout: The dropout probability to be applied after the
+            activation (if provided).
         :type dropout: float
         """
         super().__init__()
@@ -1,7 +1,10 @@
+"""
+TODO: Add title.
+"""
+
 import torch
-import torch.nn as nn
+from torch import nn
 from ...utils import check_consistency
 import warnings
 
 
 ######## 1D Spectral Convolution ###########
@@ -13,7 +16,8 @@ class SpectralConvBlock1D(nn.Module):
 
     def __init__(self, input_numb_fields, output_numb_fields, n_modes):
         """
-        The module computes the spectral convolution of the input with a linear kernel in the
+        The module computes the spectral convolution of the input with a linear
+        kernel in the
         fourier space, and then it maps the input back to the physical
         space.
 
@@ -106,17 +110,20 @@ class SpectralConvBlock2D(nn.Module):
 
     def __init__(self, input_numb_fields, output_numb_fields, n_modes):
         """
-        The module computes the spectral convolution of the input with a linear kernel in the
+        The module computes the spectral convolution of the input with a linear
+        kernel in the
         fourier space, and then it maps the input back to the physical
         space.
 
-        The block expects an input of size ``[batch, input_numb_fields, Nx, Ny]``
+        The block expects an input of size
+        ``[batch, input_numb_fields, Nx, Ny]``
         and returns an output of size ``[batch, output_numb_fields, Nx, Ny]``.
 
         :param int input_numb_fields: The number of channels for the input.
         :param int output_numb_fields: The number of channels for the output.
-        :param list | tuple n_modes: Number of modes to select for each dimension.
-            It must be at most equal to the ``floor(Nx/2)+1`` and ``floor(Ny/2)+1``.
+        :param list | tuple n_modes: Number of modes to select for each
+            dimension. It must be at most equal to the ``floor(Nx/2)+1`` and
+            ``floor(Ny/2)+1``.
         """
         super().__init__()
@@ -234,18 +241,21 @@ class SpectralConvBlock3D(nn.Module):
 
     def __init__(self, input_numb_fields, output_numb_fields, n_modes):
         """
-        The module computes the spectral convolution of the input with a linear kernel in the
+        The module computes the spectral convolution of the input with a
+        linear kernel in the
         fourier space, and then it maps the input back to the physical
         space.
 
-        The block expects an input of size ``[batch, input_numb_fields, Nx, Ny, Nz]``
-        and returns an output of size ``[batch, output_numb_fields, Nx, Ny, Nz]``.
+        The block expects an input of size
+        ``[batch, input_numb_fields, Nx, Ny, Nz]``
+        and returns an output of size
+        ``[batch, output_numb_fields, Nx, Ny, Nz]``.
 
         :param int input_numb_fields: The number of channels for the input.
         :param int output_numb_fields: The number of channels for the output.
-        :param list | tuple n_modes: Number of modes to select for each dimension.
-            It must be at most equal to the ``floor(Nx/2)+1``, ``floor(Ny/2)+1``
-            and ``floor(Nz/2)+1``.
+        :param list | tuple n_modes: Number of modes to select for each
+            dimension. It must be at most equal to the ``floor(Nx/2)+1``,
+            ``floor(Ny/2)+1`` and ``floor(Nz/2)+1``.
         """
         super().__init__()
@@ -347,7 +357,8 @@ class SpectralConvBlock3D(nn.Module):
             ``[batch, input_numb_fields, x, y, z]``.
         :type x: torch.Tensor
         :return: The output tensor obtained from the
-            spectral convolution of size ``[batch, output_numb_fields, x, y, z]``.
+            spectral convolution of size
+            ``[batch, output_numb_fields, x, y, z]``.
         :rtype: torch.Tensor
         """
@@ -1,18 +1,25 @@
+"""
+TODO: Add description
+"""
+
 import torch
 
 
-class Stride(object):
+class Stride:
+    """
+    TODO
+    """
 
-    def __init__(self, dict):
+    def __init__(self, dict_):
         """Stride class for continuous convolution
 
         :param param: type of continuous convolution
         :type param: string
         """
 
-        self._dict_stride = dict
+        self._dict_stride = dict_
         self._stride_continuous = None
-        self._stride_discrete = self._create_stride_discrete(dict)
+        self._stride_discrete = self._create_stride_discrete(dict_)
 
     def _create_stride_discrete(self, my_dict):
         """Creating the list for applying the filter
@@ -46,13 +53,13 @@ class Stride(object):
 
         # checking
 
-        if not all([len(s) == len(domain) for s in my_dict.values()]):
+        if not all(len(s) == len(domain) for s in my_dict.values()):
             raise IndexError("values in the dict must have all same length")
 
         if not all(v >= 0 for v in domain):
             raise ValueError("domain values must be greater than 0")
 
-        if not all(v == 1 or v == -1 or v == 0 for v in direction):
+        if not all(v in (0, -1, 1) for v in direction):
             raise ValueError("direction must be either equal to 1, -1 or 0")
 
         seq_jumps = [i for i, e in enumerate(jumps) if e == 0]
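Reading the checks together: the stride dictionary must carry equal-length sequences per key, with non-negative ``domain`` entries and ``direction`` entries in {-1, 0, 1}. A dictionary that would pass them, with key names inferred from the variables visible in this hunk (``domain``, ``direction``, ``jumps``) plus a hypothetical ``start`` key:

    stride = {
        "domain": [10, 10],    # extent per dimension, must be >= 0
        "start": [0, 0],       # hypothetical starting-point key
        "jumps": [3, 3],       # step of the filter per dimension
        "direction": [1, 1],   # each entry must be 0, 1 or -1
    }
    assert all(len(s) == 2 for s in stride.values())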
@@ -1,7 +1,14 @@
+"""
+TODO
+"""
+
 import torch
 
 
 def check_point(x, current_stride, dim):
+    """
+    TODO
+    """
     max_stride = current_stride + dim
     indeces = torch.logical_and(
         x[..., :-1] < max_stride, x[..., :-1] >= current_stride
@@ -33,16 +40,18 @@ def optimizing(f):
 
     def wrapper(*args, **kwargs):
 
-        if kwargs["type"] == "forward":
+        if kwargs["type_"] == "forward":
             if not wrapper.has_run_inverse:
                 wrapper.has_run_inverse = True
                 return f(*args, **kwargs)
 
-        if kwargs["type"] == "inverse":
+        if kwargs["type_"] == "inverse":
             if not wrapper.has_run:
                 wrapper.has_run = True
                 return f(*args, **kwargs)
 
         return f(*args, **kwargs)
 
     wrapper.has_run_inverse = False
     wrapper.has_run = False
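Because the wrapper looks up ``kwargs["type_"]`` directly, the decorated initializer must always be called with ``type_`` as a keyword, which is exactly what the earlier ``_choose_initialization(X, type_=...)`` call sites do. A stripped-down sketch of the run-once bookkeeping on function attributes (the fall-through branches of the real decorator are omitted):

    def run_once(f):
        def wrapper(*args, **kwargs):
            # The flag lives on the wrapper function object itself.
            if kwargs["type_"] == "forward" and not wrapper.has_run:
                wrapper.has_run = True
                return f(*args, **kwargs)
        wrapper.has_run = False
        return wrapper

    @run_once
    def init(x, type_):
        print("initialized", type_)

    init(1, type_="forward")   # prints once
    init(2, type_="forward")   # skipped on the second call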