Formatting
* Adding black as dev dependency
* Formatting pina code
* Formatting tests
commit 42ab1a666b (parent 4c4482b155), committed by Nicola Demo
@@ -10,7 +10,7 @@ __all__ = [
     "AveragingNeuralOperator",
     "LowRankNeuralOperator",
     "Spline",
-    "GraphNeuralOperator"
+    "GraphNeuralOperator",
 ]
 
 from .feed_forward import FeedForward, ResidualFeedForward
@@ -21,4 +21,4 @@ from .kernel_neural_operator import KernelNeuralOperator
 from .average_neural_operator import AveragingNeuralOperator
 from .low_rank_neural_operator import LowRankNeuralOperator
 from .spline import Spline
-from .graph_neural_operator import GraphNeuralOperator
+from .graph_neural_operator import GraphNeuralOperator
@@ -15,7 +15,7 @@ __all__ = [
     "AVNOBlock",
     "LowRankBlock",
     "RBFBlock",
-    "GNOBlock"
+    "GNOBlock",
 ]
 
 from .convolution_2d import ContinuousConvBlock
@@ -1,4 +1,4 @@
-""" Module for Averaging Neural Operator Layer class. """
+"""Module for Averaging Neural Operator Layer class."""
 
 from torch import nn, mean
 from ...utils import check_consistency
@@ -1,4 +1,4 @@
-""" Embedding modulus. """
+"""Embedding modulus."""
 
 import torch
 from pina.utils import check_consistency
@@ -8,14 +8,14 @@ class GNOBlock(MessagePassing):
     """
 
     def __init__(
-            self,
-            width,
-            edges_features,
-            n_layers=2,
-            layers=None,
-            inner_size=None,
-            internal_func=None,
-            external_func=None
+        self,
+        width,
+        edges_features,
+        n_layers=2,
+        layers=None,
+        inner_size=None,
+        internal_func=None,
+        external_func=None,
     ):
         """
         Initialize the Graph Integral Layer, inheriting from the MessagePassing class of PyTorch Geometric.
@@ -28,16 +28,19 @@ class GNOBlock(MessagePassing):
         :type n_layers: int
         """
         from pina.model import FeedForward
-        super(GNOBlock, self).__init__(aggr='mean')
+        super(GNOBlock, self).__init__(aggr="mean")
 
         self.width = width
         if layers is None and inner_size is None:
             inner_size = width
-        self.dense = FeedForward(input_dimensions=edges_features,
-                                 output_dimensions=width ** 2,
-                                 n_layers=n_layers,
-                                 layers=layers,
-                                 inner_size=inner_size,
-                                 func=internal_func)
+        self.dense = FeedForward(
+            input_dimensions=edges_features,
+            output_dimensions=width**2,
+            n_layers=n_layers,
+            layers=layers,
+            inner_size=inner_size,
+            func=internal_func,
+        )
         self.W = torch.nn.Linear(width, width)
         self.func = external_func()
@@ -53,7 +56,7 @@ class GNOBlock(MessagePassing):
         :rtype: torch.Tensor
         """
         x = self.dense(edge_attr).view(-1, self.width, self.width)
-        return torch.einsum('bij,bj->bi', x, x_j)
+        return torch.einsum("bij,bj->bi", x, x_j)
 
     def update(self, aggr_out, x):
         """
@@ -82,6 +85,4 @@ class GNOBlock(MessagePassing):
         :return: Output of a single iteration over the Graph Integral Layer.
         :rtype: torch.Tensor
         """
-        return self.func(
-            self.propagate(edge_index, x=x, edge_attr=edge_attr)
-        )
+        return self.func(self.propagate(edge_index, x=x, edge_attr=edge_attr))
@@ -1,4 +1,4 @@
-""" Module for Averaging Neural Operator Layer class. """
+"""Module for Averaging Neural Operator Layer class."""
 
 import torch
 
@@ -10,16 +10,16 @@ class GraphNeuralKernel(torch.nn.Module):
     """
 
    def __init__(
-            self,
-            width,
-            edge_features,
-            n_layers=2,
-            internal_n_layers=0,
-            internal_layers=None,
-            inner_size=None,
-            internal_func=None,
-            external_func=None,
-            shared_weights=False
+        self,
+        width,
+        edge_features,
+        n_layers=2,
+        internal_n_layers=0,
+        internal_layers=None,
+        inner_size=None,
+        internal_func=None,
+        external_func=None,
+        shared_weights=False,
     ):
         """
         The Graph Neural Kernel constructor.
@@ -53,21 +53,24 @@ class GraphNeuralKernel(torch.nn.Module):
                 layers=internal_layers,
                 inner_size=inner_size,
                 internal_func=internal_func,
-                external_func=external_func)
+                external_func=external_func,
+            )
             self.n_layers = n_layers
             self.forward = self.forward_shared
         else:
             self.layers = torch.nn.ModuleList(
-                [GNOBlock(
-                    width=width,
-                    edges_features=edge_features,
-                    n_layers=internal_n_layers,
-                    layers=internal_layers,
-                    inner_size=inner_size,
-                    internal_func=internal_func,
-                    external_func=external_func
-                )
-                for _ in range(n_layers)]
+                [
+                    GNOBlock(
+                        width=width,
+                        edges_features=edge_features,
+                        n_layers=internal_n_layers,
+                        layers=internal_layers,
+                        inner_size=inner_size,
+                        internal_func=internal_func,
+                        external_func=external_func,
+                    )
+                    for _ in range(n_layers)
+                ]
             )
 
     def forward(self, x, edge_index, edge_attr):
@@ -107,17 +110,17 @@ class GraphNeuralOperator(KernelNeuralOperator):
     """
 
     def __init__(
-            self,
-            lifting_operator,
-            projection_operator,
-            edge_features,
-            n_layers=10,
-            internal_n_layers=0,
-            inner_size=None,
-            internal_layers=None,
-            internal_func=None,
-            external_func=None,
-            shared_weights=True
+        self,
+        lifting_operator,
+        projection_operator,
+        edge_features,
+        n_layers=10,
+        internal_n_layers=0,
+        inner_size=None,
+        internal_layers=None,
+        internal_func=None,
+        external_func=None,
+        shared_weights=True,
     ):
         """
         The Graph Neural Operator constructor.
@@ -158,9 +161,9 @@ class GraphNeuralOperator(KernelNeuralOperator):
                 external_func=external_func,
                 internal_func=internal_func,
                 n_layers=n_layers,
-                shared_weights=shared_weights
+                shared_weights=shared_weights,
             ),
-            projection_operator=projection_operator
+            projection_operator=projection_operator,
         )
 
     def forward(self, x):
@@ -8,6 +8,7 @@ from ...utils import custom_warning_format
 warnings.formatwarning = custom_warning_format
 warnings.filterwarnings("always", category=DeprecationWarning)
 warnings.warn(
-    f"'pina.model.layers' is deprecated and will be removed "
-    f"in future versions. Please use 'pina.model.block' instead.",
-    DeprecationWarning)
+    f"'pina.model.layers' is deprecated and will be removed "
+    f"in future versions. Please use 'pina.model.block' instead.",
+    DeprecationWarning,
+)