Rename classes and modules for GNO

committed by Nicola Demo
parent bd24b0c1c2
commit 6964f4e7d9
@@ -10,7 +10,7 @@ __all__ = [
     "AveragingNeuralOperator",
     "LowRankNeuralOperator",
     "Spline",
-    "GNO"
+    "GraphNeuralOperator"
 ]

 from .feed_forward import FeedForward, ResidualFeedForward
@@ -21,4 +21,4 @@ from .base_no import KernelNeuralOperator
 from .avno import AveragingNeuralOperator
 from .lno import LowRankNeuralOperator
 from .spline import Spline
-from .gno import GNO
+from .gno import GraphNeuralOperator
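Since both the `__all__` entry and the public import change, the rename is a breaking change for downstream code. A minimal migration sketch (the import lines come from this diff; the constructor arguments mirror the tests further down, and the surrounding script is hypothetical):

```python
import torch
# old public name, removed by this commit:
# from pina.model import GNO
# new public name:
from pina.model import GraphNeuralOperator

# lifting/projection nets sized as in the tests below
lifting = torch.nn.Linear(6, 16)
projection = torch.nn.Linear(16, 3)

model = GraphNeuralOperator(lifting_operator=lifting,
                            projection_operator=projection,
                            edge_features=3,
                            internal_layers=[16, 16])
```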
@@ -1,6 +1,6 @@
 import torch
 from torch.nn import Tanh
-from .layers import GraphIntegralLayer
+from .layers import GNOBlock
 from .base_no import KernelNeuralOperator


@@ -46,7 +46,7 @@ class GraphNeuralKernel(torch.nn.Module):
             internal_func = Tanh

         if shared_weights:
-            self.layers = GraphIntegralLayer(
+            self.layers = GNOBlock(
                 width=width,
                 edges_features=edge_features,
                 n_layers=internal_n_layers,
@@ -58,7 +58,7 @@ class GraphNeuralKernel(torch.nn.Module):
             self.forward = self.forward_shared
         else:
             self.layers = torch.nn.ModuleList(
-                [GraphIntegralLayer(
+                [GNOBlock(
                     width=width,
                     edges_features=edge_features,
                     n_layers=internal_n_layers,
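The two branches above implement the kernel's weight-sharing option: with `shared_weights=True` a single `GNOBlock` is applied repeatedly, so its parameters are tied across layers; otherwise a `ModuleList` holds one independently parametrized block per layer. A self-contained sketch of that pattern (generic `Linear` blocks stand in for `GNOBlock`; this is not the actual PINA implementation):

```python
import torch

class SharedVsUnsharedStack(torch.nn.Module):
    """Sketch of the shared_weights pattern used by GraphNeuralKernel."""

    def __init__(self, width, n_layers, shared_weights=True):
        super().__init__()
        self.n_layers = n_layers
        if shared_weights:
            # one block, reused at every layer -> parameters are tied
            self.layers = torch.nn.Linear(width, width)
            self.forward = self.forward_shared
        else:
            # independent parameters per layer
            self.layers = torch.nn.ModuleList(
                [torch.nn.Linear(width, width) for _ in range(n_layers)])

    def forward(self, x):
        for layer in self.layers:
            x = layer(x)
        return x

    def forward_shared(self, x):
        for _ in range(self.n_layers):
            x = self.layers(x)
        return x
```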
@@ -101,7 +101,7 @@ class GraphNeuralKernel(torch.nn.Module):
         return x


-class GNO(KernelNeuralOperator):
+class GraphNeuralOperator(KernelNeuralOperator):
     """
     TODO add docstring
     """
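As the new name makes explicit, `GraphNeuralOperator` is the graph instantiation of the kernel neural operator pattern inherited from `KernelNeuralOperator`: lift node features, apply the integral kernel, project back. A schematic of that composition (hypothetical pseudocode illustrating the pattern, not the actual `KernelNeuralOperator` internals):

```python
import torch

class KernelOperatorSketch(torch.nn.Module):
    """Schematic of the lifting -> kernel -> projection composition."""

    def __init__(self, lifting_operator, kernel, projection_operator):
        super().__init__()
        self.lifting_operator = lifting_operator
        self.kernel = kernel
        self.projection_operator = projection_operator

    def forward(self, x):
        x = self.lifting_operator(x)        # e.g. Linear(6, 16) in the tests
        x = self.kernel(x)                  # GraphNeuralKernel of GNOBlocks
        return self.projection_operator(x)  # e.g. Linear(16, 3)
```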
@@ -15,7 +15,7 @@ __all__ = [
     "AVNOBlock",
     "LowRankBlock",
     "RBFBlock",
-    "GraphIntegralLayer"
+    "GNOBlock"
 ]

 from .convolution_2d import ContinuousConvBlock
@@ -32,4 +32,4 @@ from .embedding import PeriodicBoundaryEmbedding, FourierFeatureEmbedding
 from .avno_layer import AVNOBlock
 from .lowrank_layer import LowRankBlock
 from .rbf_layer import RBFBlock
-from .graph_integral_kernel import GraphIntegralLayer
+from .gno_block import GNOBlock
@@ -2,10 +2,11 @@ import torch
 from torch_geometric.nn import MessagePassing


-class GraphIntegralLayer(MessagePassing):
+class GNOBlock(MessagePassing):
     """
     TODO: Add documentation
     """

     def __init__(
         self,
         width,
@@ -27,7 +28,7 @@ class GraphIntegralLayer(MessagePassing):
         :type n_layers: int
         """
         from pina.model import FeedForward
-        super(GraphIntegralLayer, self).__init__(aggr='mean')
+        super(GNOBlock, self).__init__(aggr='mean')
         self.width = width
         if layers is None and inner_size is None:
             inner_size = width
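`GNOBlock` keeps its base behavior: a torch_geometric `MessagePassing` layer with mean aggregation over neighbors, the discrete analogue of the GNO integral kernel. A minimal, self-contained sketch of such a layer (the `message` body is illustrative; this diff does not show `GNOBlock`'s actual one, which builds a pina `FeedForward` kernel network instead of a single `Linear`):

```python
import torch
from torch_geometric.nn import MessagePassing

class MeanKernelSketch(MessagePassing):
    """Toy mean-aggregation layer in the style of GNOBlock."""

    def __init__(self, width, edge_features):
        super().__init__(aggr='mean')  # average messages over neighbors
        # kernel network mapping edge features to a width x width operator
        self.kernel = torch.nn.Linear(edge_features, width * width)
        self.width = width

    def forward(self, x, edge_index, edge_attr):
        return self.propagate(edge_index, x=x, edge_attr=edge_attr)

    def message(self, x_j, edge_attr):
        # per-edge linear operator K(e) applied to the source node state
        K = self.kernel(edge_attr).view(-1, self.width, self.width)
        return torch.bmm(K, x_j.unsqueeze(-1)).squeeze(-1)
```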
@@ -1,7 +1,7 @@
 import pytest
 import torch
 from pina.graph import KNNGraph
-from pina.model import GNO
+from pina.model import GraphNeuralOperator
 from torch_geometric.data import Batch

 x = [torch.rand(100, 6) for _ in range(10)]
@@ -10,7 +10,6 @@ graph = KNNGraph(x=x, pos=pos, build_edge_attr=True, k=6)
 input_ = Batch.from_data_list(graph.data)


-
 @pytest.mark.parametrize(
     "shared_weights",
     [
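The fixture batches 10 graphs of 100 nodes each, so the batched node-feature matrix has 10 × 100 = 1000 rows; that is why the forward tests below assert an output of `torch.Size([1000, 3])` after the `Linear(16, 3)` projection. The same batching arithmetic in standalone torch_geometric, without pina's `KNNGraph` wrapper (a sketch, not part of the test file):

```python
import torch
from torch_geometric.data import Data, Batch

# 10 graphs, 100 nodes each, 6 features per node (as in the test fixture)
graphs = [Data(x=torch.rand(100, 6)) for _ in range(10)]
batch = Batch.from_data_list(graphs)

# batching concatenates node features along dim 0
assert batch.x.shape == torch.Size([1000, 6])
# batch.batch maps each of the 1000 nodes back to its graph index
assert batch.batch.max().item() == 9
```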
@@ -21,29 +20,29 @@ input_ = Batch.from_data_list(graph.data)
 def test_constructor(shared_weights):
     lifting_operator = torch.nn.Linear(6, 16)
     projection_operator = torch.nn.Linear(16, 3)
-    GNO(lifting_operator=lifting_operator,
+    GraphNeuralOperator(lifting_operator=lifting_operator,
         projection_operator=projection_operator,
         edge_features=3,
         internal_layers=[16, 16],
         shared_weights=shared_weights)

-    GNO(lifting_operator=lifting_operator,
+    GraphNeuralOperator(lifting_operator=lifting_operator,
         projection_operator=projection_operator,
         edge_features=3,
         inner_size=16,
         internal_n_layers=10,
         shared_weights=shared_weights)

     int_func = torch.nn.Softplus
     ext_func = torch.nn.ReLU

-    GNO(lifting_operator=lifting_operator,
+    GraphNeuralOperator(lifting_operator=lifting_operator,
         projection_operator=projection_operator,
         edge_features=3,
         internal_n_layers=10,
         shared_weights=shared_weights,
         internal_func=int_func,
         external_func=ext_func)


 @pytest.mark.parametrize(
@@ -56,14 +55,15 @@ def test_constructor(shared_weights):
 def test_forward_1(shared_weights):
     lifting_operator = torch.nn.Linear(6, 16)
     projection_operator = torch.nn.Linear(16, 3)
-    model = GNO(lifting_operator=lifting_operator,
+    model = GraphNeuralOperator(lifting_operator=lifting_operator,
                 projection_operator=projection_operator,
                 edge_features=3,
                 internal_layers=[16, 16],
                 shared_weights=shared_weights)
     output_ = model(input_)
     assert output_.shape == torch.Size([1000, 3])

+
 @pytest.mark.parametrize(
     "shared_weights",
     [
@@ -74,15 +74,16 @@ def test_forward_1(shared_weights):
 def test_forward_2(shared_weights):
     lifting_operator = torch.nn.Linear(6, 16)
     projection_operator = torch.nn.Linear(16, 3)
-    model = GNO(lifting_operator=lifting_operator,
+    model = GraphNeuralOperator(lifting_operator=lifting_operator,
                 projection_operator=projection_operator,
                 edge_features=3,
                 inner_size=32,
                 internal_n_layers=2,
                 shared_weights=shared_weights)
     output_ = model(input_)
     assert output_.shape == torch.Size([1000, 3])

+
 @pytest.mark.parametrize(
     "shared_weights",
     [
@@ -93,17 +94,18 @@ def test_forward_2(shared_weights):
 def test_backward(shared_weights):
     lifting_operator = torch.nn.Linear(6, 16)
     projection_operator = torch.nn.Linear(16, 3)
-    model = GNO(lifting_operator=lifting_operator,
+    model = GraphNeuralOperator(lifting_operator=lifting_operator,
                 projection_operator=projection_operator,
                 edge_features=3,
                 internal_layers=[16, 16],
                 shared_weights=shared_weights)
     input_.x.requires_grad = True
     output_ = model(input_)
     l = torch.mean(output_)
     l.backward()
     assert input_.x.grad.shape == torch.Size([1000, 6])

+
 @pytest.mark.parametrize(
     "shared_weights",
     [
@@ -114,14 +116,14 @@ def test_backward(shared_weights):
 def test_backward_2(shared_weights):
     lifting_operator = torch.nn.Linear(6, 16)
     projection_operator = torch.nn.Linear(16, 3)
-    model = GNO(lifting_operator=lifting_operator,
+    model = GraphNeuralOperator(lifting_operator=lifting_operator,
                 projection_operator=projection_operator,
                 edge_features=3,
                 inner_size=32,
                 internal_n_layers=2,
                 shared_weights=shared_weights)
     input_.x.requires_grad = True
     output_ = model(input_)
     l = torch.mean(output_)
     l.backward()
     assert input_.x.grad.shape == torch.Size([1000, 6])
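Taken together, the tests double as a usage recipe for the renamed API. A condensed end-to-end sketch under the same assumptions as the test file (the `KNNGraph` call and constructor arguments come from this diff; the `pos` shape is an assumption, since its definition is not in these hunks):

```python
import torch
from torch_geometric.data import Batch
from pina.graph import KNNGraph
from pina.model import GraphNeuralOperator

# 10 random graphs: 100 nodes, 6 features, 3-d positions (pos shape assumed)
x = [torch.rand(100, 6) for _ in range(10)]
pos = [torch.rand(100, 3) for _ in range(10)]
graph = KNNGraph(x=x, pos=pos, build_edge_attr=True, k=6)
batch = Batch.from_data_list(graph.data)

model = GraphNeuralOperator(lifting_operator=torch.nn.Linear(6, 16),
                            projection_operator=torch.nn.Linear(16, 3),
                            edge_features=3,
                            inner_size=32,
                            internal_n_layers=2,
                            shared_weights=True)

out = model(batch)       # -> torch.Size([1000, 3])
out.mean().backward()    # gradients flow back to batch.x
```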