Layers and Models update PR
* add residual block
* add test conv and residual block
* modify FFN kwargs
committed by Nicola Demo
parent 8c16e27ae4
commit 15ecaacb7c
@@ -2,19 +2,17 @@
 import torch
 import torch.nn as nn

 from pina.label_tensor import LabelTensor


 class FeedForward(torch.nn.Module):
     """
     The PINA implementation of a feedforward network, also referred to as a
     multilayer perceptron.

-    :param int input_variables: The number of input components of the model.
-        Expected tensor shape of the form (*, input_variables), where *
+    :param int input_dimensions: The number of input components of the model.
+        Expected tensor shape of the form (*, input_dimensions), where *
         means any number of dimensions including none.
-    :param int output_variables: The number of output components of the model.
-        Expected tensor shape of the form (*, output_variables), where *
+    :param int output_dimensions: The number of output components of the model.
+        Expected tensor shape of the form (*, output_dimensions), where *
         means any number of dimensions including none.
     :param int inner_size: number of neurons in the hidden layer(s). Default is
         20.
@@ -28,20 +26,20 @@ class FeedForward(torch.nn.Module):
         `inner_size` are not considered.
     :param bool bias: If `True` the MLP will consider some bias.
     """
-    def __init__(self, input_variables, output_variables, inner_size=20,
+    def __init__(self, input_dimensions, output_dimensions, inner_size=20,
                  n_layers=2, func=nn.Tanh, layers=None, bias=True):
         """
         """
         super().__init__()

-        if not isinstance(input_variables, int):
-            raise ValueError('input_variables expected to be int.')
-        self.input_dimension = input_variables
+        if not isinstance(input_dimensions, int):
+            raise ValueError('input_dimensions expected to be int.')
+        self.input_dimension = input_dimensions

-        if not isinstance(output_variables, int):
-            raise ValueError('output_variables expected to be int.')
-        self.output_dimension = output_variables
+        if not isinstance(output_dimensions, int):
+            raise ValueError('output_dimensions expected to be int.')
+        self.output_dimension = output_dimensions

         if layers is None:
             layers = [inner_size] * n_layers
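For context on the kwargs change, a minimal usage sketch of the renamed constructor; the import path and the forward behaviour are assumptions inferred from the surrounding diff, not shown in this PR:

# Hedged usage sketch (not part of the diff): the import path below is
# assumed from the package layout visible elsewhere in this PR.
import torch
from pina.model.feed_forward import FeedForward

# 2 input components -> 1 output component, with the default
# two hidden layers (n_layers=2) of 20 neurons each (inner_size=20).
net = FeedForward(input_dimensions=2, output_dimensions=1)

x = torch.rand(10, 2)   # last dimension must equal input_dimensions
y = net(x)              # expected shape: (10, 1)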
@@ -1,7 +1,7 @@
 __all__ = [
     'BaseContinuousConv',
-    'ContinuousConv'
+    'ContinuousConvBlock',
+    'ResidualBlock'
 ]

 from .convolution import BaseContinuousConv
-from .convolution_2d import ContinuousConv
+from .convolution_2d import ContinuousConvBlock
+from .residual import ResidualBlock
@@ -6,7 +6,7 @@ from ..feed_forward import FeedForward
 import torch


-class ContinuousConv(BaseContinuousConv):
+class ContinuousConvBlock(BaseContinuousConv):
     """
     Implementation of Continuous Convolutional operator.
pina/model/layers/fourier.py (new file, 24 lines)
@@ -0,0 +1,24 @@
+import torch
+import torch.nn as nn
+from ...utils import check_consistency
+
+
+class FourierBlock(nn.Module):
+    """Fourier block base class. Implementation of a Fourier block.
+
+    .. seealso::
+
+        **Original reference**: Li, Zongyi, et al.
+        "Fourier neural operator for parametric partial
+        differential equations." arXiv preprint
+        `arXiv:2010.08895 (2020) <https://arxiv.org/abs/2010.08895.pdf>`_.
+    """
+
+    def __init__(self):
+        super().__init__()
+
+    def forward(self, x):
+        pass
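FourierBlock.forward is left as a stub in this commit. For orientation only, a minimal sketch of the mode-truncated spectral convolution at the core of the cited Li et al. (2020) architecture; the class name, the 1D setting, and every parameter below are illustrative assumptions, not this PR's implementation:

# Hedged sketch of an FNO-style spectral convolution (1D), NOT the PR's code.
import torch
import torch.nn as nn

class SpectralConv1dSketch(nn.Module):
    def __init__(self, channels, modes):
        super().__init__()
        self.modes = modes  # number of low-frequency Fourier modes kept
        scale = 1.0 / (channels * channels)
        # one complex (in_channels x out_channels) weight per retained mode
        self.weights = nn.Parameter(
            scale * torch.rand(channels, channels, modes, dtype=torch.cfloat))

    def forward(self, x):
        # x: (batch, channels, length)
        x_ft = torch.fft.rfft(x)                  # to frequency space
        out_ft = torch.zeros_like(x_ft)
        out_ft[..., :self.modes] = torch.einsum(  # linear map on kept modes
            'bim,iom->bom', x_ft[..., :self.modes], self.weights)
        return torch.fft.irfft(out_ft, n=x.size(-1))  # back to physical space

In the cited paper, a full Fourier block wraps such an operator with a pointwise linear skip path and an activation.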
@@ -1,4 +1,3 @@
 """ Integral class for continuous convolution"""
 import torch
pina/model/layers/residual.py (new file, 95 lines)
@@ -0,0 +1,95 @@
+import torch
+import torch.nn as nn
+from ...utils import check_consistency
+
+
+class ResidualBlock(nn.Module):
+    """Residual block base class. Implementation of a residual block.
+
+    .. seealso::
+
+        **Original reference**: He, Kaiming, et al.
+        "Deep residual learning for image recognition."
+        Proceedings of the IEEE conference on computer vision
+        and pattern recognition. 2016.
+        `<https://arxiv.org/pdf/1512.03385.pdf>`_.
+    """
+
+    def __init__(self, input_dim, output_dim,
+                 hidden_dim, spectral_norm=False,
+                 activation=torch.nn.ReLU()):
+        """Residual block constructor.
+
+        :param int input_dim: Dimension of the input to pass to the
+            feedforward linear layer.
+        :param int output_dim: Dimension of the output from the
+            residual layer.
+        :param int hidden_dim: Hidden dimension for mapping the input
+            (first block).
+        :param bool spectral_norm: Apply spectral normalization to the
+            feedforward layers, defaults to False.
+        :param torch.nn.Module activation: Activation function after the
+            first block.
+        """
+        super().__init__()
+
+        # check consistency
+        check_consistency(spectral_norm, bool)
+        check_consistency(input_dim, int)
+        check_consistency(output_dim, int)
+        check_consistency(hidden_dim, int)
+        check_consistency(activation, torch.nn.Module)
+
+        # assign variables
+        self._spectral_norm = spectral_norm
+        self._input_dim = input_dim
+        self._output_dim = output_dim
+        self._hidden_dim = hidden_dim
+        self._activation = activation
+
+        # create layers
+        self.l1 = self._spect_norm(nn.Linear(input_dim, hidden_dim))
+        self.l2 = self._spect_norm(nn.Linear(hidden_dim, output_dim))
+        self.l3 = self._spect_norm(nn.Linear(input_dim, output_dim))
+
+    def forward(self, x):
+        """Forward pass for the residual block layer.
+
+        :param torch.Tensor x: Input tensor for the residual layer.
+        :return: Output tensor for the residual layer.
+        :rtype: torch.Tensor
+        """
+        y = self.activation(self.l1(x))
+        y = self.l2(y)
+        x = self.l3(x)
+        return y + x
+
+    def _spect_norm(self, x):
+        """Apply spectral normalization to a layer, if enabled.
+
+        :param x: A torch.nn.Module linear layer.
+        :type x: torch.nn.Module
+        :return: The spectrally normalized layer, or the layer unchanged.
+        :rtype: torch.nn.Module
+        """
+        return nn.utils.spectral_norm(x) if self._spectral_norm else x
+
+    @property
+    def spectral_norm(self):
+        return self._spectral_norm
+
+    @property
+    def input_dim(self):
+        return self._input_dim
+
+    @property
+    def output_dim(self):
+        return self._output_dim
+
+    @property
+    def hidden_dim(self):
+        return self._hidden_dim
+
+    @property
+    def activation(self):
+        return self._activation
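A short usage sketch of the new block; the import path follows the updated __init__.py above, and the shapes are illustrative:

# Hedged usage sketch (not part of the diff).
import torch
from pina.model.layers import ResidualBlock

block = ResidualBlock(input_dim=3, output_dim=5, hidden_dim=16,
                      spectral_norm=True)

x = torch.rand(10, 3)
out = block(x)          # expected shape: (10, 5)
# Note the skip path is itself a linear map (l3), so input_dim and
# output_dim may differ, unlike the identity shortcut in He et al. (2016).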
pina/model/layers/spectral.py (new file, 16 lines)
@@ -0,0 +1,16 @@
+import torch
+import torch.nn as nn
+from ...utils import check_consistency
+
+
+class SpectralConvBlock(nn.Module):
+    """
+    Implementation of spectral convolution block.
+    """
+
+    def __init__(self):
+        super().__init__()
+
+    def forward(self, x):
+        pass