New Residual Model and Fix relative import
* Adding Residual MLP
* Adding test for Residual MLP
* Modified relative import in Continuous Conv
Commit 17464ceca9 (parent ba7371f350), committed by Nicola Demo.
@@ -1,6 +1,7 @@
 __all__ = [
     'ContinuousConvBlock',
     'ResidualBlock',
+    'EnhancedLinear',
     'SpectralConvBlock1D',
     'SpectralConvBlock2D',
     'SpectralConvBlock3D',
@@ -10,6 +11,6 @@ __all__ = [
 ]

 from .convolution_2d import ContinuousConvBlock
-from .residual import ResidualBlock
+from .residual import ResidualBlock, EnhancedLinear
 from .spectral import SpectralConvBlock1D, SpectralConvBlock2D, SpectralConvBlock3D
 from .fourier import FourierBlock1D, FourierBlock2D, FourierBlock3D
@@ -113,6 +113,21 @@ class BaseContinuousConv(torch.nn.Module, metaclass=ABCMeta):
         else:
             self.transpose = self.transpose_overlap

+    class DefaultKernel(torch.nn.Module):
+        def __init__(self, input_dim, output_dim):
+            super().__init__()
+            assert isinstance(input_dim, int)
+            assert isinstance(output_dim, int)
+            self._model = torch.nn.Sequential(
+                torch.nn.Linear(input_dim, 20),
+                torch.nn.ReLU(),
+                torch.nn.Linear(20, 20),
+                torch.nn.ReLU(),
+                torch.nn.Linear(20, output_dim)
+            )
+
+        def forward(self, x):
+            return self._model(x)
+
     @property
     def net(self):
         return self._net
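For reference, a minimal sketch of exercising the new DefaultKernel on its own. The import path is an assumption (the diff does not show file locations); the nested class is reachable through BaseContinuousConv without instantiating the abstract base.

    import torch
    from pina.model.layers.convolution import BaseContinuousConv  # assumed path

    # DefaultKernel maps each point's coordinates to a filter value:
    # here 2 input coordinates -> 1 scalar output per point.
    kernel = BaseContinuousConv.DefaultKernel(input_dim=2, output_dim=1)
    points = torch.rand(10, 2)   # 10 points in 2D
    values = kernel(points)      # shape: torch.Size([10, 1])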
@@ -2,7 +2,6 @@
 from .convolution import BaseContinuousConv
 from .utils_convolution import check_point, map_points_
 from .integral import Integral
-from ..feed_forward import FeedForward
 import torch

@@ -34,8 +33,8 @@ class ContinuousConvBlock(BaseContinuousConv):
     :param stride: Stride for the filter.
     :type stride: dict
     :param model: Neural network for inner parametrization,
-        defaults to None. If None, pina.FeedForward is used, more
-        on https://mathlab.github.io/PINA/_rst/fnn.html.
+        defaults to None. If None, a default multilayer perceptron
+        is used, see BaseContinuousConv.DefaultKernel.
     :type model: torch.nn.Module, optional
     :param optimize: Flag for performing optimization on the continuous
         filter, defaults to False. The flag `optimize=True` should be
@@ -152,7 +151,7 @@ class ContinuousConvBlock(BaseContinuousConv):
         nets = []
         if self._net is None:
             for _ in range(self._input_numb_field * self._output_numb_field):
-                tmp = FeedForward(len(self._dim), 1)
+                tmp = ContinuousConvBlock.DefaultKernel(len(self._dim), 1)
                 nets.append(tmp)
         else:
             if not isinstance(model, object):
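The change above swaps the FeedForward default for the nested DefaultKernel. Per the updated docstring, a user can still supply a custom kernel; below is a hedged sketch of a compatible replacement. The name MyKernel and the layer widths are illustrative, and the full ContinuousConvBlock constructor call is not shown in this diff.

    import torch

    # A drop-in alternative to DefaultKernel: same contract, mapping
    # the filter coordinates (len(self._dim) inputs in the diff) to a
    # single output channel.
    class MyKernel(torch.nn.Module):
        def __init__(self, input_dim, output_dim):
            super().__init__()
            self._model = torch.nn.Sequential(
                torch.nn.Linear(input_dim, 40),
                torch.nn.Tanh(),
                torch.nn.Linear(40, output_dim))

        def forward(self, x):
            return self._model(x)

    # An instance would be passed through ContinuousConvBlock's `model`
    # argument in place of the default kernel.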
@@ -92,4 +92,39 @@ class ResidualBlock(nn.Module):

     @property
     def activation(self):
         return self._activation
+
+
+class EnhancedLinear(torch.nn.Module):
+    """
+    TODO
+    """
+
+    def __init__(self, layer, activation=None, dropout=None):
+        super().__init__()
+
+        # check consistency
+        check_consistency(layer, nn.Module)
+        if activation is not None:
+            check_consistency(activation, nn.Module)
+        if dropout is not None:
+            check_consistency(dropout, float)
+
+        # assign forward
+        if (dropout is None) and (activation is None):
+            self._model = torch.nn.Sequential(layer)
+
+        elif (dropout is None) and (activation is not None):
+            self._model = torch.nn.Sequential(layer,
+                                              activation)
+
+        elif (dropout is not None) and (activation is None):
+            self._model = torch.nn.Sequential(layer,
+                                              self._drop(dropout))
+
+        elif (dropout is not None) and (activation is not None):
+            self._model = torch.nn.Sequential(layer,
+                                              activation,
+                                              self._drop(dropout))
+
+    def forward(self, x):
+        return self._model(x)
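A usage sketch for the new EnhancedLinear wrapper. Assumptions: the import path follows the updated __all__ above, and the _drop helper (not shown in this hunk) wraps the dropout rate in a torch.nn.Dropout.

    import torch
    from pina.model.layers import EnhancedLinear  # assumed import path

    # With both extras set, the wrapper builds
    # Sequential(layer, activation, dropout), matching the last branch above.
    enhanced = EnhancedLinear(layer=torch.nn.Linear(10, 20),
                              activation=torch.nn.ReLU(),
                              dropout=0.2)

    x = torch.rand(4, 10)
    y = enhanced(x)   # shape: torch.Size([4, 20])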