fnn update, pinn torch models, tests update. (#88)

* fnn update, remove labeltensors
* allow custom torch models
* updating tests

---------

Co-authored-by: Dario Coscia <dariocoscia@Dario-Coscia.local>
Co-authored-by: Dario Coscia <dariocoscia@dhcp-031.eduroam.sissa.it>
Committed by: Nicola Demo
Parent: c8fb7715c4
Commit: be11110bb2
@@ -10,10 +10,12 @@ class FeedForward(torch.nn.Module):
     The PINA implementation of feedforward network, also referred to as
     multilayer perceptron.

-    :param list(str) input_variables: the list containing the labels
-        corresponding to the input components of the model.
-    :param list(str) output_variables: the list containing the labels
-        corresponding to the components of the output computed by the model.
+    :param int input_variables: The number of input components of the model.
+        Expected tensor shape of the form (*, input_variables), where *
+        means any number of dimensions including none.
+    :param int output_variables: The number of output components of the model.
+        Expected tensor shape of the form (*, output_variables), where *
+        means any number of dimensions including none.
     :param int inner_size: number of neurons in the hidden layer(s). Default is
         20.
     :param int n_layers: number of hidden layers. Default is 2.
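Under the updated docstring, both sizes are plain integers instead of label lists. A minimal construction sketch, assuming the usual `from pina.model import FeedForward` import path (sizes illustrative, not from this commit):

import torch.nn as nn
from pina.model import FeedForward  # import path assumed, not shown in this diff

# After this change both arguments must be ints (a label list now raises
# ValueError in the constructor); inputs are expected to have shape
# (*, input_variables).
model = FeedForward(input_variables=3, output_variables=2,
                    inner_size=20, n_layers=2, func=nn.Tanh)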
@@ -24,46 +26,31 @@ class FeedForward(torch.nn.Module):
     :param iterable(int) layers: a list containing the number of neurons for
         any hidden layers. If specified, the parameters `n_layers` and
         `inner_size` are not considered.
-    :param iterable(torch.nn.Module) extra_features: the additional input
-        features to use as augmented input.
     :param bool bias: If `True` the MLP will consider some bias.
     """
     def __init__(self, input_variables, output_variables, inner_size=20,
-                 n_layers=2, func=nn.Tanh, layers=None, extra_features=None,
-                 bias=True):
+                 n_layers=2, func=nn.Tanh, layers=None, bias=True):
        """
        """
        super().__init__()

-        if extra_features is None:
-            extra_features = []
-        self.extra_features = nn.Sequential(*extra_features)
-
-        if isinstance(input_variables, int):
-            self.input_variables = None
-            self.input_dimension = input_variables
-        elif isinstance(input_variables, (tuple, list)):
-            self.input_variables = input_variables
-            self.input_dimension = len(input_variables)
-
-        if isinstance(output_variables, int):
-            self.output_variables = None
-            self.output_dimension = output_variables
-        elif isinstance(output_variables, (tuple, list)):
-            self.output_variables = output_variables
-            self.output_dimension = len(output_variables)
-
-        n_features = len(extra_features)
+        if not isinstance(input_variables, int):
+            raise ValueError('input_variables expected to be int.')
+        self.input_dimension = input_variables
+
+        if not isinstance(output_variables, int):
+            raise ValueError('output_variables expected to be int.')
+        self.output_dimension = output_variables

        if layers is None:
            layers = [inner_size] * n_layers

        tmp_layers = layers.copy()
-        tmp_layers.insert(0, self.input_dimension+n_features)
+        tmp_layers.insert(0, self.input_dimension)
        tmp_layers.append(self.output_dimension)

        self.layers = []
-        for i in range(len(tmp_layers)-1):
+        for i in range(len(tmp_layers) - 1):
            self.layers.append(
                nn.Linear(tmp_layers[i], tmp_layers[i + 1], bias=bias)
            )
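As a sanity check of the layer bookkeeping above, here is the same construction traced outside the class with illustrative sizes; note the `+ n_features` offset from the removed extra-features path is gone:

import torch.nn as nn

input_dimension, output_dimension = 3, 2   # illustrative sizes
inner_size, n_layers = 20, 2               # defaults from the signature

layers = [inner_size] * n_layers           # [20, 20]
tmp_layers = layers.copy()
tmp_layers.insert(0, input_dimension)      # [3, 20, 20], no n_features offset
tmp_layers.append(output_dimension)        # [3, 20, 20, 2]

linears = [nn.Linear(tmp_layers[i], tmp_layers[i + 1])
           for i in range(len(tmp_layers) - 1)]
# -> Linear(3, 20), Linear(20, 20), Linear(20, 2)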
@@ -71,7 +58,7 @@ class FeedForward(torch.nn.Module):
        if isinstance(func, list):
            self.functions = func
        else:
-            self.functions = [func for _ in range(len(self.layers)-1)]
+            self.functions = [func for _ in range(len(self.layers) - 1)]

        if len(self.layers) != len(self.functions) + 1:
            raise RuntimeError('inconsistent number of layers and functions')
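The consistency check above implies one activation per hidden transition: N Linear layers need N - 1 functions. A sketch of the list form, assuming the import path used earlier (activation choices arbitrary):

import torch.nn as nn
from pina.model import FeedForward  # import path assumed

# With n_layers=2 there are 3 Linear layers, so a func list must contain
# exactly 2 activations, otherwise the RuntimeError above is raised.
model = FeedForward(3, 2, n_layers=2, func=[nn.Tanh, nn.ReLU])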
@@ -94,16 +81,4 @@ class FeedForward(torch.nn.Module):
        :return: the output computed by the model.
        :rtype: LabelTensor
        """
-
-        if self.input_variables:
-            x = x.extract(self.input_variables)
-
-        for feature in self.extra_features:
-            x = x.append(feature(x))
-
-        output = self.model(x).as_subclass(LabelTensor)
-
-        if self.output_variables:
-            output.labels = self.output_variables
-
-        return output
+        return self.model(x)
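With the LabelTensor extraction and the extra-feature loop removed, forward is a plain pass-through to the wrapped network, so ordinary tensors go straight in and out. A minimal usage sketch under the same assumed import:

import torch
from pina.model import FeedForward  # import path assumed

model = FeedForward(3, 2)
x = torch.rand(5, 3)   # plain torch.Tensor; no labels to extract anymore
y = model(x)           # just self.model(x); y has shape (5, 2)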