diff --git a/pina/model/deeponet.py b/pina/model/deeponet.py
index 942f92f..980fd4a 100644
--- a/pina/model/deeponet.py
+++ b/pina/model/deeponet.py
@@ -18,8 +18,7 @@ class DeepONet(torch.nn.Module):
         `_
     """

-    def __init__(self, branch_net, trunk_net, output_variables, inner_size=10,
-                 features=None, features_net=None):
+    def __init__(self, branch_net, trunk_net, output_variables, inner_size=10):
         """
         :param torch.nn.Module branch_net: the neural network to use as branch
             model. It has to take as input a :class:`LabelTensor`. The number
@@ -30,6 +29,8 @@ class DeepONet(torch.nn.Module):
         :param list(str) output_variables: the list containing the labels
             corresponding to the components of the output computed by the
             model.
+        :param int inner_size: the output dimension of the branch and trunk
+            networks. Default is 10.

         :Example:
             >>> branch = FFN(input_variables=['a', 'c'], output_variables=20)
@@ -74,22 +75,6 @@ class DeepONet(torch.nn.Module):
         self.trunk_net = trunk_net
         self.branch_net = branch_net

-        # if features:
-        #     if len(features) != features_net.layers[0].in_features:
-        #         raise ValueError('Incompatible features')
-        #     if trunk_out_dim != features_net.layers[-1].out_features:
-        #         raise ValueError('Incompatible features')
-
-        #     self.features = features
-        #     self.features_net = nn.Sequential(
-        #         nn.Linear(len(features), 10), nn.Softplus(),
-        #         # nn.Linear(10, 10), nn.Softplus(),
-        #         nn.Linear(10, trunk_out_dim)
-        #     )
-        #     self.features_net = nn.Sequential(
-        #         nn.Linear(len(features), trunk_out_dim)
-        #     )
-
         self.reduction = nn.Linear(trunk_out_dim, self.output_dimension)

     @property
@@ -105,41 +90,16 @@ class DeepONet(torch.nn.Module):
         :return: the output computed by the model.
         :rtype: LabelTensor
         """
-        # print(x.shape)
-        #input_feature = []
-        #for feature in self.features:
-        #    #print(feature)
-        #    input_feature.append(feature(x))
-        #input_feature = torch.cat(input_feature, dim=1)

         branch_output = self.branch_net(
             x.extract(self.branch_net.input_variables))
-        # print(branch_output.shape)
+
         trunk_output = self.trunk_net(
             x.extract(self.trunk_net.input_variables))
-        # print(trunk_output.shape)
-        #feat_output = self.features_net(input_feature)
-        # print(feat_output.shape)

-        # inner_input = torch.cat([
-        #     branch_output * trunk_output,
-        #     branch_output,
-        #     trunk_output,
-        #     feat_output], dim=1)
-        # print(inner_input.shape)
-        # output_ = self.reduction(inner_input)
-        # print(output_.shape)
         output_ = self.reduction(trunk_output * branch_output)
-        # output_ = LabelTensor(output_, self.output_variables)
+        output_ = output_.as_subclass(LabelTensor)
         output_.labels = self.output_variables
-        # local_size = int(trunk_output.shape[1]/self.output_dimension)
-        # for i, var in enumerate(self.output_variables):
-        #     start = i*local_size
-        #     stop = (i+1)*local_size
-        #     local_output = LabelTensor(torch.sum(branch_output[:, start:stop] * trunk_output[:, start:stop], dim=1).reshape(-1, 1), var)
-        #     if i==0:
-        #         output_ = local_output
-        #     else:
-        #         output_ = output_.append(local_output)
+
         return output_
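Note: the snippet below is a minimal plain-torch sketch of the combination the patched forward() now performs. It is not the pina API; the two small MLPs, their layer sizes, and the input widths are hypothetical stand-ins for branch_net and trunk_net. Both networks emit inner_size features, which are multiplied elementwise and mapped to the output variables by the linear reduction layer.

import torch
import torch.nn as nn

inner_size = 10   # matches the new default in __init__
n_outputs = 1     # e.g. output_variables = ['u']

# Hypothetical stand-ins for branch_net and trunk_net: any modules whose
# final layer has inner_size outputs would do.
branch_net = nn.Sequential(nn.Linear(2, 32), nn.Tanh(), nn.Linear(32, inner_size))
trunk_net = nn.Sequential(nn.Linear(2, 32), nn.Tanh(), nn.Linear(32, inner_size))
reduction = nn.Linear(inner_size, n_outputs)

branch_in = torch.rand(5, 2)   # stands in for x.extract(branch input variables)
trunk_in = torch.rand(5, 2)    # stands in for x.extract(trunk input variables)

# The combination performed by the patched forward(): elementwise product of
# branch and trunk features, reduced to the output components by one Linear.
output_ = reduction(trunk_net(trunk_in) * branch_net(branch_in))
print(output_.shape)           # torch.Size([5, 1])

The only other behavioural change in forward() is that the result is re-viewed as a LabelTensor via as_subclass, which lets the output_variables labels be attached without copying the tensor data.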