diff --git a/docs/source/_rst/tutorials/tutorial5/tutorial.rst b/docs/source/_rst/tutorials/tutorial5/tutorial.rst index e58e9b2..22f9425 100644 --- a/docs/source/_rst/tutorials/tutorial5/tutorial.rst +++ b/docs/source/_rst/tutorials/tutorial5/tutorial.rst @@ -13,8 +13,7 @@ First of all we import the modules needed for the tutorial. Importing from scipy import io import torch from pina.model import FNO, FeedForward # let's import some models - from pina import Condition - from pina import LabelTensor + from pina import Condition, LabelTensor from pina.solvers import SupervisedSolver from pina.trainer import Trainer from pina.problem import AbstractProblem @@ -44,10 +43,10 @@ taken from the authors original reference. data = io.loadmat("Data_Darcy.mat") # extract data (we use only 100 data for train) - k_train = torch.tensor(data['k_train'], dtype=torch.float).unsqueeze(-1) - u_train = torch.tensor(data['u_train'], dtype=torch.float).unsqueeze(-1) - k_test = torch.tensor(data['k_test'], dtype=torch.float).unsqueeze(-1) - u_test= torch.tensor(data['u_test'], dtype=torch.float).unsqueeze(-1) + k_train = LabelTensor(torch.tensor(data['k_train'], dtype=torch.float).unsqueeze(-1), ['u0']) + u_train = LabelTensor(torch.tensor(data['u_train'], dtype=torch.float).unsqueeze(-1), ['u']) + k_test = LabelTensor(torch.tensor(data['k_test'], dtype=torch.float).unsqueeze(-1), ['u0']) + u_test= LabelTensor(torch.tensor(data['u_test'], dtype=torch.float).unsqueeze(-1), ['u']) x = torch.tensor(data['x'], dtype=torch.float)[0] y = torch.tensor(data['y'], dtype=torch.float)[0] @@ -74,10 +73,10 @@ inheriting from ``AbstractProblem``. .. code:: ipython3 class NeuralOperatorSolver(AbstractProblem): - input_variables = ['u_0'] - output_variables = ['u'] - conditions = {'data' : Condition(input_points=LabelTensor(k_train, input_variables), - output_points=LabelTensor(u_train, output_variables))} + input_variables = k_train.labels + output_variables = u_train.labels + conditions = {'data' : Condition(input_points=k_train, + output_points=u_train)} # make problem problem = NeuralOperatorSolver() @@ -114,7 +113,7 @@ training using supervised learning. .. parsed-literal:: - Epoch 9: : 100it [00:00, 383.36it/s, v_num=36, mean_loss=0.108] + Epoch 9: : 100it [00:00, 357.28it/s, v_num=1, mean_loss=0.108] .. parsed-literal:: @@ -123,7 +122,7 @@ training using supervised learning. .. parsed-literal:: - Epoch 9: : 100it [00:00, 380.57it/s, v_num=36, mean_loss=0.108] + Epoch 9: : 100it [00:00, 354.81it/s, v_num=1, mean_loss=0.108] The final loss is pretty high… We can calculate the error by importing @@ -137,10 +136,10 @@ The final loss is pretty high… We can calculate the error by importing metric_err = LpLoss(relative=True) - err = float(metric_err(u_train.squeeze(-1), solver.models[0](k_train).squeeze(-1)).mean())*100 + err = float(metric_err(u_train.squeeze(-1), solver.neural_net(k_train).squeeze(-1)).mean())*100 print(f'Final error training {err:.2f}%') - err = float(metric_err(u_test.squeeze(-1), solver.models[0](k_test).squeeze(-1)).mean())*100 + err = float(metric_err(u_test.squeeze(-1), solver.neural_net(k_test).squeeze(-1)).mean())*100 print(f'Final error testing {err:.2f}%') @@ -163,10 +162,10 @@ operator this approach is better suited, as we shall see. 
    projecting_net = torch.nn.Linear(24, 1)
     model = FNO(lifting_net=lifting_net,
                 projecting_net=projecting_net,
-                n_modes=16,
+                n_modes=8,
                 dimensions=2,
                 inner_size=24,
-                padding=11)
+                padding=8)
 
 
     # make solver
@@ -188,7 +187,7 @@ operator this approach is better suited, as we shall see.
 
 .. parsed-literal::
 
-    Epoch 9: : 100it [00:04, 22.13it/s, v_num=37, mean_loss=0.000952]
+    Epoch 0: : 0it [00:00, ?it/s]Epoch 9: : 100it [00:02, 47.76it/s, v_num=4, mean_loss=0.00106]
 
 
 .. parsed-literal::
@@ -197,7 +196,7 @@ operator this approach is better suited, as we shall see.
 
 .. parsed-literal::
 
-    Epoch 9: : 100it [00:04, 22.07it/s, v_num=37, mean_loss=0.000952]
+    Epoch 9: : 100it [00:02, 47.65it/s, v_num=4, mean_loss=0.00106]
 
 
 We can clearly see that the final loss is lower. Let’s see in testing..
@@ -207,17 +206,17 @@ training, when many data samples are used.
 
 .. code:: ipython3
 
-    err = float(metric_err(u_train.squeeze(-1), solver.models[0](k_train).squeeze(-1)).mean())*100
+    err = float(metric_err(u_train.squeeze(-1), solver.neural_net(k_train).squeeze(-1)).mean())*100
     print(f'Final error training {err:.2f}%')
 
-    err = float(metric_err(u_test.squeeze(-1), solver.models[0](k_test).squeeze(-1)).mean())*100
+    err = float(metric_err(u_test.squeeze(-1), solver.neural_net(k_test).squeeze(-1)).mean())*100
     print(f'Final error testing {err:.2f}%')
 
 
 .. parsed-literal::
 
-    Final error training 4.45%
-    Final error testing 4.91%
+    Final error training 4.83%
+    Final error testing 5.16%
 
 
 As we can see the loss is way lower!
diff --git a/pina/model/fno.py b/pina/model/fno.py
index 6384a65..e756443 100644
--- a/pina/model/fno.py
+++ b/pina/model/fno.py
@@ -2,6 +2,8 @@ import torch
 import torch.nn as nn
 from ..utils import check_consistency
 from .layers.fourier import FourierBlock1D, FourierBlock2D, FourierBlock3D
+from pina import LabelTensor
+import warnings
 
 
 class FNO(torch.nn.Module):
@@ -69,7 +71,7 @@ class FNO(torch.nn.Module):
         elif dimensions == 3:
             fourier_layer = FourierBlock3D
         else:
-            NotImplementedError('FNO implemented only for 1D/2D/3D data.')
+            raise NotImplementedError('FNO implemented only for 1D/2D/3D data.')
 
         # Here we build the FNO by stacking Fourier Blocks
 
@@ -137,6 +139,9 @@ class FNO(torch.nn.Module):
         :return: The output tensor obtained from the FNO.
         :rtype: torch.Tensor
         """
+        if isinstance(x, LabelTensor):  # TODO: remove when Network is fixed
+            warnings.warn('LabelTensor passed as input is not supported; casting LabelTensor to torch.Tensor')
+            x = x.as_subclass(torch.Tensor)
 
         # lifting the input in higher dimensional space
         x = self._lifting_net(x)
diff --git a/pina/model/network.py b/pina/model/network.py
index 0f84897..5c5c23d 100644
--- a/pina/model/network.py
+++ b/pina/model/network.py
@@ -56,6 +56,11 @@ class Network(torch.nn.Module):
         :param torch.Tensor x: Input of the network.
         :return torch.Tensor: Output of the network.
         """
+        # only LabelTensor inputs are accepted
+        assert isinstance(x, LabelTensor), "Expected LabelTensor as input to the model."
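+        # NOTE: the label-based variable extraction below only works on a
+        # LabelTensor, so plain tensors are rejected early with a clear message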
+
         # extract torch.Tensor from corresponding label
         # in case `input_variables = []` all points are used
         if self._input_variables:
@@ -65,22 +70,20 @@
         for feature in self._extra_features:
             x = x.append(feature(x))
 
-        # convert LabelTensor to torch.Tensor
-        x = x.as_subclass(torch.Tensor)
-
-        # perform forward pass (using torch.Tensor) + converting to LabelTensor
+        # perform forward pass and convert the result to LabelTensor
         output = self._model(x).as_subclass(LabelTensor)
 
         # set the labels for LabelTensor
         output.labels = self._output_variables
 
         return output
-
+
+    # TODO: remove in next releases (only used in GAROM solver)
     def forward_map(self, x):
         """
         Forward method for Network class when the input is
-        a tuple. This class implements the standard forward method,
-        and it adds the possibility to pass extra features.
+        a tuple. This method simply performs a forward pass with the input
+        cast as a tuple or list of :class:`torch.Tensor`.
 
         All the PINA models ``forward`` s are overriden by this class,
         to enable :class:`pina.label_tensor.LabelTensor` labels extraction.
diff --git a/tests/test_model/test_network.py b/tests/test_model/test_network.py
new file mode 100644
index 0000000..20e514d
--- /dev/null
+++ b/tests/test_model/test_network.py
@@ -0,0 +1,37 @@
+import torch
+import pytest
+
+from pina.model.network import Network
+from pina.model import FeedForward
+from pina import LabelTensor
+
+data = torch.rand((20, 3))
+data_lt = LabelTensor(data, ['x', 'y', 'z'])
+input_dim = 3
+output_dim = 4
+torchmodel = FeedForward(input_dim, output_dim)
+extra_feat = []
+
+
+def test_constructor():
+    Network(model=torchmodel,
+            input_variables=['x', 'y', 'z'],
+            output_variables=['a', 'b', 'c', 'd'],
+            extra_features=None)
+
+def test_forward():
+    net = Network(model=torchmodel,
+                  input_variables=['x', 'y', 'z'],
+                  output_variables=['a', 'b', 'c', 'd'],
+                  extra_features=None)
+    out = net.torchmodel(data)
+    out_lt = net(data_lt)
+    assert isinstance(out, torch.Tensor)
+    assert isinstance(out_lt, LabelTensor)
+    assert out.shape == (20, 4)
+    assert out_lt.shape == (20, 4)
+    assert torch.allclose(out_lt, out)
+    assert out_lt.labels == ['a', 'b', 'c', 'd']
+
+    with pytest.raises(AssertionError):
+        net(data)
diff --git a/tutorials/tutorial5/tutorial.ipynb b/tutorials/tutorial5/tutorial.ipynb
index 9c4dbeb..aa96cbf 100644
--- a/tutorials/tutorial5/tutorial.ipynb
+++ b/tutorials/tutorial5/tutorial.ipynb
@@ -19,7 +19,7 @@
   },
   {
    "cell_type": "code",
-   "execution_count": 11,
+   "execution_count": 1,
    "id": "5f2744dc",
    "metadata": {},
    "outputs": [],
    "source": [
    "# !pip install scipy # install scipy\n",
    "from scipy import io\n",
    "import torch\n",
    "from pina.model import FNO, FeedForward # let's import some models\n",
-   "from pina import Condition\n",
-   "from pina import LabelTensor\n",
+   "from pina import Condition, LabelTensor\n",
    "from pina.solvers import SupervisedSolver\n",
    "from pina.trainer import Trainer\n",
    "from pina.problem import AbstractProblem\n",
@@ -63,10 +62,10 @@
    "data = io.loadmat(\"Data_Darcy.mat\")\n",
    "\n",
    "# extract data (we use only 100 data for train)\n",
-   "k_train = torch.tensor(data['k_train'], dtype=torch.float).unsqueeze(-1)\n",
-   "u_train = torch.tensor(data['u_train'], dtype=torch.float).unsqueeze(-1)\n",
-   "k_test = torch.tensor(data['k_test'], dtype=torch.float).unsqueeze(-1)\n",
-   "u_test= torch.tensor(data['u_test'], dtype=torch.float).unsqueeze(-1)\n",
+   "k_train = LabelTensor(torch.tensor(data['k_train'], dtype=torch.float).unsqueeze(-1), ['u0'])\n",
+   "u_train = LabelTensor(torch.tensor(data['u_train'], 
dtype=torch.float).unsqueeze(-1), ['u'])\n", + "k_test = LabelTensor(torch.tensor(data['k_test'], dtype=torch.float).unsqueeze(-1), ['u0'])\n", + "u_test= LabelTensor(torch.tensor(data['u_test'], dtype=torch.float).unsqueeze(-1), ['u'])\n", "x = torch.tensor(data['x'], dtype=torch.float)[0]\n", "y = torch.tensor(data['y'], dtype=torch.float)[0]" ] @@ -116,16 +115,16 @@ }, { "cell_type": "code", - "execution_count": 14, + "execution_count": 17, "id": "8b27d283", "metadata": {}, "outputs": [], "source": [ "class NeuralOperatorSolver(AbstractProblem):\n", - " input_variables = ['u_0']\n", - " output_variables = ['u']\n", - " conditions = {'data' : Condition(input_points=LabelTensor(k_train, input_variables), \n", - " output_points=LabelTensor(u_train, output_variables))}\n", + " input_variables = k_train.labels\n", + " output_variables = u_train.labels\n", + " conditions = {'data' : Condition(input_points=k_train, \n", + " output_points=u_train)}\n", "\n", "# make problem\n", "problem = NeuralOperatorSolver()" @@ -143,7 +142,7 @@ }, { "cell_type": "code", - "execution_count": 15, + "execution_count": 18, "id": "e34f18b0", "metadata": {}, "outputs": [ @@ -161,7 +160,7 @@ "name": "stdout", "output_type": "stream", "text": [ - "Epoch 9: : 100it [00:00, 383.36it/s, v_num=36, mean_loss=0.108]" + "Epoch 9: : 100it [00:00, 357.28it/s, v_num=1, mean_loss=0.108]" ] }, { @@ -175,7 +174,7 @@ "name": "stdout", "output_type": "stream", "text": [ - "Epoch 9: : 100it [00:00, 380.57it/s, v_num=36, mean_loss=0.108]\n" + "Epoch 9: : 100it [00:00, 354.81it/s, v_num=1, mean_loss=0.108]\n" ] } ], @@ -202,7 +201,7 @@ }, { "cell_type": "code", - "execution_count": 16, + "execution_count": 19, "id": "0e2a6aa4", "metadata": {}, "outputs": [ @@ -222,10 +221,10 @@ "metric_err = LpLoss(relative=True)\n", "\n", "\n", - "err = float(metric_err(u_train.squeeze(-1), solver.models[0](k_train).squeeze(-1)).mean())*100\n", + "err = float(metric_err(u_train.squeeze(-1), solver.neural_net(k_train).squeeze(-1)).mean())*100\n", "print(f'Final error training {err:.2f}%')\n", "\n", - "err = float(metric_err(u_test.squeeze(-1), solver.models[0](k_test).squeeze(-1)).mean())*100\n", + "err = float(metric_err(u_test.squeeze(-1), solver.neural_net(k_test).squeeze(-1)).mean())*100\n", "print(f'Final error testing {err:.2f}%')" ] }, @@ -241,7 +240,7 @@ }, { "cell_type": "code", - "execution_count": 17, + "execution_count": 24, "id": "9af523a5", "metadata": {}, "outputs": [ @@ -259,7 +258,14 @@ "name": "stdout", "output_type": "stream", "text": [ - "Epoch 9: : 100it [00:04, 22.13it/s, v_num=37, mean_loss=0.000952]" + "Epoch 0: : 0it [00:00, ?it/s]" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Epoch 9: : 100it [00:02, 47.76it/s, v_num=4, mean_loss=0.00106] " ] }, { @@ -273,7 +279,7 @@ "name": "stdout", "output_type": "stream", "text": [ - "Epoch 9: : 100it [00:04, 22.07it/s, v_num=37, mean_loss=0.000952]\n" + "Epoch 9: : 100it [00:02, 47.65it/s, v_num=4, mean_loss=0.00106]\n" ] } ], @@ -283,10 +289,10 @@ "projecting_net = torch.nn.Linear(24, 1)\n", "model = FNO(lifting_net=lifting_net,\n", " projecting_net=projecting_net,\n", - " n_modes=16,\n", + " n_modes=8,\n", " dimensions=2,\n", " inner_size=24,\n", - " padding=11)\n", + " padding=8)\n", "\n", "\n", "# make solver\n", @@ -307,7 +313,7 @@ }, { "cell_type": "code", - "execution_count": 18, + "execution_count": 25, "id": "58e2db89", "metadata": {}, "outputs": [ @@ -315,16 +321,16 @@ "name": "stdout", "output_type": "stream", "text": [ - "Final error training 
4.45%\n", - "Final error testing 4.91%\n" + "Final error training 4.83%\n", + "Final error testing 5.16%\n" ] } ], "source": [ - "err = float(metric_err(u_train.squeeze(-1), solver.models[0](k_train).squeeze(-1)).mean())*100\n", + "err = float(metric_err(u_train.squeeze(-1), solver.neural_net(k_train).squeeze(-1)).mean())*100\n", "print(f'Final error training {err:.2f}%')\n", "\n", - "err = float(metric_err(u_test.squeeze(-1), solver.models[0](k_test).squeeze(-1)).mean())*100\n", + "err = float(metric_err(u_test.squeeze(-1), solver.neural_net(k_test).squeeze(-1)).mean())*100\n", "print(f'Final error testing {err:.2f}%')" ] }, diff --git a/tutorials/tutorial5/tutorial.py b/tutorials/tutorial5/tutorial.py index 5dd9406..7414a85 100644 --- a/tutorials/tutorial5/tutorial.py +++ b/tutorials/tutorial5/tutorial.py @@ -6,15 +6,14 @@ # In this tutorial we are going to solve the Darcy flow problem in two dimensions, presented in [*Fourier Neural Operator for # Parametric Partial Differential Equation*](https://openreview.net/pdf?id=c8P9NQVtmnO). First of all we import the modules needed for the tutorial. Importing `scipy` is needed for input output operations. -# In[11]: +# In[1]: # !pip install scipy # install scipy from scipy import io import torch from pina.model import FNO, FeedForward # let's import some models -from pina import Condition -from pina import LabelTensor +from pina import Condition, LabelTensor from pina.solvers import SupervisedSolver from pina.trainer import Trainer from pina.problem import AbstractProblem @@ -39,10 +38,10 @@ import matplotlib.pyplot as plt data = io.loadmat("Data_Darcy.mat") # extract data (we use only 100 data for train) -k_train = torch.tensor(data['k_train'], dtype=torch.float).unsqueeze(-1) -u_train = torch.tensor(data['u_train'], dtype=torch.float).unsqueeze(-1) -k_test = torch.tensor(data['k_test'], dtype=torch.float).unsqueeze(-1) -u_test= torch.tensor(data['u_test'], dtype=torch.float).unsqueeze(-1) +k_train = LabelTensor(torch.tensor(data['k_train'], dtype=torch.float).unsqueeze(-1), ['u0']) +u_train = LabelTensor(torch.tensor(data['u_train'], dtype=torch.float).unsqueeze(-1), ['u']) +k_test = LabelTensor(torch.tensor(data['k_test'], dtype=torch.float).unsqueeze(-1), ['u0']) +u_test= LabelTensor(torch.tensor(data['u_test'], dtype=torch.float).unsqueeze(-1), ['u']) x = torch.tensor(data['x'], dtype=torch.float)[0] y = torch.tensor(data['y'], dtype=torch.float)[0] @@ -63,14 +62,14 @@ plt.show() # We now create the neural operator class. It is a very simple class, inheriting from `AbstractProblem`. -# In[14]: +# In[17]: class NeuralOperatorSolver(AbstractProblem): - input_variables = ['u_0'] - output_variables = ['u'] - conditions = {'data' : Condition(input_points=LabelTensor(k_train, input_variables), - output_points=LabelTensor(u_train, output_variables))} + input_variables = k_train.labels + output_variables = u_train.labels + conditions = {'data' : Condition(input_points=k_train, + output_points=u_train)} # make problem problem = NeuralOperatorSolver() @@ -80,7 +79,7 @@ problem = NeuralOperatorSolver() # # We will first solve the problem using a Feedforward neural network. We will use the `SupervisedSolver` for solving the problem, since we are training using supervised learning. -# In[15]: +# In[18]: # make model @@ -97,7 +96,7 @@ trainer.train() # The final loss is pretty high... We can calculate the error by importing `LpLoss`. 
-# In[16]: +# In[19]: from pina.loss import LpLoss @@ -106,10 +105,10 @@ from pina.loss import LpLoss metric_err = LpLoss(relative=True) -err = float(metric_err(u_train.squeeze(-1), solver.models[0](k_train).squeeze(-1)).mean())*100 +err = float(metric_err(u_train.squeeze(-1), solver.neural_net(k_train).squeeze(-1)).mean())*100 print(f'Final error training {err:.2f}%') -err = float(metric_err(u_test.squeeze(-1), solver.models[0](k_test).squeeze(-1)).mean())*100 +err = float(metric_err(u_test.squeeze(-1), solver.neural_net(k_test).squeeze(-1)).mean())*100 print(f'Final error testing {err:.2f}%') @@ -117,7 +116,7 @@ print(f'Final error testing {err:.2f}%') # # We will now move to solve the problem using a FNO. Since we are learning operator this approach is better suited, as we shall see. -# In[17]: +# In[24]: # make model @@ -125,10 +124,10 @@ lifting_net = torch.nn.Linear(1, 24) projecting_net = torch.nn.Linear(24, 1) model = FNO(lifting_net=lifting_net, projecting_net=projecting_net, - n_modes=16, + n_modes=8, dimensions=2, inner_size=24, - padding=11) + padding=8) # make solver @@ -141,13 +140,13 @@ trainer.train() # We can clearly see that the final loss is lower. Let's see in testing.. Notice that the number of parameters is way higher than a `FeedForward` network. We suggest to use GPU or TPU for a speed up in training, when many data samples are used. -# In[18]: +# In[25]: -err = float(metric_err(u_train.squeeze(-1), solver.models[0](k_train).squeeze(-1)).mean())*100 +err = float(metric_err(u_train.squeeze(-1), solver.neural_net(k_train).squeeze(-1)).mean())*100 print(f'Final error training {err:.2f}%') -err = float(metric_err(u_test.squeeze(-1), solver.models[0](k_test).squeeze(-1)).mean())*100 +err = float(metric_err(u_test.squeeze(-1), solver.neural_net(k_test).squeeze(-1)).mean())*100 print(f'Final error testing {err:.2f}%')
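
A minimal end-to-end sketch of the workflow these hunks converge on, for reference.
Everything below mirrors calls that appear in the diff itself (`LabelTensor`,
`Condition`, `k_train.labels`, the `FNO` arguments, `solver.neural_net`); the
`SupervisedSolver(problem=..., model=...)` and `Trainer(solver=..., max_epochs=...)`
constructor signatures are assumed from the unchanged parts of the tutorial, so
treat this as an illustrative sketch rather than an authoritative API reference.

    import torch
    from scipy import io
    from pina import Condition, LabelTensor
    from pina.model import FNO
    from pina.problem import AbstractProblem
    from pina.solvers import SupervisedSolver
    from pina.trainer import Trainer

    # labels travel with the tensors, so the problem class below
    # needs no hand-written variable-name string literals
    data = io.loadmat("Data_Darcy.mat")
    k_train = LabelTensor(torch.tensor(data['k_train'], dtype=torch.float).unsqueeze(-1), ['u0'])
    u_train = LabelTensor(torch.tensor(data['u_train'], dtype=torch.float).unsqueeze(-1), ['u'])

    class NeuralOperatorSolver(AbstractProblem):
        # variable names are read off the data instead of being re-declared
        input_variables = k_train.labels
        output_variables = u_train.labels
        conditions = {'data': Condition(input_points=k_train,
                                        output_points=u_train)}

    model = FNO(lifting_net=torch.nn.Linear(1, 24),
                projecting_net=torch.nn.Linear(24, 1),
                n_modes=8, dimensions=2, inner_size=24, padding=8)

    # constructor signatures assumed from the tutorial's unchanged cells
    solver = SupervisedSolver(problem=NeuralOperatorSolver(), model=model)
    Trainer(solver=solver, max_epochs=10).train()

    # `neural_net` replaces the old `models[0]` access; FNO.forward now warns
    # and casts the LabelTensor to a plain torch.Tensor internally
    u_pred = solver.neural_net(k_train)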