diff --git a/tutorials/tutorial1/tutorial.ipynb b/tutorials/tutorial1/tutorial.ipynb
index ccc82ee..21b4fc2 100644
--- a/tutorials/tutorial1/tutorial.ipynb
+++ b/tutorials/tutorial1/tutorial.ipynb
@@ -137,7 +137,15 @@
    "execution_count": 2,
    "id": "f2608e2e",
    "metadata": {},
-   "outputs": [],
+   "outputs": [
+    {
+     "name": "stderr",
+     "output_type": "stream",
+     "text": [
+      "/home/matte_b/PINA/pina/operators.py: DeprecationWarning: 'pina.operators' is deprecated and will be removed in future versions. Please use 'pina.operator' instead.\n"
+     ]
+    }
+   ],
    "source": [
     "from pina.problem import SpatialProblem\n",
     "from pina.operator import grad\n",
diff --git a/tutorials/tutorial1/tutorial.py b/tutorials/tutorial1/tutorial.py
index a12dca3..4885d92 100644
--- a/tutorials/tutorial1/tutorial.py
+++ b/tutorials/tutorial1/tutorial.py
@@ -89,7 +89,7 @@ class TimeSpaceODE(SpatialProblem, TimeDependentProblem):
 #
 # Once the `Problem` class is initialized, we need to represent the differential equation in **PINA**. In order to do this, we need to load the **PINA** operators from the `pina.operator` module. Again, we'll consider Equation (1) and represent it in **PINA**:

-# In[2]:
+# In[ ]:


 from pina.problem import SpatialProblem
diff --git a/tutorials/tutorial4/tutorial.ipynb b/tutorials/tutorial4/tutorial.ipynb
index 505c59c..c5b16a3 100644
--- a/tutorials/tutorial4/tutorial.ipynb
+++ b/tutorials/tutorial4/tutorial.ipynb
@@ -869,16 +869,16 @@
     "output = net(input_data).detach()\n",
     "\n",
     "# visualize data\n",
-    "fig, axes = plt.subplots(nrows=1, ncols=2, figsize=(8, 3))\n",
-    "pic1 = axes[0].scatter(grid[:, 0], grid[:, 1], c=input_data[0, 0, :, -1])\n",
-    "axes[0].set_title(\"Real\")\n",
-    "fig.colorbar(pic1)\n",
-    "plt.subplot(1, 2, 2)\n",
-    "pic2 = axes[1].scatter(grid[:, 0], grid[:, 1], c=output[0, 0, :, -1])\n",
-    "axes[1].set_title(\"Autoencoder\")\n",
-    "fig.colorbar(pic2)\n",
-    "plt.tight_layout()\n",
-    "plt.show()\n"
+    "#fig, axes = plt.subplots(nrows=1, ncols=2, figsize=(8, 3))\n",
+    "#pic1 = axes[0].scatter(grid[:, 0], grid[:, 1], c=input_data[0, 0, :, -1])\n",
+    "#axes[0].set_title(\"Real\")\n",
+    "#fig.colorbar(pic1)\n",
+    "#plt.subplot(1, 2, 2)\n",
+    "#pic2 = axes[1].scatter(grid[:, 0], grid[:, 1], c=output[0, 0, :, -1])\n",
+    "#axes[1].set_title(\"Autoencoder\")\n",
+    "#fig.colorbar(pic2)\n",
+    "#plt.tight_layout()\n",
+    "#plt.show()\n"
    ]
   },
   {
@@ -963,16 +963,16 @@
     "output = net.decoder(latent, input_data2).detach()\n",
     "\n",
     "# show the picture\n",
-    "fig, axes = plt.subplots(nrows=1, ncols=2, figsize=(8, 3))\n",
-    "pic1 = axes[0].scatter(grid2[:, 0], grid2[:, 1], c=input_data2[0, 0, :, -1])\n",
-    "axes[0].set_title(\"Real\")\n",
-    "fig.colorbar(pic1)\n",
-    "plt.subplot(1, 2, 2)\n",
-    "pic2 = axes[1].scatter(grid2[:, 0], grid2[:, 1], c=output[0, 0, :, -1])\n",
-    "axes[1].set_title(\"Up-sampling\")\n",
-    "fig.colorbar(pic2)\n",
-    "plt.tight_layout()\n",
-    "plt.show()\n"
+    "#fig, axes = plt.subplots(nrows=1, ncols=2, figsize=(8, 3))\n",
+    "#pic1 = axes[0].scatter(grid2[:, 0], grid2[:, 1], c=input_data2[0, 0, :, -1])\n",
+    "#axes[0].set_title(\"Real\")\n",
+    "#fig.colorbar(pic1)\n",
+    "#plt.subplot(1, 2, 2)\n",
+    "#pic2 = axes[1].scatter(grid2[:, 0], grid2[:, 1], c=output[0, 0, :, -1])\n",
+    "#axes[1].set_title(\"Up-sampling\")\n",
+    "#fig.colorbar(pic2)\n",
+    "#plt.tight_layout()\n",
+    "#plt.show()\n"
    ]
   },
   {
@@ -1051,16 +1051,16 @@
     "output = net.decoder(latent, input_data2).detach()\n",
     "\n",
     "# show the picture\n",
-    "fig, axes = plt.subplots(nrows=1, ncols=2, figsize=(8, 3))\n",
-    "pic1 = axes[0].scatter(grid2[:, 0], grid2[:, 1], c=input_data2[0, 0, :, -1])\n",
-    "axes[0].set_title(\"Real\")\n",
-    "fig.colorbar(pic1)\n",
-    "plt.subplot(1, 2, 2)\n",
-    "pic2 = axes[1].scatter(grid2[:, 0], grid2[:, 1], c=output[0, 0, :, -1])\n",
-    "axes[1].set_title(\"Autoencoder not re-trained\")\n",
-    "fig.colorbar(pic2)\n",
-    "plt.tight_layout()\n",
-    "plt.show()\n",
+    "#fig, axes = plt.subplots(nrows=1, ncols=2, figsize=(8, 3))\n",
+    "#pic1 = axes[0].scatter(grid2[:, 0], grid2[:, 1], c=input_data2[0, 0, :, -1])\n",
+    "#axes[0].set_title(\"Real\")\n",
+    "#fig.colorbar(pic1)\n",
+    "#plt.subplot(1, 2, 2)\n",
+    "#pic2 = axes[1].scatter(grid2[:, 0], grid2[:, 1], c=output[0, 0, :, -1])\n",
+    "#axes[1].set_title(\"Autoencoder not re-trained\")\n",
+    "#fig.colorbar(pic2)\n",
+    "#plt.tight_layout()\n",
+    "#plt.show()\n",
     "\n",
     "# calculate l2 error\n",
     "print(f'l2 error: {l2_error(input_data2[0, 0, :, -1], output[0, 0, :, -1]):.2%}')"
diff --git a/tutorials/tutorial4/tutorial.py b/tutorials/tutorial4/tutorial.py
index ee1e7aa..ec191a6 100644
--- a/tutorials/tutorial4/tutorial.py
+++ b/tutorials/tutorial4/tutorial.py
@@ -530,16 +530,16 @@ net.eval()
 output = net(input_data).detach()
 
 # visualize data
-fig, axes = plt.subplots(nrows=1, ncols=2, figsize=(8, 3))
-pic1 = axes[0].scatter(grid[:, 0], grid[:, 1], c=input_data[0, 0, :, -1])
-axes[0].set_title("Real")
-fig.colorbar(pic1)
-plt.subplot(1, 2, 2)
-pic2 = axes[1].scatter(grid[:, 0], grid[:, 1], c=output[0, 0, :, -1])
-axes[1].set_title("Autoencoder")
-fig.colorbar(pic2)
-plt.tight_layout()
-plt.show()
+#fig, axes = plt.subplots(nrows=1, ncols=2, figsize=(8, 3))
+#pic1 = axes[0].scatter(grid[:, 0], grid[:, 1], c=input_data[0, 0, :, -1])
+#axes[0].set_title("Real")
+#fig.colorbar(pic1)
+#plt.subplot(1, 2, 2)
+#pic2 = axes[1].scatter(grid[:, 0], grid[:, 1], c=output[0, 0, :, -1])
+#axes[1].set_title("Autoencoder")
+#fig.colorbar(pic2)
+#plt.tight_layout()
+#plt.show()
 
 
 # As we can see, the two solutions are really similar! We can compute the $l_2$ error quite easily as well:
@@ -579,16 +579,16 @@ latent = net.encoder(input_data)
 output = net.decoder(latent, input_data2).detach()
 
 # show the picture
-fig, axes = plt.subplots(nrows=1, ncols=2, figsize=(8, 3))
-pic1 = axes[0].scatter(grid2[:, 0], grid2[:, 1], c=input_data2[0, 0, :, -1])
-axes[0].set_title("Real")
-fig.colorbar(pic1)
-plt.subplot(1, 2, 2)
-pic2 = axes[1].scatter(grid2[:, 0], grid2[:, 1], c=output[0, 0, :, -1])
-axes[1].set_title("Up-sampling")
-fig.colorbar(pic2)
-plt.tight_layout()
-plt.show()
+#fig, axes = plt.subplots(nrows=1, ncols=2, figsize=(8, 3))
+#pic1 = axes[0].scatter(grid2[:, 0], grid2[:, 1], c=input_data2[0, 0, :, -1])
+#axes[0].set_title("Real")
+#fig.colorbar(pic1)
+#plt.subplot(1, 2, 2)
+#pic2 = axes[1].scatter(grid2[:, 0], grid2[:, 1], c=output[0, 0, :, -1])
+#axes[1].set_title("Up-sampling")
+#fig.colorbar(pic2)
+#plt.tight_layout()
+#plt.show()
 
 
 # As we can see, we have a very good approximation of the original function, even though some noise is present. Let's calculate the error now:
@@ -621,16 +621,16 @@ latent = net.encoder(input_data2)
 output = net.decoder(latent, input_data2).detach()
 
 # show the picture
-fig, axes = plt.subplots(nrows=1, ncols=2, figsize=(8, 3))
-pic1 = axes[0].scatter(grid2[:, 0], grid2[:, 1], c=input_data2[0, 0, :, -1])
-axes[0].set_title("Real")
-fig.colorbar(pic1)
-plt.subplot(1, 2, 2)
-pic2 = axes[1].scatter(grid2[:, 0], grid2[:, 1], c=output[0, 0, :, -1])
-axes[1].set_title("Autoencoder not re-trained")
-fig.colorbar(pic2)
-plt.tight_layout()
-plt.show()
+#fig, axes = plt.subplots(nrows=1, ncols=2, figsize=(8, 3))
+#pic1 = axes[0].scatter(grid2[:, 0], grid2[:, 1], c=input_data2[0, 0, :, -1])
+#axes[0].set_title("Real")
+#fig.colorbar(pic1)
+#plt.subplot(1, 2, 2)
+#pic2 = axes[1].scatter(grid2[:, 0], grid2[:, 1], c=output[0, 0, :, -1])
+#axes[1].set_title("Autoencoder not re-trained")
+#fig.colorbar(pic2)
+#plt.tight_layout()
+#plt.show()
 
 # calculate l2 error
 print(f'l2 error: {l2_error(input_data2[0, 0, :, -1], output[0, 0, :, -1]):.2%}')
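Review notes on this patch:

The stderr output committed in tutorial1/tutorial.ipynb suggests that something in the import chain still imports the deprecated `pina.operators` module, even though the cell itself already uses `from pina.operator import grad`. If the intent is to keep that warning out of committed notebook outputs, one option is to filter it before the imports run. A minimal sketch using the standard-library `warnings` module; the message pattern is taken from the warning text captured in the diff above:

    import warnings

    # Keep the pina.operators deprecation notice out of the notebook's
    # committed stderr output; the regex matches the start of the message.
    warnings.filterwarnings(
        "ignore",
        message=r"'pina\.operators' is deprecated",
        category=DeprecationWarning,
    )

    from pina.operator import grad  # the non-deprecated module

In tutorial4, rather than leaving the plotting code commented out, the apparent motivation (blocking `plt.show()` calls in headless or CI runs) can be handled by rendering off-screen and saving the figure. A sketch of the first comparison plot under that assumption: it reuses the tutorial's existing `grid`, `input_data`, and `output` tensors, the output filename is a hypothetical placeholder, and the redundant `plt.subplot(1, 2, 2)` call from the original cell (the axes are already addressed directly via `axes[1]`) is dropped:

    import matplotlib

    matplotlib.use("Agg")  # non-interactive backend: render without a display
    import matplotlib.pyplot as plt

    # side-by-side comparison of the reference field and the autoencoder output
    fig, axes = plt.subplots(nrows=1, ncols=2, figsize=(8, 3))
    pic1 = axes[0].scatter(grid[:, 0], grid[:, 1], c=input_data[0, 0, :, -1])
    axes[0].set_title("Real")
    fig.colorbar(pic1, ax=axes[0])
    pic2 = axes[1].scatter(grid[:, 0], grid[:, 1], c=output[0, 0, :, -1])
    axes[1].set_title("Autoencoder")
    fig.colorbar(pic2, ax=axes[1])
    fig.tight_layout()
    fig.savefig("autoencoder_comparison.png")  # hypothetical filename, replaces plt.show()
    plt.close(fig)

The same pattern applies verbatim to the "Up-sampling" and "Autoencoder not re-trained" plots, with `grid2` and `input_data2` in place of `grid` and `input_data`.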