Add plot in tutorials 1,3,4,9
Committed by Nicola Demo
parent 14a6008437
commit 98d4e1fd76
10 tutorials/tutorial1/tutorial.ipynb vendored
@@ -137,15 +137,7 @@
"execution_count": 2,
"id": "f2608e2e",
"metadata": {},
"outputs": [
{
"name": "stderr",
"output_type": "stream",
"text": [
"/home/matte_b/PINA/pina/operators.py: DeprecationWarning: 'pina.operators' is deprecated and will be removed in future versions. Please use 'pina.operator' instead.\n"
]
}
],
"outputs": [],
"source": [
"from pina.problem import SpatialProblem\n",
"from pina.operator import grad\n",
2 tutorials/tutorial1/tutorial.py vendored
@@ -89,7 +89,7 @@ class TimeSpaceODE(SpatialProblem, TimeDependentProblem):
#
# Once the `Problem` class is initialized, we need to represent the differential equation in **PINA**. In order to do this, we need to load the **PINA** operators from the `pina.operator` module. Again, we'll consider Equation (1) and represent it in **PINA**:


# In[ ]:
# In[2]:


from pina.problem import SpatialProblem

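The comment in the hunk above describes representing the differential equation with the PINA differential operators, and the deprecation notice earlier in this diff points from `pina.operators` to the `pina.operator` module. Below is a minimal, hypothetical sketch of such a residual written against the new module; the equation itself is illustrative (not necessarily the tutorial's Equation (1)), and the `grad` call and the `u`/`x` labels are assumptions for illustration only.

from pina.operator import grad

def ode_residual(input_, output_):
    # Illustrative residual du/dx - u; 'u' and 'x' are assumed variable labels
    # on the labeled tensors produced by the model and the sampled points.
    u = output_.extract(["u"])
    du_dx = grad(output_, input_, components=["u"], d=["x"])
    return du_dx - u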
60 tutorials/tutorial4/tutorial.ipynb vendored
@@ -869,16 +869,16 @@
"output = net(input_data).detach()\n",
"\n",
"# visualize data\n",
"#fig, axes = plt.subplots(nrows=1, ncols=2, figsize=(8, 3))\n",
"#pic1 = axes[0].scatter(grid[:, 0], grid[:, 1], c=input_data[0, 0, :, -1])\n",
"#axes[0].set_title(\"Real\")\n",
"#fig.colorbar(pic1)\n",
"#plt.subplot(1, 2, 2)\n",
"#pic2 = axes[1].scatter(grid[:, 0], grid[:, 1], c=output[0, 0, :, -1])\n",
"#axes[1].set_title(\"Autoencoder\")\n",
"#fig.colorbar(pic2)\n",
"#plt.tight_layout()\n",
"#plt.show()\n"
"fig, axes = plt.subplots(nrows=1, ncols=2, figsize=(8, 3))\n",
"pic1 = axes[0].scatter(grid[:, 0], grid[:, 1], c=input_data[0, 0, :, -1])\n",
"axes[0].set_title(\"Real\")\n",
"fig.colorbar(pic1)\n",
"plt.subplot(1, 2, 2)\n",
"pic2 = axes[1].scatter(grid[:, 0], grid[:, 1], c=output[0, 0, :, -1])\n",
"axes[1].set_title(\"Autoencoder\")\n",
"fig.colorbar(pic2)\n",
"plt.tight_layout()\n",
"plt.show()\n"
]
},
{
@@ -963,16 +963,16 @@
"output = net.decoder(latent, input_data2).detach()\n",
"\n",
"# show the picture\n",
"#fig, axes = plt.subplots(nrows=1, ncols=2, figsize=(8, 3))\n",
"#pic1 = axes[0].scatter(grid2[:, 0], grid2[:, 1], c=input_data2[0, 0, :, -1])\n",
"#axes[0].set_title(\"Real\")\n",
"#fig.colorbar(pic1)\n",
"#plt.subplot(1, 2, 2)\n",
"#pic2 = axes[1].scatter(grid2[:, 0], grid2[:, 1], c=output[0, 0, :, -1])\n",
"# axes[1].set_title(\"Up-sampling\")\n",
"#fig.colorbar(pic2)\n",
"#plt.tight_layout()\n",
"#plt.show()\n"
"fig, axes = plt.subplots(nrows=1, ncols=2, figsize=(8, 3))\n",
"pic1 = axes[0].scatter(grid2[:, 0], grid2[:, 1], c=input_data2[0, 0, :, -1])\n",
"axes[0].set_title(\"Real\")\n",
"fig.colorbar(pic1)\n",
"plt.subplot(1, 2, 2)\n",
"pic2 = axes[1].scatter(grid2[:, 0], grid2[:, 1], c=output[0, 0, :, -1])\n",
"axes[1].set_title(\"Up-sampling\")\n",
"fig.colorbar(pic2)\n",
"plt.tight_layout()\n",
"plt.show()\n"
]
},
{
@@ -1051,16 +1051,16 @@
"output = net.decoder(latent, input_data2).detach()\n",
"\n",
"# show the picture\n",
"#fig, axes = plt.subplots(nrows=1, ncols=2, figsize=(8, 3))\n",
"#pic1 = axes[0].scatter(grid2[:, 0], grid2[:, 1], c=input_data2[0, 0, :, -1])\n",
"#axes[0].set_title(\"Real\")\n",
"#fig.colorbar(pic1)\n",
"#plt.subplot(1, 2, 2)\n",
"#pic2 = axes[1].scatter(grid2[:, 0], grid2[:, 1], c=output[0, 0, :, -1])\n",
"#axes[1].set_title(\"Autoencoder not re-trained\")\n",
"#fig.colorbar(pic2)\n",
"#plt.tight_layout()\n",
"#plt.show()\n",
"fig, axes = plt.subplots(nrows=1, ncols=2, figsize=(8, 3))\n",
"pic1 = axes[0].scatter(grid2[:, 0], grid2[:, 1], c=input_data2[0, 0, :, -1])\n",
"axes[0].set_title(\"Real\")\n",
"fig.colorbar(pic1)\n",
"plt.subplot(1, 2, 2)\n",
"pic2 = axes[1].scatter(grid2[:, 0], grid2[:, 1], c=output[0, 0, :, -1])\n",
"axes[1].set_title(\"Autoencoder not re-trained\")\n",
"fig.colorbar(pic2)\n",
"plt.tight_layout()\n",
"plt.show()\n",
"\n",
"# calculate l2 error\n",
"print(f'l2 error: {l2_error(input_data2[0, 0, :, -1], output[0, 0, :, -1]):.2%}')"
60 tutorials/tutorial4/tutorial.py vendored
@@ -530,16 +530,16 @@ net.eval()
output = net(input_data).detach()

# visualize data
#fig, axes = plt.subplots(nrows=1, ncols=2, figsize=(8, 3))
#pic1 = axes[0].scatter(grid[:, 0], grid[:, 1], c=input_data[0, 0, :, -1])
#axes[0].set_title("Real")
#fig.colorbar(pic1)
#plt.subplot(1, 2, 2)
#pic2 = axes[1].scatter(grid[:, 0], grid[:, 1], c=output[0, 0, :, -1])
#axes[1].set_title("Autoencoder")
#fig.colorbar(pic2)
#plt.tight_layout()
#plt.show()
fig, axes = plt.subplots(nrows=1, ncols=2, figsize=(8, 3))
pic1 = axes[0].scatter(grid[:, 0], grid[:, 1], c=input_data[0, 0, :, -1])
axes[0].set_title("Real")
fig.colorbar(pic1)
plt.subplot(1, 2, 2)
pic2 = axes[1].scatter(grid[:, 0], grid[:, 1], c=output[0, 0, :, -1])
axes[1].set_title("Autoencoder")
fig.colorbar(pic2)
plt.tight_layout()
plt.show()


# As we can see, the two solutions are really similar! We can compute the $l_2$ error quite easily as well:
@@ -579,16 +579,16 @@ latent = net.encoder(input_data)
output = net.decoder(latent, input_data2).detach()

# show the picture
#fig, axes = plt.subplots(nrows=1, ncols=2, figsize=(8, 3))
#pic1 = axes[0].scatter(grid2[:, 0], grid2[:, 1], c=input_data2[0, 0, :, -1])
#axes[0].set_title("Real")
#fig.colorbar(pic1)
#plt.subplot(1, 2, 2)
#pic2 = axes[1].scatter(grid2[:, 0], grid2[:, 1], c=output[0, 0, :, -1])
# axes[1].set_title("Up-sampling")
#fig.colorbar(pic2)
#plt.tight_layout()
#plt.show()
fig, axes = plt.subplots(nrows=1, ncols=2, figsize=(8, 3))
pic1 = axes[0].scatter(grid2[:, 0], grid2[:, 1], c=input_data2[0, 0, :, -1])
axes[0].set_title("Real")
fig.colorbar(pic1)
plt.subplot(1, 2, 2)
pic2 = axes[1].scatter(grid2[:, 0], grid2[:, 1], c=output[0, 0, :, -1])
axes[1].set_title("Up-sampling")
fig.colorbar(pic2)
plt.tight_layout()
plt.show()


# As we can see, we have a very good approximation of the original function, even though some noise is present. Let's calculate the error now:
@@ -621,16 +621,16 @@ latent = net.encoder(input_data2)
output = net.decoder(latent, input_data2).detach()

# show the picture
#fig, axes = plt.subplots(nrows=1, ncols=2, figsize=(8, 3))
#pic1 = axes[0].scatter(grid2[:, 0], grid2[:, 1], c=input_data2[0, 0, :, -1])
#axes[0].set_title("Real")
#fig.colorbar(pic1)
#plt.subplot(1, 2, 2)
#pic2 = axes[1].scatter(grid2[:, 0], grid2[:, 1], c=output[0, 0, :, -1])
#axes[1].set_title("Autoencoder not re-trained")
#fig.colorbar(pic2)
#plt.tight_layout()
#plt.show()
fig, axes = plt.subplots(nrows=1, ncols=2, figsize=(8, 3))
pic1 = axes[0].scatter(grid2[:, 0], grid2[:, 1], c=input_data2[0, 0, :, -1])
axes[0].set_title("Real")
fig.colorbar(pic1)
plt.subplot(1, 2, 2)
pic2 = axes[1].scatter(grid2[:, 0], grid2[:, 1], c=output[0, 0, :, -1])
axes[1].set_title("Autoencoder not re-trained")
fig.colorbar(pic2)
plt.tight_layout()
plt.show()

# calculate l2 error
print(f'l2 error: {l2_error(input_data2[0, 0, :, -1], output[0, 0, :, -1]):.2%}')
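The prints above report a relative error via an `l2_error` helper that is defined earlier in the tutorial, outside this diff. For reference, a plausible sketch of such a helper, assuming a relative L2 norm between the reference and predicted fields:

import torch

def l2_error(true, pred):
    # Relative l2 error between two tensors (assumed definition, for reference only;
    # returned as a plain float so it formats cleanly with the ':.2%' specifier above).
    return (torch.linalg.norm(true - pred) / torch.linalg.norm(true)).item()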