Update tutorials 1 through 12 to current version 0.2
Committed by: Nicola Demo
Parent: 10ea59e15a
Commit: 14a6008437
10  tutorials/tutorial1/tutorial.ipynb (vendored)
@@ -137,7 +137,15 @@
    "execution_count": 2,
    "id": "f2608e2e",
    "metadata": {},
-   "outputs": [],
+   "outputs": [
+    {
+     "name": "stderr",
+     "output_type": "stream",
+     "text": [
+      "/home/matte_b/PINA/pina/operators.py: DeprecationWarning: 'pina.operators' is deprecated and will be removed in future versions. Please use 'pina.operator' instead.\n"
+     ]
+    }
+   ],
    "source": [
     "from pina.problem import SpatialProblem\n",
     "from pina.operator import grad\n",
2  tutorials/tutorial1/tutorial.py (vendored)
@@ -89,7 +89,7 @@ class TimeSpaceODE(SpatialProblem, TimeDependentProblem):
 #
 # Once the `Problem` class is initialized, we need to represent the differential equation in **PINA**. In order to do this, we need to load the **PINA** operators from `pina.operators` module. Again, we'll consider Equation (1) and represent it in **PINA**:
 
-# In[2]:
+# In[ ]:
 
 
 from pina.problem import SpatialProblem
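Both tutorial1 changes trace back to the same rename: `pina.operators` is deprecated in favour of `pina.operator`, which is exactly what the stderr warning recorded in the notebook outputs reports. As a minimal sketch (not part of this commit) of an import that tolerates both module paths, where only the two paths and the `grad` name come from the diff above:

try:
    from pina.operator import grad  # new module path used by the updated tutorials (PINA 0.2)
except ImportError:
    # older PINA releases only expose `pina.operators`; importing it emits the
    # DeprecationWarning captured in the notebook output above
    from pina.operators import grad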
60  tutorials/tutorial4/tutorial.ipynb (vendored)
@@ -869,16 +869,16 @@
     "output = net(input_data).detach()\n",
     "\n",
     "# visualize data\n",
-    "fig, axes = plt.subplots(nrows=1, ncols=2, figsize=(8, 3))\n",
-    "pic1 = axes[0].scatter(grid[:, 0], grid[:, 1], c=input_data[0, 0, :, -1])\n",
-    "axes[0].set_title(\"Real\")\n",
-    "fig.colorbar(pic1)\n",
-    "plt.subplot(1, 2, 2)\n",
-    "pic2 = axes[1].scatter(grid[:, 0], grid[:, 1], c=output[0, 0, :, -1])\n",
-    "axes[1].set_title(\"Autoencoder\")\n",
-    "fig.colorbar(pic2)\n",
-    "plt.tight_layout()\n",
-    "plt.show()\n"
+    "#fig, axes = plt.subplots(nrows=1, ncols=2, figsize=(8, 3))\n",
+    "#pic1 = axes[0].scatter(grid[:, 0], grid[:, 1], c=input_data[0, 0, :, -1])\n",
+    "#axes[0].set_title(\"Real\")\n",
+    "#fig.colorbar(pic1)\n",
+    "#plt.subplot(1, 2, 2)\n",
+    "#pic2 = axes[1].scatter(grid[:, 0], grid[:, 1], c=output[0, 0, :, -1])\n",
+    "#axes[1].set_title(\"Autoencoder\")\n",
+    "#fig.colorbar(pic2)\n",
+    "#plt.tight_layout()\n",
+    "#plt.show()\n"
    ]
   },
   {
@@ -963,16 +963,16 @@
     "output = net.decoder(latent, input_data2).detach()\n",
     "\n",
     "# show the picture\n",
-    "fig, axes = plt.subplots(nrows=1, ncols=2, figsize=(8, 3))\n",
-    "pic1 = axes[0].scatter(grid2[:, 0], grid2[:, 1], c=input_data2[0, 0, :, -1])\n",
-    "axes[0].set_title(\"Real\")\n",
-    "fig.colorbar(pic1)\n",
-    "plt.subplot(1, 2, 2)\n",
-    "pic2 = axes[1].scatter(grid2[:, 0], grid2[:, 1], c=output[0, 0, :, -1])\n",
-    "axes[1].set_title(\"Up-sampling\")\n",
-    "fig.colorbar(pic2)\n",
-    "plt.tight_layout()\n",
-    "plt.show()\n"
+    "#fig, axes = plt.subplots(nrows=1, ncols=2, figsize=(8, 3))\n",
+    "#pic1 = axes[0].scatter(grid2[:, 0], grid2[:, 1], c=input_data2[0, 0, :, -1])\n",
+    "#axes[0].set_title(\"Real\")\n",
+    "#fig.colorbar(pic1)\n",
+    "#plt.subplot(1, 2, 2)\n",
+    "#pic2 = axes[1].scatter(grid2[:, 0], grid2[:, 1], c=output[0, 0, :, -1])\n",
+    "# axes[1].set_title(\"Up-sampling\")\n",
+    "#fig.colorbar(pic2)\n",
+    "#plt.tight_layout()\n",
+    "#plt.show()\n"
    ]
   },
   {
@@ -1051,16 +1051,16 @@
     "output = net.decoder(latent, input_data2).detach()\n",
     "\n",
     "# show the picture\n",
-    "fig, axes = plt.subplots(nrows=1, ncols=2, figsize=(8, 3))\n",
-    "pic1 = axes[0].scatter(grid2[:, 0], grid2[:, 1], c=input_data2[0, 0, :, -1])\n",
-    "axes[0].set_title(\"Real\")\n",
-    "fig.colorbar(pic1)\n",
-    "plt.subplot(1, 2, 2)\n",
-    "pic2 = axes[1].scatter(grid2[:, 0], grid2[:, 1], c=output[0, 0, :, -1])\n",
-    "axes[1].set_title(\"Autoencoder not re-trained\")\n",
-    "fig.colorbar(pic2)\n",
-    "plt.tight_layout()\n",
-    "plt.show()\n",
+    "#fig, axes = plt.subplots(nrows=1, ncols=2, figsize=(8, 3))\n",
+    "#pic1 = axes[0].scatter(grid2[:, 0], grid2[:, 1], c=input_data2[0, 0, :, -1])\n",
+    "#axes[0].set_title(\"Real\")\n",
+    "#fig.colorbar(pic1)\n",
+    "#plt.subplot(1, 2, 2)\n",
+    "#pic2 = axes[1].scatter(grid2[:, 0], grid2[:, 1], c=output[0, 0, :, -1])\n",
+    "#axes[1].set_title(\"Autoencoder not re-trained\")\n",
+    "#fig.colorbar(pic2)\n",
+    "#plt.tight_layout()\n",
+    "#plt.show()\n",
     "\n",
     "# calculate l2 error\n",
     "print(f'l2 error: {l2_error(input_data2[0, 0, :, -1], output[0, 0, :, -1]):.2%}')"
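The three notebook hunks above all disable the matplotlib comparison plots by commenting them out, so the tutorial can run without opening figure windows. If the figures are wanted locally, a hedged sketch of the same plotting logic behind an opt-in flag follows; the `SHOW_PLOTS` name is illustrative (not from the commit), and `grid`, `input_data`, and `output` are assumed to be the arrays used in the tutorial cells shown above:

import matplotlib.pyplot as plt

SHOW_PLOTS = False  # flip to True to reproduce the "Real" vs "Autoencoder" comparison locally

if SHOW_PLOTS:
    # same scatter comparison as the commented-out tutorial code
    fig, axes = plt.subplots(nrows=1, ncols=2, figsize=(8, 3))
    pic1 = axes[0].scatter(grid[:, 0], grid[:, 1], c=input_data[0, 0, :, -1])
    axes[0].set_title("Real")
    fig.colorbar(pic1)
    pic2 = axes[1].scatter(grid[:, 0], grid[:, 1], c=output[0, 0, :, -1])
    axes[1].set_title("Autoencoder")
    fig.colorbar(pic2)
    plt.tight_layout()
    plt.show()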
60  tutorials/tutorial4/tutorial.py (vendored)
@@ -530,16 +530,16 @@ net.eval()
 output = net(input_data).detach()
 
 # visualize data
-fig, axes = plt.subplots(nrows=1, ncols=2, figsize=(8, 3))
-pic1 = axes[0].scatter(grid[:, 0], grid[:, 1], c=input_data[0, 0, :, -1])
-axes[0].set_title("Real")
-fig.colorbar(pic1)
-plt.subplot(1, 2, 2)
-pic2 = axes[1].scatter(grid[:, 0], grid[:, 1], c=output[0, 0, :, -1])
-axes[1].set_title("Autoencoder")
-fig.colorbar(pic2)
-plt.tight_layout()
-plt.show()
+#fig, axes = plt.subplots(nrows=1, ncols=2, figsize=(8, 3))
+#pic1 = axes[0].scatter(grid[:, 0], grid[:, 1], c=input_data[0, 0, :, -1])
+#axes[0].set_title("Real")
+#fig.colorbar(pic1)
+#plt.subplot(1, 2, 2)
+#pic2 = axes[1].scatter(grid[:, 0], grid[:, 1], c=output[0, 0, :, -1])
+#axes[1].set_title("Autoencoder")
+#fig.colorbar(pic2)
+#plt.tight_layout()
+#plt.show()
 
 
 # As we can see, the two solutions are really similar! We can compute the $l_2$ error quite easily as well:
@@ -579,16 +579,16 @@ latent = net.encoder(input_data)
 output = net.decoder(latent, input_data2).detach()
 
 # show the picture
-fig, axes = plt.subplots(nrows=1, ncols=2, figsize=(8, 3))
-pic1 = axes[0].scatter(grid2[:, 0], grid2[:, 1], c=input_data2[0, 0, :, -1])
-axes[0].set_title("Real")
-fig.colorbar(pic1)
-plt.subplot(1, 2, 2)
-pic2 = axes[1].scatter(grid2[:, 0], grid2[:, 1], c=output[0, 0, :, -1])
-axes[1].set_title("Up-sampling")
-fig.colorbar(pic2)
-plt.tight_layout()
-plt.show()
+#fig, axes = plt.subplots(nrows=1, ncols=2, figsize=(8, 3))
+#pic1 = axes[0].scatter(grid2[:, 0], grid2[:, 1], c=input_data2[0, 0, :, -1])
+#axes[0].set_title("Real")
+#fig.colorbar(pic1)
+#plt.subplot(1, 2, 2)
+#pic2 = axes[1].scatter(grid2[:, 0], grid2[:, 1], c=output[0, 0, :, -1])
+# axes[1].set_title("Up-sampling")
+#fig.colorbar(pic2)
+#plt.tight_layout()
+#plt.show()
 
 
 # As we can see we have a very good approximation of the original function, even thought some noise is present. Let's calculate the error now:
@@ -621,16 +621,16 @@ latent = net.encoder(input_data2)
 output = net.decoder(latent, input_data2).detach()
 
 # show the picture
-fig, axes = plt.subplots(nrows=1, ncols=2, figsize=(8, 3))
-pic1 = axes[0].scatter(grid2[:, 0], grid2[:, 1], c=input_data2[0, 0, :, -1])
-axes[0].set_title("Real")
-fig.colorbar(pic1)
-plt.subplot(1, 2, 2)
-pic2 = axes[1].scatter(grid2[:, 0], grid2[:, 1], c=output[0, 0, :, -1])
-axes[1].set_title("Autoencoder not re-trained")
-fig.colorbar(pic2)
-plt.tight_layout()
-plt.show()
+#fig, axes = plt.subplots(nrows=1, ncols=2, figsize=(8, 3))
+#pic1 = axes[0].scatter(grid2[:, 0], grid2[:, 1], c=input_data2[0, 0, :, -1])
+#axes[0].set_title("Real")
+#fig.colorbar(pic1)
+#plt.subplot(1, 2, 2)
+#pic2 = axes[1].scatter(grid2[:, 0], grid2[:, 1], c=output[0, 0, :, -1])
+#axes[1].set_title("Autoencoder not re-trained")
+#fig.colorbar(pic2)
+#plt.tight_layout()
+#plt.show()
 
 # calculate l2 error
 print(f'l2 error: {l2_error(input_data2[0, 0, :, -1], output[0, 0, :, -1]):.2%}')
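Each tutorial4 hunk leaves the final error check untouched. `l2_error` is defined earlier in that tutorial and does not appear in this diff; the implementation below is only an assumption about its shape, a hedged sketch of a relative L2 error that would fit the `:.2%` formatting used in the print statement:

import torch

def l2_error(reference, prediction):
    # hypothetical relative L2 error: ||reference - prediction|| / ||reference||
    return (torch.linalg.norm(reference - prediction) / torch.linalg.norm(reference)).item()

With the `:.2%` format specifier, a returned value of 0.034 would be printed as `l2 error: 3.40%`.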