Add plot in tutorials 1,3,4,9

This commit is contained in:
Matteo Bertocchi
2025-02-27 19:01:05 +01:00
committed by Nicola Demo
parent 18edb4003e
commit 10ea59e15a
8 changed files with 704 additions and 217 deletions

File diff suppressed because one or more lines are too long

View File

@@ -530,16 +530,16 @@ net.eval()
output = net(input_data).detach()
# visualize data
#fig, axes = plt.subplots(nrows=1, ncols=2, figsize=(8, 3))
#pic1 = axes[0].scatter(grid[:, 0], grid[:, 1], c=input_data[0, 0, :, -1])
#axes[0].set_title("Real")
#fig.colorbar(pic1)
#plt.subplot(1, 2, 2)
#pic2 = axes[1].scatter(grid[:, 0], grid[:, 1], c=output[0, 0, :, -1])
#axes[1].set_title("Autoencoder")
#fig.colorbar(pic2)
#plt.tight_layout()
#plt.show()
fig, axes = plt.subplots(nrows=1, ncols=2, figsize=(8, 3))
pic1 = axes[0].scatter(grid[:, 0], grid[:, 1], c=input_data[0, 0, :, -1])
axes[0].set_title("Real")
fig.colorbar(pic1)
plt.subplot(1, 2, 2)
pic2 = axes[1].scatter(grid[:, 0], grid[:, 1], c=output[0, 0, :, -1])
axes[1].set_title("Autoencoder")
fig.colorbar(pic2)
plt.tight_layout()
plt.show()
# As we can see, the two solutions are really similar! We can compute the $l_2$ error quite easily as well:
@@ -579,16 +579,16 @@ latent = net.encoder(input_data)
output = net.decoder(latent, input_data2).detach()
# show the picture
#fig, axes = plt.subplots(nrows=1, ncols=2, figsize=(8, 3))
#pic1 = axes[0].scatter(grid2[:, 0], grid2[:, 1], c=input_data2[0, 0, :, -1])
#axes[0].set_title("Real")
#fig.colorbar(pic1)
#plt.subplot(1, 2, 2)
#pic2 = axes[1].scatter(grid2[:, 0], grid2[:, 1], c=output[0, 0, :, -1])
# axes[1].set_title("Up-sampling")
#fig.colorbar(pic2)
#plt.tight_layout()
#plt.show()
fig, axes = plt.subplots(nrows=1, ncols=2, figsize=(8, 3))
pic1 = axes[0].scatter(grid2[:, 0], grid2[:, 1], c=input_data2[0, 0, :, -1])
axes[0].set_title("Real")
fig.colorbar(pic1)
plt.subplot(1, 2, 2)
pic2 = axes[1].scatter(grid2[:, 0], grid2[:, 1], c=output[0, 0, :, -1])
axes[1].set_title("Up-sampling")
fig.colorbar(pic2)
plt.tight_layout()
plt.show()
# As we can see we have a very good approximation of the original function, even though some noise is present. Let's calculate the error now:
@@ -621,16 +621,16 @@ latent = net.encoder(input_data2)
output = net.decoder(latent, input_data2).detach()
# show the picture
#fig, axes = plt.subplots(nrows=1, ncols=2, figsize=(8, 3))
#pic1 = axes[0].scatter(grid2[:, 0], grid2[:, 1], c=input_data2[0, 0, :, -1])
#axes[0].set_title("Real")
#fig.colorbar(pic1)
#plt.subplot(1, 2, 2)
#pic2 = axes[1].scatter(grid2[:, 0], grid2[:, 1], c=output[0, 0, :, -1])
#axes[1].set_title("Autoencoder not re-trained")
#fig.colorbar(pic2)
#plt.tight_layout()
#plt.show()
fig, axes = plt.subplots(nrows=1, ncols=2, figsize=(8, 3))
pic1 = axes[0].scatter(grid2[:, 0], grid2[:, 1], c=input_data2[0, 0, :, -1])
axes[0].set_title("Real")
fig.colorbar(pic1)
plt.subplot(1, 2, 2)
pic2 = axes[1].scatter(grid2[:, 0], grid2[:, 1], c=output[0, 0, :, -1])
axes[1].set_title("Autoencoder not re-trained")
fig.colorbar(pic2)
plt.tight_layout()
plt.show()
# calculate l2 error
print(f'l2 error: {l2_error(input_data2[0, 0, :, -1], output[0, 0, :, -1]):.2%}')