Update tutorials 1 through 12 to current version 0.2

committed by Nicola Demo
parent 8b797d589a
commit d83ca3af6e
BIN  tutorials/tutorial4/data/MNIST/raw/t10k-images-idx3-ubyte (vendored, new file; binary file not shown)
BIN  tutorials/tutorial4/data/MNIST/raw/t10k-images-idx3-ubyte.gz (vendored, new file; binary file not shown)
BIN  tutorials/tutorial4/data/MNIST/raw/t10k-labels-idx1-ubyte (vendored, new file; binary file not shown)
BIN  tutorials/tutorial4/data/MNIST/raw/t10k-labels-idx1-ubyte.gz (vendored, new file; binary file not shown)
BIN  tutorials/tutorial4/data/MNIST/raw/train-images-idx3-ubyte (vendored, new file; binary file not shown)
BIN  tutorials/tutorial4/data/MNIST/raw/train-images-idx3-ubyte.gz (vendored, new file; binary file not shown)
BIN  tutorials/tutorial4/data/MNIST/raw/train-labels-idx1-ubyte (vendored, new file; binary file not shown)
BIN  tutorials/tutorial4/data/MNIST/raw/train-labels-idx1-ubyte.gz (vendored, new file; binary file not shown)
182  tutorials/tutorial4/tutorial.ipynb (vendored; file diff suppressed because one or more lines are too long)
82  tutorials/tutorial4/tutorial.py (vendored)
@@ -9,7 +9,7 @@
 
 # First of all we import the modules needed for the tutorial:
 
-# In[ ]:
+# In[1]:
 
 
 ## routine needed to run the notebook on Google Colab
@@ -476,7 +476,7 @@ class Decoder(torch.nn.Module):
 
 # Very good! Notice that in the `Decoder` class the `forward` pass uses the `.transpose()` method of the `ContinuousConvolution` class. This method accepts the `weights` for upsampling and the `grid` on which to upsample. Let's now build the autoencoder! We set the hidden dimension in the `hidden_dimension` variable. We apply the sigmoid on the output since the field value lies in $[0, 1]$.
 
-# In[17]:
+# In[14]:
 
 
 class Autoencoder(torch.nn.Module):
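As context for the `.transpose()` remark in the hunk above, here is a minimal sketch of the decoding step it describes. The wiring is hypothetical (`conv` stands in for the tutorial's `ContinuousConvolution` layer); the signature follows the tutorial text, which says the method takes the weights for upsampling and the grid to upsample on:

import torch

class SketchDecoder(torch.nn.Module):
    # Hypothetical decoder illustrating the transposed continuous convolution.
    def __init__(self, conv):
        super().__init__()
        self.conv = conv  # assumed to be a ContinuousConvolution instance

    def forward(self, weights, grid):
        # .transpose() maps the hidden weights back onto the given grid
        out = self.conv.transpose(weights, grid)
        # sigmoid because the field values lie in [0, 1], as the text notes
        return torch.sigmoid(out)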
@@ -500,17 +500,18 @@ net = Autoencoder()
 
 # Let's now train the autoencoder, minimizing the mean squared error loss and optimizing with Adam. We use `SupervisedSolver` as the solver; the problem itself is a simple class inheriting from `AbstractProblem`. Training takes approximately two minutes on CPU.
 
-# In[19]:
+# In[15]:
 
 
 # define the problem
 class CircleProblem(AbstractProblem):
     input_variables = ['x', 'y', 'f']
     output_variables = input_variables
+    al = LabelTensor(input_data, input_variables)
     conditions = {'data': Condition(input_points=LabelTensor(input_data, input_variables), output_points=LabelTensor(input_data, output_variables))}
 
 # define the solver
-solver = SupervisedSolver(problem=CircleProblem(), model=net, loss=torch.nn.MSELoss())
+solver = SupervisedSolver(problem=CircleProblem(), model=net, loss=torch.nn.MSELoss(), use_lt=True)
 
 # train
 trainer = Trainer(solver, max_epochs=150, accelerator='cpu', enable_model_summary=False)  # we train on CPU and skip the model summary at the start of training (optional)
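For readers skimming the diff, the pattern this hunk updates is: wrap the data in a LabelTensor, expose it through a Condition on an AbstractProblem subclass, then hand the problem to SupervisedSolver; the new use_lt=True flag presumably stands for "use LabelTensor", matching the LabelTensor-wrapped condition. A self-contained sketch under the assumption that the imports match the tutorial's (module paths may differ between PINA releases):

import torch
from pina import Condition, LabelTensor, Trainer
from pina.problem import AbstractProblem
from pina.solvers import SupervisedSolver

data = torch.rand(100, 3)  # hypothetical (x, y, f) samples

class ToyProblem(AbstractProblem):
    input_variables = ['x', 'y', 'f']
    output_variables = input_variables
    conditions = {'data': Condition(input_points=LabelTensor(data, input_variables),
                                    output_points=LabelTensor(data, output_variables))}

solver = SupervisedSolver(problem=ToyProblem(), model=torch.nn.Linear(3, 3),
                          loss=torch.nn.MSELoss(), use_lt=True)
Trainer(solver, max_epochs=1, accelerator='cpu').train()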
@@ -520,7 +521,7 @@ trainer.train()
 
 # Let's visualize the two solutions side by side!
 
-# In[20]:
+# In[16]:
 
 
 net.eval()
@@ -529,21 +530,21 @@ net.eval()
 output = net(input_data).detach()
 
 # visualize data
-fig, axes = plt.subplots(nrows=1, ncols=2, figsize=(8, 3))
-pic1 = axes[0].scatter(grid[:, 0], grid[:, 1], c=input_data[0, 0, :, -1])
-axes[0].set_title("Real")
-fig.colorbar(pic1)
-plt.subplot(1, 2, 2)
-pic2 = axes[1].scatter(grid[:, 0], grid[:, 1], c=output[0, 0, :, -1])
-axes[1].set_title("Autoencoder")
-fig.colorbar(pic2)
-plt.tight_layout()
-plt.show()
+#fig, axes = plt.subplots(nrows=1, ncols=2, figsize=(8, 3))
+#pic1 = axes[0].scatter(grid[:, 0], grid[:, 1], c=input_data[0, 0, :, -1])
+#axes[0].set_title("Real")
+#fig.colorbar(pic1)
+#plt.subplot(1, 2, 2)
+#pic2 = axes[1].scatter(grid[:, 0], grid[:, 1], c=output[0, 0, :, -1])
+#axes[1].set_title("Autoencoder")
+#fig.colorbar(pic2)
+#plt.tight_layout()
+#plt.show()
 
 
 # As we can see, the two solutions are really similar! We can compute the $l_2$ error quite easily as well:
 
-# In[21]:
+# In[17]:
 
 
 def l2_error(input_, target):
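The hunk shows only the signature of l2_error; its body is elided by the diff context. One plausible implementation, consistent with the relative-error percentages printed later (a sketch, not necessarily the tutorial's exact code):

import torch

def l2_error(input_, target):
    # relative l2 error: ||input - target||_2 / ||target||_2
    return float(torch.linalg.norm(input_ - target) / torch.linalg.norm(target))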
@@ -559,7 +560,7 @@ print(f'l2 error: {l2_error(input_data[0, 0, :, -1], output[0, 0, :, -1]):.2%}')
 #
 # Suppose we already have the hidden representation and we want to upsample on a different grid with more points. Let's see how to do it:
 
-# In[22]:
+# In[18]:
 
 
 # setting the seed
@@ -578,21 +579,21 @@ latent = net.encoder(input_data)
 output = net.decoder(latent, input_data2).detach()
 
 # show the picture
-fig, axes = plt.subplots(nrows=1, ncols=2, figsize=(8, 3))
-pic1 = axes[0].scatter(grid2[:, 0], grid2[:, 1], c=input_data2[0, 0, :, -1])
-axes[0].set_title("Real")
-fig.colorbar(pic1)
-plt.subplot(1, 2, 2)
-pic2 = axes[1].scatter(grid2[:, 0], grid2[:, 1], c=output[0, 0, :, -1])
-axes[1].set_title("Up-sampling")
-fig.colorbar(pic2)
-plt.tight_layout()
-plt.show()
+#fig, axes = plt.subplots(nrows=1, ncols=2, figsize=(8, 3))
+#pic1 = axes[0].scatter(grid2[:, 0], grid2[:, 1], c=input_data2[0, 0, :, -1])
+#axes[0].set_title("Real")
+#fig.colorbar(pic1)
+#plt.subplot(1, 2, 2)
+#pic2 = axes[1].scatter(grid2[:, 0], grid2[:, 1], c=output[0, 0, :, -1])
+#axes[1].set_title("Up-sampling")
+#fig.colorbar(pic2)
+#plt.tight_layout()
+#plt.show()
 
 
 # As we can see, we have a very good approximation of the original function, even though some noise is present. Let's calculate the error now:
 
-# In[23]:
+# In[19]:
 
 
 print(f'l2 error: {l2_error(input_data2[0, 0, :, -1], output[0, 0, :, -1]):.2%}')
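Condensed, the upsampling pattern in the hunk above is: encode once on the training grid, then decode the same latent representation on any grid. A sketch using the tutorial's names (input_data is the coarse dataset, input_data2 the finer one):

latent = net.encoder(input_data)                       # hidden representation
upsampled = net.decoder(latent, input_data2).detach()  # evaluated on the finer grid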
@@ -601,7 +602,7 @@ print(f'l2 error: {l2_error(input_data2[0, 0, :, -1], output[0, 0, :, -1]):.2%}')
 
 # ### Autoencoding at different resolutions
 # In the previous example we already had the hidden representation (of the original input) and used it to upsample. Sometimes, however, we may have a finer-mesh solution and simply want to encode it. This can be done without retraining! The procedure is useful when the mesh has many points but only a small subset of them is needed for training. Let's see the results:
 
-# In[ ]:
+# In[20]:
 
 
 # setting the seed
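The hunk that follows implements exactly this: because the trained encoder is resolution-independent, the finer-mesh data can be encoded directly, with no retraining. Sketched with the tutorial's names:

latent = net.encoder(input_data2)                          # encode the finer mesh directly
reconstruction = net.decoder(latent, input_data2).detach()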
@@ -620,20 +621,19 @@ latent = net.encoder(input_data2)
 output = net.decoder(latent, input_data2).detach()
 
 # show the picture
-fig, axes = plt.subplots(nrows=1, ncols=2, figsize=(8, 3))
-pic1 = axes[0].scatter(grid2[:, 0], grid2[:, 1], c=input_data2[0, 0, :, -1])
-axes[0].set_title("Real")
-fig.colorbar(pic1)
-plt.subplot(1, 2, 2)
-pic2 = axes[1].scatter(grid2[:, 0], grid2[:, 1], c=output[0, 0, :, -1])
-axes[1].set_title("Autoencoder not re-trained")
-fig.colorbar(pic2)
-plt.tight_layout()
-plt.show()
+#fig, axes = plt.subplots(nrows=1, ncols=2, figsize=(8, 3))
+#pic1 = axes[0].scatter(grid2[:, 0], grid2[:, 1], c=input_data2[0, 0, :, -1])
+#axes[0].set_title("Real")
+#fig.colorbar(pic1)
+#plt.subplot(1, 2, 2)
+#pic2 = axes[1].scatter(grid2[:, 0], grid2[:, 1], c=output[0, 0, :, -1])
+#axes[1].set_title("Autoencoder not re-trained")
+#fig.colorbar(pic2)
+#plt.tight_layout()
+#plt.show()
 
 # calculate l2 error
-print(
-    f'l2 error: {l2_error(input_data2[0, 0, :, -1], output[0, 0, :, -1]):.2%}')
+print(f'l2 error: {l2_error(input_data2[0, 0, :, -1], output[0, 0, :, -1]):.2%}')
 
 
 # ## What's next?