Add plot in tutorials 1,3,4,9

Commit 10ea59e15a (parent 18edb4003e), committed by Nicola Demo
tutorials/tutorial1/tutorial.ipynb (vendored, 205 changed lines)
File diff suppressed because one or more lines are too long
tutorials/tutorial1/tutorial.py (vendored, 52 changed lines)
@@ -89,7 +89,7 @@ class TimeSpaceODE(SpatialProblem, TimeDependentProblem):
 #
 # Once the `Problem` class is initialized, we need to represent the differential equation in **PINA**. In order to do this, we need to load the **PINA** operators from `pina.operators` module. Again, we'll consider Equation (1) and represent it in **PINA**:

-# In[ ]:
+# In[2]:


 from pina.problem import SpatialProblem
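Equation (1) itself is not visible in this hunk, so the snippet below is only an illustrative sketch of how a residual is typically written with the PINA differential operators (the module is spelled `pina.operators` in older releases and `pina.operator` in the release this commit targets); the hypothetical `ode_residual` is not part of the commit.

from pina.operator import grad  # `pina.operators` in older PINA releases

def ode_residual(input_, output_):
    # illustrative first-order ODE residual du/dx - u = 0 (not Equation (1) itself)
    u = output_.extract(['u'])
    u_x = grad(output_, input_, components=['u'], d=['x'])
    return u_x - u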
@@ -167,7 +167,7 @@ problem.discretise_domain(n=20, mode='random')

 # sampling for training
-problem.discretise_domain(20, 'random', domains=['x0']) # TODO check
+problem.discretise_domain(1, 'random', domains=['x0']) # TODO check
 problem.discretise_domain(20, 'lh', domains=['D'])

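For reference, the same call accepts the other sampling modes that appear elsewhere in this commit; a hedged illustration using the signature shown in this hunk:

problem.discretise_domain(n=20, mode='grid', domains=['D'])    # regular grid
problem.discretise_domain(n=20, mode='random', domains=['D'])  # uniform random sampling
problem.discretise_domain(n=20, mode='lh', domains=['D'])      # latin hypercube sampling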
@@ -180,28 +180,32 @@ print('Input points:', problem.discretised_domains)
 print('Input points labels:', problem.discretised_domains['D'].labels)


-# To visualize the sampled points we can use the `.plot_samples` method of the `Plotter` class
+# To visualize the sampled points we can use `matplotlib.pyplot`:

 # In[6]:

-#from pina import Plotter
-#pl = Plotter()
-#pl.plot_samples(problem=problem)
+import matplotlib.pyplot as plt
+
+variables = problem.spatial_variables
+fig = plt.figure()
+proj = "3d" if len(variables) == 3 else None
+ax = fig.add_subplot(projection=proj)
+for location in problem.input_pts:
+    coords = problem.input_pts[location].extract(variables).T.detach()
+    ax.plot(coords.flatten(), torch.zeros(coords.flatten().shape), ".", label=location)


 # ## Perform a small training

-# Once we have defined the problem and generated the data we can start the modelling. Here we will choose a `FeedForward` neural network available in `pina.model`, and we will train using the `PINN` solver from `pina.solvers`. We highlight that this training is fairly simple, for more advanced stuff consider the tutorials in the ***Physics Informed Neural Networks*** section of ***Tutorials***. For training we use the `Trainer` class from `pina.trainer`. Here we show a very short training and some method for plotting the results. Notice that by default all relevant metrics (e.g. MSE error during training) are going to be tracked using a `lightining` logger, by default `CSVLogger`. If you want to track the metric by yourself without a logger, use `pina.callbacks.MetricTracker`.
+# Once we have defined the problem and generated the data, we can start the modelling. Here we will choose a `FeedForward` neural network available in `pina.model`, and we will train it using the `PINN` solver from `pina.solver`. We highlight that this training is fairly simple; for more advanced topics, consider the tutorials in the ***Physics Informed Neural Networks*** section of ***Tutorials***. For training we use the `Trainer` class from `pina.trainer`. Here we show a very short training and some methods for plotting the results. Notice that by default all relevant metrics (e.g. the MSE error during training) are tracked using a `lightning` logger, by default `CSVLogger`. If you want to track the metrics yourself without a logger, use `pina.callback.MetricTracker`.

 # In[7]:

 from pina import Trainer
-from pina.solvers import PINN
+from pina.solver import PINN
 from pina.model import FeedForward
-from pina.callbacks import MetricTracker
+from pina.callback import MetricTracker


 # build the model
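The hunk ends just before the model is actually built. The following is a minimal sketch of the training setup the text above describes, using only the names imported in this hunk; the layer sizes, epoch count and accelerator are assumptions, and argument names may differ slightly between PINA releases.

model = FeedForward(
    input_dimensions=len(problem.input_variables),
    output_dimensions=len(problem.output_variables),
)
pinn = PINN(problem=problem, model=model)
trainer = Trainer(
    solver=pinn,
    max_epochs=100,               # assumption: a short demo training
    callbacks=[MetricTracker()],  # track metrics without relying on the CSVLogger
    accelerator='cpu',            # assumption
)
trainer.train()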
@@ -229,15 +233,22 @@ trainer.train()

 # inspecting final loss
 trainer.logged_metrics
+print(type(problem.truth_solution))


-# By using the `Plotter` class from **PINA** we can also do some quatitative plots of the solution.
+# By using `matplotlib` we can also do some qualitative plots of the solution.

 # In[9]:

 # plotting the solution
-#pl.plot(solver=pinn)
+pts = pinn.problem.spatial_domain.sample(256, 'grid', variables='x')
+predicted_output = pinn.forward(pts).extract('u').as_subclass(torch.Tensor).cpu().detach()
+true_output = pinn.problem.truth_solution(pts).cpu().detach()
+pts = pts.cpu()
+fig, ax = plt.subplots(nrows=1, ncols=1, figsize=(8, 8))
+ax.plot(pts.extract(['x']), predicted_output, label='Neural Network solution')
+ax.plot(pts.extract(['x']), true_output, label='True solution')
+plt.legend()


 # The neural network solution overlaps the true one; the two are barely distinguishable. We can also easily plot the loss:
@@ -245,7 +256,20 @@ trainer.logged_metrics

 # In[10]:

-#pl.plot_loss(trainer=trainer, label = 'mean_loss', logy=True)
+list_ = [
+    idx for idx, s in enumerate(trainer.callbacks)
+    if isinstance(s, MetricTracker)
+]
+print(list_[0])
+trainer_metrics = trainer.callbacks[list_[0]].metrics
+
+loss = trainer_metrics['val_loss']
+epochs = range(len(loss))
+plt.plot(epochs, loss.cpu())
+# plotting
+plt.xlabel('epoch')
+plt.ylabel('loss')
+plt.yscale('log')


 # As we can see, the loss has not reached a minimum, suggesting that we could train for longer.
tutorials/tutorial3/tutorial.ipynb (vendored, 280 changed lines)
File diff suppressed because one or more lines are too long
tutorials/tutorial3/tutorial.py (vendored, 171 changed lines)
@@ -9,7 +9,7 @@
 #
 # First of all, some useful imports.

-# In[1]:
+# In[12]:


 ## routine needed to run the notebook on Google Colab
@@ -23,6 +23,7 @@ if IN_COLAB:

 import torch
+import matplotlib.pylab as plt
 from pina.problem import SpatialProblem, TimeDependentProblem
 from pina.operator import laplacian, grad
 from pina.domain import CartesianDomain
@@ -30,7 +31,7 @@ from pina.solver import PINN
 from pina.trainer import Trainer
 from pina.equation import Equation
 from pina.equation.equation_factory import FixedValue
-from pina import Condition
+from pina import Condition, LabelTensor


 # ## The problem definition
@@ -49,7 +50,7 @@ from pina import Condition

 # Now, the wave problem is written in PINA code as a class, inheriting from `SpatialProblem` and `TimeDependentProblem`, since we deal with spatial and time-dependent variables. The equations are written as `conditions` that must be satisfied in the corresponding domains. `truth_solution` is the exact solution, which will be compared with the predicted one.

-# In[2]:
+# In[13]:


 class Wave(TimeDependentProblem, SpatialProblem):
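The body of the `Wave` class falls outside this hunk. As a hedged illustration of the kind of residual such a class registers as a `Condition`, written with the operators imported above and assuming unit wave speed (the actual equation in the tutorial may differ):

def wave_equation(input_, output_):
    # second time derivative of u
    u_t = grad(output_, input_, components=['u'], d=['t'])
    u_tt = grad(u_t, input_, d=['t'])
    # spatial Laplacian of u
    nabla_u = laplacian(output_, input_, components=['u'], d=['x', 'y'])
    # residual of u_tt = nabla(u)
    return nabla_u - u_tt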
@@ -95,7 +96,7 @@ problem = Wave()
 #
 # where $NN$ is the neural net output. This neural network takes as input the coordinates (in this case $x$, $y$ and $t$) and provides the unknown field $u$. By construction, it is zero on the boundaries. The residuals of the equations are evaluated at several sampling points (which the user can manipulate using the method `discretise_domain`) and the loss minimized by the neural network is the sum of the residuals.

-# In[3]:
+# In[14]:


 class HardMLP(torch.nn.Module):
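The definition of `HardMLP` is also cut off here. A minimal sketch of the hard-constraint idea the text describes, assuming a unit-square spatial domain (the actual tutorial class may differ):

import torch

class HardConstraintSketch(torch.nn.Module):
    """Hypothetical wrapper: multiply the raw network output by a factor
    that vanishes on the boundary of an assumed [0, 1] x [0, 1] domain."""

    def __init__(self, net):
        super().__init__()
        self.net = net  # any torch.nn.Module mapping (x, y, t) -> NN(x, y, t)

    def forward(self, pts):
        x, y = pts.extract(['x']), pts.extract(['y'])
        # x*(1-x)*y*(1-y) is zero on the boundary, so the product is too
        return x * (1 - x) * y * (1 - y) * self.net(pts)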
@@ -119,7 +120,7 @@ class HardMLP(torch.nn.Module):

 # In this tutorial, the neural network is trained for 1000 epochs with a learning rate of 0.001 (default in `PINN`). Training takes approximately 3 minutes.

-# In[4]:
+# In[15]:


 # generate the data
@@ -135,22 +136,88 @@ trainer.train()

 # Notice that the loss on the boundaries of the spatial domain is exactly zero, as expected! After the training is completed one can now plot some results using the `Plotter` class of **PINA**.

-# In[5]:
+# In[16]:

-#plotter = Plotter()

 # plotting at fixed time t = 0.0
-#print('Plotting at t=0')
+print('Plotting at t=0')
-#plotter.plot(pinn, fixed_variables={'t': 0.0})
+
+fixed_variables={'t': 0.0}
+method='contourf'
+pts = pinn.problem.spatial_domain.sample(256, 'grid', variables=['x','y'])
+grids = [p_.reshape(256, 256) for p_ in pts.extract(['x','y']).T]
+fixed_pts = torch.ones(pts.shape[0], len(fixed_variables))
+fixed_pts *= torch.tensor(list(fixed_variables.values()))
+fixed_pts = fixed_pts.as_subclass(LabelTensor)
+fixed_pts.labels = list(fixed_variables.keys())
+pts = pts.append(fixed_pts)
+pts = pts.to(device=pinn.device)
+predicted_output = pinn.forward(pts).extract('u').as_subclass(torch.Tensor).cpu().detach().reshape(256,256)
+true_output = pinn.problem.truth_solution(pts).cpu().detach().reshape(256,256)
+pts = pts.cpu()
+grids = [p_.reshape(256, 256) for p_ in pts.extract(['x','y']).T]
+fig, ax = plt.subplots(nrows=1, ncols=3, figsize=(16, 6))
+cb = getattr(ax[0], method)(*grids, predicted_output)
+fig.colorbar(cb, ax=ax[0])
+ax[0].title.set_text('Neural Network prediction')
+cb = getattr(ax[1], method)(*grids, true_output)
+fig.colorbar(cb, ax=ax[1])
+ax[1].title.set_text('True solution')
+cb = getattr(ax[2],method)(*grids,(true_output - predicted_output))
+fig.colorbar(cb, ax=ax[2])
+ax[2].title.set_text('Residual')
 # plotting at fixed time t = 0.5
-#print('Plotting at t=0.5')
+print('Plotting at t=0.5')
-#plotter.plot(pinn, fixed_variables={'t': 0.5})
+
+fixed_variables={'t': 0.5}
+pts = pinn.problem.spatial_domain.sample(256, 'grid', variables=['x','y'])
+fixed_pts = torch.ones(pts.shape[0], len(fixed_variables))
+fixed_pts *= torch.tensor(list(fixed_variables.values()))
+fixed_pts = fixed_pts.as_subclass(LabelTensor)
+fixed_pts.labels = list(fixed_variables.keys())
+pts = pts.append(fixed_pts)
+pts = pts.to(device=pinn.device)
+predicted_output = pinn.forward(pts).extract('u').as_subclass(torch.Tensor).cpu().detach().reshape(256,256)
+true_output = pinn.problem.truth_solution(pts).cpu().detach().reshape(256,256)
+pts = pts.cpu()
+grids = [p_.reshape(256, 256) for p_ in pts.extract(['x','y']).T]
+fig, ax = plt.subplots(nrows=1, ncols=3, figsize=(16, 6))
+cb = getattr(ax[0], method)(*grids, predicted_output)
+fig.colorbar(cb, ax=ax[0])
+ax[0].title.set_text('Neural Network prediction')
+cb = getattr(ax[1], method)(*grids, true_output)
+fig.colorbar(cb, ax=ax[1])
+ax[1].title.set_text('True solution')
+cb = getattr(ax[2],method)(*grids,(true_output - predicted_output))
+fig.colorbar(cb, ax=ax[2])
+ax[2].title.set_text('Residual')
 # plotting at fixed time t = 1.
-#print('Plotting at t=1')
+print('Plotting at t=1')
-#plotter.plot(pinn, fixed_variables={'t': 1.0})
+fixed_variables={'t': 1.0}
+pts = pinn.problem.spatial_domain.sample(256, 'grid', variables=['x','y'])
+fixed_pts = torch.ones(pts.shape[0], len(fixed_variables))
+fixed_pts *= torch.tensor(list(fixed_variables.values()))
+fixed_pts = fixed_pts.as_subclass(LabelTensor)
+fixed_pts.labels = list(fixed_variables.keys())
+pts = pts.append(fixed_pts)
+pts = pts.to(device=pinn.device)
+predicted_output = pinn.forward(pts).extract('u').as_subclass(torch.Tensor).cpu().detach().reshape(256,256)
+true_output = pinn.problem.truth_solution(pts).cpu().detach().reshape(256,256)
+pts = pts.cpu()
+grids = [p_.reshape(256, 256) for p_ in pts.extract(['x','y']).T]
+fig, ax = plt.subplots(nrows=1, ncols=3, figsize=(16, 6))
+cb = getattr(ax[0], method)(*grids, predicted_output)
+fig.colorbar(cb, ax=ax[0])
+ax[0].title.set_text('Neural Network prediction')
+cb = getattr(ax[1], method)(*grids, true_output)
+fig.colorbar(cb, ax=ax[1])
+ax[1].title.set_text('True solution')
+cb = getattr(ax[2],method)(*grids,(true_output - predicted_output))
+fig.colorbar(cb, ax=ax[2])
+ax[2].title.set_text('Residual')


 # The results are not so great, and we can clearly see that as time progresses the solution gets worse... Can we do better?
@@ -161,7 +228,7 @@ trainer.train()
 #
 # Let us build the network first

-# In[6]:
+# In[17]:


 class HardMLPtime(torch.nn.Module):
@@ -184,7 +251,7 @@ class HardMLPtime(torch.nn.Module):

 # Now let's train with the same configuration as the previous test

-# In[7]:
+# In[18]:


 # generate the data
@@ -200,22 +267,88 @@ trainer.train()

 # We can clearly see that the loss is way lower now. Let's plot the results.

-# In[8]:
+# In[19]:

-#plotter = Plotter()

 # plotting at fixed time t = 0.0
-#print('Plotting at t=0')
+print('Plotting at t=0')
-#plotter.plot(pinn, fixed_variables={'t': 0.0})
+
+fixed_variables={'t': 0.0}
+method='contourf'
+pts = pinn.problem.spatial_domain.sample(256, 'grid', variables=['x','y'])
+grids = [p_.reshape(256, 256) for p_ in pts.extract(['x','y']).T]
+fixed_pts = torch.ones(pts.shape[0], len(fixed_variables))
+fixed_pts *= torch.tensor(list(fixed_variables.values()))
+fixed_pts = fixed_pts.as_subclass(LabelTensor)
+fixed_pts.labels = list(fixed_variables.keys())
+pts = pts.append(fixed_pts)
+pts = pts.to(device=pinn.device)
+predicted_output = pinn.forward(pts).extract('u').as_subclass(torch.Tensor).cpu().detach().reshape(256,256)
+true_output = pinn.problem.truth_solution(pts).cpu().detach().reshape(256,256)
+pts = pts.cpu()
+grids = [p_.reshape(256, 256) for p_ in pts.extract(['x','y']).T]
+fig, ax = plt.subplots(nrows=1, ncols=3, figsize=(16, 6))
+cb = getattr(ax[0], method)(*grids, predicted_output)
+fig.colorbar(cb, ax=ax[0])
+ax[0].title.set_text('Neural Network prediction')
+cb = getattr(ax[1], method)(*grids, true_output)
+fig.colorbar(cb, ax=ax[1])
+ax[1].title.set_text('True solution')
+cb = getattr(ax[2],method)(*grids,(true_output - predicted_output))
+fig.colorbar(cb, ax=ax[2])
+ax[2].title.set_text('Residual')
 # plotting at fixed time t = 0.5
-#print('Plotting at t=0.5')
+print('Plotting at t=0.5')
-#plotter.plot(pinn, fixed_variables={'t': 0.5})
+
+fixed_variables={'t': 0.5}
+pts = pinn.problem.spatial_domain.sample(256, 'grid', variables=['x','y'])
+fixed_pts = torch.ones(pts.shape[0], len(fixed_variables))
+fixed_pts *= torch.tensor(list(fixed_variables.values()))
+fixed_pts = fixed_pts.as_subclass(LabelTensor)
+fixed_pts.labels = list(fixed_variables.keys())
+pts = pts.append(fixed_pts)
+pts = pts.to(device=pinn.device)
+predicted_output = pinn.forward(pts).extract('u').as_subclass(torch.Tensor).cpu().detach().reshape(256,256)
+true_output = pinn.problem.truth_solution(pts).cpu().detach().reshape(256,256)
+pts = pts.cpu()
+grids = [p_.reshape(256, 256) for p_ in pts.extract(['x','y']).T]
+fig, ax = plt.subplots(nrows=1, ncols=3, figsize=(16, 6))
+cb = getattr(ax[0], method)(*grids, predicted_output)
+fig.colorbar(cb, ax=ax[0])
+ax[0].title.set_text('Neural Network prediction')
+cb = getattr(ax[1], method)(*grids, true_output)
+fig.colorbar(cb, ax=ax[1])
+ax[1].title.set_text('True solution')
+cb = getattr(ax[2],method)(*grids,(true_output - predicted_output))
+fig.colorbar(cb, ax=ax[2])
+ax[2].title.set_text('Residual')
 # plotting at fixed time t = 1.
-#print('Plotting at t=1')
+print('Plotting at t=1')
-#plotter.plot(pinn, fixed_variables={'t': 1.0})
+fixed_variables={'t': 1.0}
+pts = pinn.problem.spatial_domain.sample(256, 'grid', variables=['x','y'])
+fixed_pts = torch.ones(pts.shape[0], len(fixed_variables))
+fixed_pts *= torch.tensor(list(fixed_variables.values()))
+fixed_pts = fixed_pts.as_subclass(LabelTensor)
+fixed_pts.labels = list(fixed_variables.keys())
+pts = pts.append(fixed_pts)
+pts = pts.to(device=pinn.device)
+predicted_output = pinn.forward(pts).extract('u').as_subclass(torch.Tensor).cpu().detach().reshape(256,256)
+true_output = pinn.problem.truth_solution(pts).cpu().detach().reshape(256,256)
+pts = pts.cpu()
+grids = [p_.reshape(256, 256) for p_ in pts.extract(['x','y']).T]
+fig, ax = plt.subplots(nrows=1, ncols=3, figsize=(16, 6))
+cb = getattr(ax[0], method)(*grids, predicted_output)
+fig.colorbar(cb, ax=ax[0])
+ax[0].title.set_text('Neural Network prediction')
+cb = getattr(ax[1], method)(*grids, true_output)
+fig.colorbar(cb, ax=ax[1])
+ax[1].title.set_text('True solution')
+cb = getattr(ax[2],method)(*grids,(true_output - predicted_output))
+fig.colorbar(cb, ax=ax[2])
+ax[2].title.set_text('Residual')


 # We can see now that the results are way better! This is because previously the network was not correctly learning the initial condition, leading to a poor solution as time evolved. By imposing the initial condition, the network is able to correctly solve the problem.
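The `HardMLPtime` definition is likewise outside the visible hunks. One common way to impose the initial condition exactly, which is the mechanism the sentence above refers to, is sketched below; `u0` is a placeholder for the problem's initial datum and is an assumption, not code from the commit.

import torch

class HardICSketch(torch.nn.Module):
    """Hypothetical sketch: the output reduces to u0(x, y) at t = 0 by construction."""

    def __init__(self, net, u0):
        super().__init__()
        self.net = net
        self.u0 = u0  # callable (x, y) -> initial condition

    def forward(self, pts):
        x, y, t = pts.extract(['x']), pts.extract(['y']), pts.extract(['t'])
        # the second term vanishes at t = 0 and on the spatial boundary
        return self.u0(x, y) + t * x * (1 - x) * y * (1 - y) * self.net(pts)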
tutorials/tutorial4/tutorial.ipynb (vendored, 100 changed lines)
File diff suppressed because one or more lines are too long
tutorials/tutorial4/tutorial.py (vendored, 60 changed lines)
@@ -530,16 +530,16 @@ net.eval()
 output = net(input_data).detach()

 # visualize data
-#fig, axes = plt.subplots(nrows=1, ncols=2, figsize=(8, 3))
-#pic1 = axes[0].scatter(grid[:, 0], grid[:, 1], c=input_data[0, 0, :, -1])
-#axes[0].set_title("Real")
-#fig.colorbar(pic1)
-#plt.subplot(1, 2, 2)
-#pic2 = axes[1].scatter(grid[:, 0], grid[:, 1], c=output[0, 0, :, -1])
-#axes[1].set_title("Autoencoder")
-#fig.colorbar(pic2)
-#plt.tight_layout()
-#plt.show()
+fig, axes = plt.subplots(nrows=1, ncols=2, figsize=(8, 3))
+pic1 = axes[0].scatter(grid[:, 0], grid[:, 1], c=input_data[0, 0, :, -1])
+axes[0].set_title("Real")
+fig.colorbar(pic1)
+plt.subplot(1, 2, 2)
+pic2 = axes[1].scatter(grid[:, 0], grid[:, 1], c=output[0, 0, :, -1])
+axes[1].set_title("Autoencoder")
+fig.colorbar(pic2)
+plt.tight_layout()
+plt.show()


 # As we can see, the two solutions are really similar! We can compute the $l_2$ error quite easily as well:
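The helper `l2_error` used below is defined elsewhere in the tutorial and does not appear in this diff; a plausible definition, consistent with the percentage formatting used later, is:

import torch

def l2_error(true, pred):
    # relative L2 error between two tensors of the same shape
    return torch.linalg.norm(pred - true) / torch.linalg.norm(true)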
@@ -579,16 +579,16 @@ latent = net.encoder(input_data)
 output = net.decoder(latent, input_data2).detach()

 # show the picture
-#fig, axes = plt.subplots(nrows=1, ncols=2, figsize=(8, 3))
-#pic1 = axes[0].scatter(grid2[:, 0], grid2[:, 1], c=input_data2[0, 0, :, -1])
-#axes[0].set_title("Real")
-#fig.colorbar(pic1)
-#plt.subplot(1, 2, 2)
-#pic2 = axes[1].scatter(grid2[:, 0], grid2[:, 1], c=output[0, 0, :, -1])
-# axes[1].set_title("Up-sampling")
-#fig.colorbar(pic2)
-#plt.tight_layout()
-#plt.show()
+fig, axes = plt.subplots(nrows=1, ncols=2, figsize=(8, 3))
+pic1 = axes[0].scatter(grid2[:, 0], grid2[:, 1], c=input_data2[0, 0, :, -1])
+axes[0].set_title("Real")
+fig.colorbar(pic1)
+plt.subplot(1, 2, 2)
+pic2 = axes[1].scatter(grid2[:, 0], grid2[:, 1], c=output[0, 0, :, -1])
+axes[1].set_title("Up-sampling")
+fig.colorbar(pic2)
+plt.tight_layout()
+plt.show()


 # As we can see, we have a very good approximation of the original function, even though some noise is present. Let's calculate the error now:
@@ -621,16 +621,16 @@ latent = net.encoder(input_data2)
 output = net.decoder(latent, input_data2).detach()

 # show the picture
-#fig, axes = plt.subplots(nrows=1, ncols=2, figsize=(8, 3))
-#pic1 = axes[0].scatter(grid2[:, 0], grid2[:, 1], c=input_data2[0, 0, :, -1])
-#axes[0].set_title("Real")
-#fig.colorbar(pic1)
-#plt.subplot(1, 2, 2)
-#pic2 = axes[1].scatter(grid2[:, 0], grid2[:, 1], c=output[0, 0, :, -1])
-#axes[1].set_title("Autoencoder not re-trained")
-#fig.colorbar(pic2)
-#plt.tight_layout()
-#plt.show()
+fig, axes = plt.subplots(nrows=1, ncols=2, figsize=(8, 3))
+pic1 = axes[0].scatter(grid2[:, 0], grid2[:, 1], c=input_data2[0, 0, :, -1])
+axes[0].set_title("Real")
+fig.colorbar(pic1)
+plt.subplot(1, 2, 2)
+pic2 = axes[1].scatter(grid2[:, 0], grid2[:, 1], c=output[0, 0, :, -1])
+axes[1].set_title("Autoencoder not re-trained")
+fig.colorbar(pic2)
+plt.tight_layout()
+plt.show()

 # calculate l2 error
 print(f'l2 error: {l2_error(input_data2[0, 0, :, -1], output[0, 0, :, -1]):.2%}')
tutorials/tutorial9/tutorial.ipynb (vendored, 41 changed lines)
File diff suppressed because one or more lines are too long
tutorials/tutorial9/tutorial.py (vendored, 12 changed lines)
@@ -29,7 +29,7 @@ if IN_COLAB:
 import torch
 import matplotlib.pyplot as plt
 plt.style.use('tableau-colorblind10')
-from pina import Condition#,Plotter as pl
+from pina import Condition
 from pina.problem import SpatialProblem
 from pina.operator import laplacian
 from pina.model import FeedForward
@@ -154,8 +154,14 @@ trainer.train()

 # In[5]:

-#pl = Plotter()
-#pl.plot(pinn)
+pts = pinn.problem.spatial_domain.sample(256, 'grid', variables='x')
+predicted_output = pinn.forward(pts).extract('u').as_subclass(torch.Tensor).cpu().detach()
+true_output = pinn.problem.truth_solution(pts).cpu().detach()
+pts = pts.cpu()
+fig, ax = plt.subplots(nrows=1, ncols=1, figsize=(8, 8))
+ax.plot(pts.extract(['x']), predicted_output, label='Neural Network solution')
+ax.plot(pts.extract(['x']), true_output, label='True solution')
+plt.legend()


 # Great, they overlap perfectly! This seems a good result, considering the simple neural network used to solve this (complex) problem. We will now test the neural network on the domain $[-4, 4]$ without retraining. In principle the periodicity should be present, since the $v$ function ensures periodicity in $(-\infty, \infty)$.
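The extrapolation test described above is not part of this hunk. A hedged sketch of how it could be run with the same plotting pattern used earlier in this commit (the `CartesianDomain` import and the sample size are assumptions):

from pina.domain import CartesianDomain

# evaluate the trained model outside the training interval, without retraining
extended_domain = CartesianDomain({'x': [-4, 4]})
pts_ext = extended_domain.sample(512, 'grid', variables='x')
pred_ext = pinn.forward(pts_ext).extract('u').as_subclass(torch.Tensor).cpu().detach()

fig, ax = plt.subplots(nrows=1, ncols=1, figsize=(8, 4))
ax.plot(pts_ext.extract(['x']).cpu(), pred_ext, label='Neural Network solution on [-4, 4]')
plt.legend()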