fix tutorial poisson

This commit is contained in:
Nicola Demo
2022-11-29 13:01:36 +01:00
parent 936f5e1043
commit e6f935842e
17 changed files with 1240 additions and 1176 deletions

tutorials/README.md

@@ -5,6 +5,7 @@ In this folder we collect useful tutorials in order to understand the principles
| Name | Description | Type of Problem |
|-------|---------------|-------------------|
| Tutorial1 [[.ipynb](tutorial1/tutorial-1.ipynb), [.py](tutorial1/tutorial-1.py), [.html](http://mathlab.github.io/PINA/_rst/tutorial1/tutorial-1.html)]| Poisson problem on regular domain using extra features | `SpatialProblem` |
| Tutorial1 [[.ipynb](), [.py](), [.html]()]| Coming soon | |
| Tutorial2 [[.ipynb](tutorial2/tutorial.ipynb), [.py](tutorial2/tutorial.py), [.html](http://mathlab.github.io/PINA/_rst/tutorial2/tutorial.html)]| Poisson problem on regular domain using extra features | `SpatialProblem` |

tutorials/tutorial1/tutorial-1.ipynb

File diff suppressed because one or more lines are too long

tutorials/tutorial1/tutorial-1.py

@@ -1,263 +0,0 @@
#!/usr/bin/env python
# coding: utf-8
# This tutorial presents how to solve with Physics-Informed Neural Networks a 2-D Poisson problem with Dirichlet boundary conditions.
# We consider a Poisson problem with a sinusoidal forcing term, in the square domain D = [0, 1]*[0, 1], with boundaries gamma1, gamma2, gamma3, gamma4.
# First of all, some useful imports.
import os
import torch
from torch.nn import Softplus
from pina.problem import SpatialProblem
from pina.operators import nabla
from pina.model import FeedForward
from pina import Condition, Span, PINN, LabelTensor, Plotter
# Now, the Poisson problem is written in PINA code as a class. The equations are written as conditions that should be satisfied in the corresponding domains. truth_solution is the exact solution, which will be compared with the predicted one.
class Poisson(SpatialProblem):
output_variables = ['u']
spatial_domain = Span({'x': [0, 1], 'y': [0, 1]})
def laplace_equation(input_, output_):
force_term = (torch.sin(input_.extract(['x'])*torch.pi) *
torch.sin(input_.extract(['y'])*torch.pi))
nabla_u = nabla(output_, input_, components=['u'], d=['x', 'y'])
return nabla_u - force_term
def nil_dirichlet(input_, output_):
value = 0.0
return output_.extract(['u']) - value
conditions = {
'gamma1': Condition(Span({'x': [0, 1], 'y': 1}), nil_dirichlet),
'gamma2': Condition(Span({'x': [0, 1], 'y': 0}), nil_dirichlet),
'gamma3': Condition(Span({'x': 1, 'y': [0, 1]}), nil_dirichlet),
'gamma4': Condition(Span({'x': 0, 'y': [0, 1]}), nil_dirichlet),
'D': Condition(Span({'x': [0, 1], 'y': [0, 1]}), laplace_equation),
}
def poisson_sol(self, pts):
return -(
torch.sin(pts.extract(['x'])*torch.pi)*
torch.sin(pts.extract(['y'])*torch.pi)
)/(2*torch.pi**2)
truth_solution = poisson_sol
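# A quick check (not in the original script) that poisson_sol is the exact solution:
# differentiating sin(pi*x)*sin(pi*y) twice in each variable multiplies it by -pi**2,
# so Delta[sin(pi*x)*sin(pi*y)] = -2*pi**2 * sin(pi*x)*sin(pi*y), and therefore
# Delta[-sin(pi*x)*sin(pi*y)/(2*pi**2)] = sin(pi*x)*sin(pi*y), i.e. the forcing term.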
# Then, a feed-forward neural network is defined through the class FeedForward. A 2D grid of points is instantiated inside the square domain and on its boundaries. The network takes as input the coordinates of the grid points and gives as output the approximate solution of the Poisson problem. The residuals of the equations are evaluated at each grid point, and the loss minimized by the network is the sum of these residuals.
# In this tutorial, the neural network is composed of two hidden layers of 10 neurons each, and it is trained for 5000 epochs with a learning rate of 0.003. These parameters can be modified as desired.
# The output of the cell below is the final loss of the training phase of the PINN.
poisson_problem = Poisson()
model = FeedForward(layers=[10, 10],
output_variables=poisson_problem.output_variables,
input_variables=poisson_problem.input_variables)
pinn = PINN(poisson_problem, model, lr=0.003, regularizer=1e-8)
pinn.span_pts(20, 'grid', ['D'])
pinn.span_pts(20, 'grid', ['gamma1', 'gamma2', 'gamma3', 'gamma4'])
pinn.train(5000, 100)
# The loss trend is saved in a dedicated txt file located in 'tutorial1_files'.
os.makedirs('tutorial1_files', exist_ok=True)
with open('tutorial1_files/poisson_history.txt', 'w') as file_:
for i, losses in enumerate(pinn.history):
file_.write('{} {}\n'.format(i, sum(losses)))
pinn.save_state('tutorial1_files/pina.poisson')
# Now the Plotter class is used to plot the results.
# The solution predicted by the neural network is plotted on the left, the exact one is represented at the center, and on the right the error between the exact and the predicted solutions is shown.
plotter = Plotter()
plotter.plot(pinn)
# Now, the same problem is solved in a different way.
# A new neural network is defined, with an additional input variable, the so-called extra feature, which coincides with the forcing term in the Laplace equation.
# The set of input variables to the neural network is:
# [x, y, k(x, y)],
# where x and y are the coordinates of the points of the grid and k(x, y) is the forcing term evaluated at the grid points.
# This forcing term is initialized in the class 'myFeature'; the output of the cell below is, also in this case, the final loss of the PINN.
class myFeature(torch.nn.Module):
"""
Feature: sin(x)*sin(y)
"""
def __init__(self):
super(myFeature, self).__init__()
def forward(self, x):
t = (torch.sin(x.extract(['x'])*torch.pi) *
torch.sin(x.extract(['y'])*torch.pi))
return LabelTensor(t, ['sin(x)sin(y)'])
feat = [myFeature()]
poisson_problem = Poisson()
model_feat = FeedForward(
layers=[20, 20],
output_variables=poisson_problem.output_variables,
input_variables=poisson_problem.input_variables,
func=Softplus,
extra_features=feat
)
pinn_feat = PINN(
poisson_problem,
model_feat,
lr=0.03,
error_norm='mse',
regularizer=1e-8)
pinn_feat.span_pts(20, 'grid', locations=['gamma1', 'gamma2', 'gamma3', 'gamma4'])
pinn_feat.span_pts(20, 'grid', locations=['D'])
pinn_feat.train(5000, 100)
pinn_feat.save_state('pina.poisson')
# The losses are saved in a txt file as for the basic Poisson case.
with open('tutorial1_files/poisson_history_feat.txt', 'w') as file_:
for i, losses in enumerate(pinn_feat.history):
file_.write('{} {}\n'.format(i, sum(losses)))
pinn_feat.save_state('tutorial1_files/pina.poisson_feat')
# The predicted and exact solutions and the error between them are represented below.
plotter_feat = Plotter()
plotter_feat.plot(pinn_feat)
# Another way to predict the solution is to add a parametric forcing term of the Laplace equation as an extra feature. The parameters appearing in the expression of the extra feature are learned during the training phase of the neural network.
# The new Poisson problem is defined in the dedicated class 'ParametricPoisson', where the domain is no longer only spatial, but also includes the parameter space. In our case, the parameters' bounds are 0 and 30.
from pina.problem import ParametricProblem
class ParametricPoisson(SpatialProblem, ParametricProblem):
bounds_x = [0, 1]
bounds_y = [0, 1]
bounds_alpha = [0, 30]
bounds_beta = [0, 30]
spatial_variables = ['x', 'y']
parameters = ['alpha', 'beta']
output_variables = ['u']
spatial_domain = Span({'x': bounds_x, 'y': bounds_y})
parameter_domain = Span({'alpha': bounds_alpha, 'beta': bounds_beta})
def laplace_equation(input_, output_):
force_term = (torch.sin(input_.extract(['x'])*torch.pi) *
torch.sin(input_.extract(['y'])*torch.pi))
nabla_u = nabla(output_, input_, components=['u'], d=['x', 'y'])
return nabla_u - force_term
def nil_dirichlet(input_, output_):
value = 0.0
return output_.extract(['u']) - value
conditions = {
'gamma1': Condition(
Span({'x': bounds_x, 'y': bounds_y[1], 'alpha': bounds_alpha, 'beta': bounds_beta}),
nil_dirichlet),
'gamma2': Condition(
Span({'x': bounds_x, 'y': bounds_y[0], 'alpha': bounds_alpha, 'beta': bounds_beta}),
nil_dirichlet),
'gamma3': Condition(
Span({'x': bounds_x[1], 'y': bounds_y, 'alpha': bounds_alpha, 'beta': bounds_beta}),
nil_dirichlet),
'gamma4': Condition(
Span({'x': bounds_x[0], 'y': bounds_y, 'alpha': bounds_alpha, 'beta': bounds_beta}),
nil_dirichlet),
'D': Condition(
Span({'x': bounds_x, 'y': bounds_y, 'alpha': bounds_alpha, 'beta': bounds_beta}),
laplace_equation),
}
def poisson_sol(self, pts):
return -(
torch.sin(pts.extract(['x'])*torch.pi)*
torch.sin(pts.extract(['y'])*torch.pi)
)/(2*torch.pi**2)
# Here, as done for the other cases, the new parametric feature is defined and the neural network is re-initialized and trained, considering alpha and beta as two additional input parameters.
param_poisson_problem = ParametricPoisson()
class myFeature(torch.nn.Module):
"""
"""
def __init__(self):
super(myFeature, self).__init__()
def forward(self, x):
t = (x.extract(['beta'])*torch.sin(x.extract(['alpha'])*x.extract(['x'])*torch.pi)*
torch.sin(x.extract(['alpha'])*x.extract(['y'])*torch.pi))
return LabelTensor(t, ['b*sin(a*x)sin(a*y)'])
feat = [myFeature()]
model_learn = FeedForward(layers=[10, 10],
output_variables=param_poisson_problem.output_variables,
input_variables=param_poisson_problem.input_variables,
extra_features=feat)
pinn_learn = PINN(param_poisson_problem, model_learn, lr=0.003, regularizer=1e-8)
pinn_learn.span_pts(20, 'grid', ['D'])
pinn_learn.span_pts(20, 'grid', ['gamma1', 'gamma2', 'gamma3', 'gamma4'])
pinn_learn.train(5000, 100)
# The losses are saved as for the other two cases trained above.
with open('tutorial1_files/poisson_history_learn_feat.txt', 'w') as file_:
for i, losses in enumerate(pinn_learn.history):
file_.write('{} {}\n'.format(i, sum(losses)))
pinn_learn.save_state('tutorial1_files/pina.poisson_learn_feat')
# Here, the plot of the prediction error (below, on the right) shows that the prediction coming from the parametric PINN is more accurate than that of the basic PINN.
plotter_learn = Plotter()
plotter_learn.plot(pinn_learn)
# Now the files containing the loss trends for the three cases are read and the loss histories are compared; we can see that the loss decreases faster for the PINNs with extra features.
import pandas as pd
df = pd.read_csv("tutorial1_files/poisson_history.txt", sep=" ", header=None)
# rescale the checkpoint index to epochs, assuming the history is logged every
# 100 epochs (the second argument of pinn.train above)
epochs = df[0].to_numpy()*100
basic = df[1].to_numpy()
df_feat = pd.read_csv("tutorial1_files/poisson_history_feat.txt", sep=" ", header=None)
feat = df_feat[1].to_numpy()
df_learn = pd.read_csv("tutorial1_files/poisson_history_learn_feat.txt", sep=" ", header=None)
learn_feat = df_learn[1].to_numpy()
import matplotlib.pyplot as plt
plt.semilogy(epochs, basic, label='Basic PINN')
plt.semilogy(epochs, feat, label='PINN with extra-feature')
plt.semilogy(epochs, learn_feat, label='PINN with learnable extra-feature')
plt.legend()
plt.grid()
plt.show()

tutorials/tutorial2/tutorial.ipynb (new file)

File diff suppressed because one or more lines are too long

tutorials/tutorial2/tutorial.py (new file)

@@ -0,0 +1,249 @@
#!/usr/bin/env python
# coding: utf-8
# # Tutorial 2: resolution of Poisson problem and usage of extra-features
# ### The problem definition
# This tutorial shows how to solve a 2D Poisson problem with Dirichlet boundary conditions using Physics-Informed Neural Networks.
#
# The problem is written as:
# \begin{equation}
# \begin{cases}
# \Delta u = \sin{(\pi x)} \sin{(\pi y)} \text{ in } D, \\
# u = 0 \text{ on } \Gamma_1 \cup \Gamma_2 \cup \Gamma_3 \cup \Gamma_4,
# \end{cases}
# \end{equation}
# where $D$ is a square domain $[0,1]^2$, and $\Gamma_i$, with $i=1,...,4$, are the boundaries of the square.
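# For reference, this problem admits an analytical solution (implemented below as
# *truth_solution*):
#
# \begin{equation}
# u(x, y) = -\frac{\sin{(\pi x)}\sin{(\pi y)}}{2\pi^2},
# \end{equation}
#
# which is used to measure the accuracy of the trained networks.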
# First of all, some useful imports.
# In[1]:
import torch
from torch.nn import ReLU, Tanh, Softplus
from pina.problem import SpatialProblem
from pina.operators import nabla
from pina.model import FeedForward
from pina import Condition, Span, PINN, LabelTensor, Plotter
# Now, the Poisson problem is written in PINA code as a class. The equations are written as *conditions* that should be satisfied in the corresponding domains. *truth_solution*
# is the exact solution which will be compared with the predicted one.
# In[2]:
class Poisson(SpatialProblem):
output_variables = ['u']
spatial_domain = Span({'x': [0, 1], 'y': [0, 1]})
def laplace_equation(input_, output_):
force_term = (torch.sin(input_.extract(['x'])*torch.pi) *
torch.sin(input_.extract(['y'])*torch.pi))
nabla_u = nabla(output_, input_, components=['u'], d=['x', 'y'])
return nabla_u - force_term
def nil_dirichlet(input_, output_):
value = 0.0
return output_.extract(['u']) - value
conditions = {
'gamma1': Condition(Span({'x': [0, 1], 'y': 1}), nil_dirichlet),
'gamma2': Condition(Span({'x': [0, 1], 'y': 0}), nil_dirichlet),
'gamma3': Condition(Span({'x': 1, 'y': [0, 1]}), nil_dirichlet),
'gamma4': Condition(Span({'x': 0, 'y': [0, 1]}), nil_dirichlet),
'D': Condition(Span({'x': [0, 1], 'y': [0, 1]}), laplace_equation),
}
def poisson_sol(self, pts):
return -(
torch.sin(pts.extract(['x'])*torch.pi)*
torch.sin(pts.extract(['y'])*torch.pi)
)/(2*torch.pi**2)
truth_solution = poisson_sol
# ### The problem solution
# After the problem, the feed-forward neural network is defined through the class `FeedForward`. This neural network takes as input the coordinates (in this case $x$ and $y$) and provides the unknown field of the Poisson problem. The residuals of the equations are evaluated at several sampling points (which the user can manipulate using the method `span_pts`) and the loss minimized by the neural network is the sum of the residuals.
#
# In this tutorial, the neural network is composed of two hidden layers of 10 neurons each, and it is trained for 1000 epochs with a learning rate of 0.006. These parameters can be modified as desired.
# The output of the cell below is the final loss of the training phase of the PINN.
# We highlight that the generation of the sampling points and the training are here encapsulated within the function `generate_samples_and_train`, but only to save some lines of code in the next cells; such a function is not mandatory in the **PINA** framework.
# In[3]:
def generate_samples_and_train(model, problem):
pinn = PINN(problem, model, lr=0.006, regularizer=1e-8)
pinn.span_pts(20, 'grid', locations=['D'])
pinn.span_pts(20, 'grid', locations=['gamma1', 'gamma2', 'gamma3', 'gamma4'])
pinn.train(1000, 100)
return pinn
problem = Poisson()
model = FeedForward(
layers=[10, 10],
func=Softplus,
output_variables=problem.output_variables,
input_variables=problem.input_variables
)
pinn = generate_samples_and_train(model, problem)
# The neural network can of course be saved to a file. In this way, we can store it after training, and later load it just to infer the field. Here we don't store the model, but for demonstrative purposes the commented line of code is shown in the next cell.
# In[4]:
# pinn.save_state('pina.poisson')
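# A minimal restore sketch (hypothetical: it assumes this PINA version exposes a
# `load_state` counterpart to `save_state`):
# pinn_restored = PINN(problem, model)
# pinn_restored.load_state('pina.poisson')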
# Now the *Plotter* class is used to plot the results.
# The solution predicted by the neural network is plotted on the left, the exact one is represented at the center, and on the right the error between the exact and the predicted solutions is shown.
# In[5]:
plotter = Plotter()
plotter.plot(pinn)
# ### The problem solution with extra-features
# Now, the same problem is solved in a different way.
# A new neural network is defined, with an additional input variable, the so-called extra feature, which coincides with the forcing term in the Laplace equation.
# The set of input variables to the neural network is:
#
# \begin{equation}
# [x, y, k(x, y)], \text{ with } k(x, y)=\sin{(\pi x)}\sin{(\pi y)},
# \end{equation}
#
# where $x$ and $y$ are the spatial coordinates and $k(x, y)$ is the added feature.
#
# This feature is implemented in the class `SinSin`, which needs to inherit from the `torch.nn.Module` class and to implement the `forward` method. After declaring such a feature, we can incorporate it in the `FeedForward` class thanks to the `extra_features` argument.
# **NB**: `extra_features` always needs a `list` as input; if you have only one feature, just encapsulate it in a list, as in the next cell.
#
# Finally, we perform the same training as before: the problem is `Poisson`, the network is composed of the same number of neurons, and the optimizer parameters are equal to those of the previous test; the only change is the new extra feature.
# In[6]:
class SinSin(torch.nn.Module):
"""Feature: sin(x)*sin(y)"""
def __init__(self):
super().__init__()
def forward(self, x):
t = (torch.sin(x.extract(['x'])*torch.pi) *
torch.sin(x.extract(['y'])*torch.pi))
return LabelTensor(t, ['sin(x)sin(y)'])
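# A quick sanity check of the feature on a couple of sample points (a sketch, kept
# commented; the values follow from sin(pi/2)**2 = 1 and sin(pi/4)*sin(3*pi/4) = 0.5):
# pts = LabelTensor(torch.tensor([[0.5, 0.5], [0.25, 0.75]]), ['x', 'y'])
# print(SinSin()(pts))  # approximately [[1.0], [0.5]]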
model_feat = FeedForward(
layers=[10, 10],
output_variables=problem.output_variables,
input_variables=problem.input_variables,
func=Softplus,
extra_features=[SinSin()]
)
pinn_feat = generate_samples_and_train(model_feat, problem)
# The predicted and exact solutions and the error between them are represented below.
# We can easily note that our network, under almost the same conditions as before, is now able to reach an additional order of magnitude in accuracy.
# In[7]:
plotter.plot(pinn_feat)
# ### The problem solution with learnable extra-features
# We can still do better!
#
# Another way to exploit the extra features is to add learnable parameters inside them.
# In this way, the added parameters are learned during the training phase of the neural network. In this case, we use:
#
# \begin{equation}
# k(x, y) = \beta \sin{(\alpha \pi x)} \sin{(\alpha \pi y)},
# \end{equation}
#
# where $\alpha$ and $\beta$ are the aforementioned learnable parameters.
# Their implementation is quite trivial: by using the class `torch.nn.Parameter` we can define all the learnable parameters we need, and they are automatically managed by the `autograd` module!
# In[8]:
class SinSinAB(torch.nn.Module):
""" """
def __init__(self):
super().__init__()
self.alpha = torch.nn.Parameter(torch.tensor([1.0]))
self.beta = torch.nn.Parameter(torch.tensor([1.0]))
def forward(self, x):
t = (
self.beta*torch.sin(self.alpha*x.extract(['x'])*torch.pi)*
torch.sin(self.alpha*x.extract(['y'])*torch.pi)
)
return LabelTensor(t, ['b*sin(a*x)sin(a*y)'])
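# As a quick check (a sketch), the feature's alpha and beta are ordinary torch
# parameters, so they are exposed to the optimizer together with the network weights:
# print([name for name, _ in SinSinAB().named_parameters()])  # ['alpha', 'beta']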
model_learn = FeedForward(
layers=[10, 10],
output_variables=problem.output_variables,
input_variables=problem.input_variables,
extra_features=[SinSinAB()]
)
pinn_learn = generate_samples_and_train(model_learn, problem)
# Umh, the final loss is not appreciably better than the previous model's (with static extra features), despite the usage of learnable parameters. This is mainly due to the over-parametrization of the network: there are many parameters to optimize during the training, and the model is unable to understand automatically that only the parameters of the extra feature (and not the weights/biases of the FFN) should be tuned in order to fit our problem. A longer training can be helpful, but in this case the fastest way to reach machine precision for solving the Poisson problem is to remove all the hidden layers in the `FeedForward`, keeping only the $\alpha$ and $\beta$ parameters of the extra feature.
# In[9]:
model_learn = FeedForward(
layers=[],
output_variables=problem.output_variables,
input_variables=problem.input_variables,
extra_features=[SinSinAB()]
)
pinn_learn = generate_samples_and_train(model_learn, problem)
# In such a way, the model is able to reach a very high accuracy!
# Of course, this is a toy problem for understanding the usage of extra features: similar precision could be obtained whenever the extra features are very similar to the true solution. The analyzed Poisson problem has a forcing term very close to the solution, making it a perfect problem to address with such an approach.
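# Indeed (a short justification, not in the original notebook): with $\alpha = 1$ the
# feature reduces to $\beta\sin{(\pi x)}\sin{(\pi y)}$, and the exact solution
#
# \begin{equation}
# u(x, y) = -\frac{\sin{(\pi x)}\sin{(\pi y)}}{2\pi^2}
# \end{equation}
#
# is exactly of this form, so a linear-only model acting on the feature can represent
# it with no error.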
#
# We conclude by showing the graphical comparison of the unknown field and the loss trends for all the test cases presented: the standard PINN, the PINN with extra features, and the PINN with learnable extra features.
# In[10]:
plotter.plot(pinn_learn)
# In[11]:
import matplotlib.pyplot as plt
plt.figure(figsize=(16, 6))
plotter.plot_loss(pinn, label='Standard')
plotter.plot_loss(pinn_feat, label='Static Features')
plotter.plot_loss(pinn_learn, label='Learnable Features')
plt.grid()
plt.legend()
plt.show()