Tutorial (#91)

* tutorial update
This commit is contained in:
Dario Coscia
2023-05-08 16:19:59 +02:00
committed by GitHub
parent 2382ef55cd
commit 9de4e515f4
15 changed files with 244 additions and 528 deletions

View File

@@ -136,8 +136,8 @@ Equation (1) and try to write the PINA model class:
# Conditions to hold
conditions = {
'x0': Condition(Span({'x': 0.}), initial_condition),
'D': Condition(Span({'x': [0, 1]}), ode_equation),
'x0': Condition(location=Span({'x': 0.}), function=initial_condition),
'D': Condition(location=Span({'x': [0, 1]}), function=ode_equation),
}
# defining true solution
@@ -263,6 +263,7 @@ the results.
[epoch 03000] 4.049759e-04 2.937766e-06 4.020381e-04
After the training we have saved the final loss in ``final_loss``, which
we can inspect. By default PINA uses mean square error loss.

View File

@@ -21,7 +21,7 @@ First of all, some useful imports.
.. code:: ipython3
import torch
from torch.nn import ReLU, Tanh, Softplus
from torch.nn import Softplus
from pina.problem import SpatialProblem
from pina.operators import nabla
@@ -50,11 +50,11 @@ be compared with the predicted one.
return output_.extract(['u']) - value
conditions = {
'gamma1': Condition(Span({'x': [0, 1], 'y': 1}), nil_dirichlet),
'gamma2': Condition(Span({'x': [0, 1], 'y': 0}), nil_dirichlet),
'gamma3': Condition(Span({'x': 1, 'y': [0, 1]}), nil_dirichlet),
'gamma4': Condition(Span({'x': 0, 'y': [0, 1]}), nil_dirichlet),
'D': Condition(Span({'x': [0, 1], 'y': [0, 1]}), laplace_equation),
'gamma1': Condition(location=Span({'x': [0, 1], 'y': 1}), function=nil_dirichlet),
'gamma2': Condition(location=Span({'x': [0, 1], 'y': 0}), function=nil_dirichlet),
'gamma3': Condition(location=Span({'x': 1, 'y': [0, 1]}), function=nil_dirichlet),
'gamma4': Condition(location=Span({'x': 0, 'y': [0, 1]}), function=nil_dirichlet),
'D': Condition(location=Span({'x': [0, 1], 'y': [0, 1]}), function=laplace_equation),
}
def poisson_sol(self, pts):
@@ -108,28 +108,28 @@ is not mandatory in the **PINA** framework.
.. parsed-literal::
sum gamma1nil_di gamma2nil_di gamma3nil_di gamma4nil_di Dlaplace_equ
[epoch 00000] 4.821361e-01 7.271265e-02 5.749976e-02 7.188050e-02 5.793815e-02 2.221050e-01
[epoch 00000] 4.879922e-01 1.557781e-01 7.685463e-02 2.743466e-02 2.047883e-02 2.074460e-01
sum gamma1nil_di gamma2nil_di gamma3nil_di gamma4nil_di Dlaplace_equ
[epoch 00001] 3.231621e-01 2.852444e-02 1.981721e-02 2.768876e-02 2.037603e-02 2.267557e-01
[epoch 00001] 2.610107e-01 1.067532e-03 8.390929e-03 2.391219e-02 1.467707e-02 2.129630e-01
sum gamma1nil_di gamma2nil_di gamma3nil_di gamma4nil_di Dlaplace_equ
[epoch 00100] 1.015092e-01 5.198789e-04 2.826267e-03 3.158009e-03 2.300746e-03 9.270430e-02
[epoch 00100] 8.640952e-02 1.038323e-04 9.709063e-05 6.688796e-05 6.651071e-05 8.607519e-02
sum gamma1nil_di gamma2nil_di gamma3nil_di gamma4nil_di Dlaplace_equ
[epoch 00200] 8.891604e-02 4.115215e-04 5.373723e-04 5.063288e-04 5.177262e-04 8.694309e-02
[epoch 00200] 2.996790e-02 4.977722e-04 6.639907e-04 5.634258e-04 7.204801e-04 2.752223e-02
sum gamma1nil_di gamma2nil_di gamma3nil_di gamma4nil_di Dlaplace_equ
[epoch 00300] 8.620024e-02 3.734426e-04 4.014817e-04 3.966301e-04 4.261272e-04 8.460256e-02
[epoch 00300] 2.896983e-03 1.864277e-04 2.020803e-05 2.418693e-04 3.052877e-05 2.417949e-03
sum gamma1nil_di gamma2nil_di gamma3nil_di gamma4nil_di Dlaplace_equ
[epoch 00400] 8.090379e-02 3.381128e-04 2.724089e-04 2.855197e-04 3.383889e-04 7.966936e-02
[epoch 00400] 1.865673e-03 1.250375e-04 2.438288e-05 1.595948e-04 6.709602e-06 1.549948e-03
sum gamma1nil_di gamma2nil_di gamma3nil_di gamma4nil_di Dlaplace_equ
[epoch 00500] 7.000037e-02 2.501736e-04 7.233566e-05 1.258494e-04 1.898462e-04 6.936217e-02
[epoch 00500] 2.874877e-03 2.077810e-04 1.149128e-04 1.273361e-04 3.024802e-06 2.421822e-03
sum gamma1nil_di gamma2nil_di gamma3nil_di gamma4nil_di Dlaplace_equ
[epoch 00600] 2.645028e-02 9.258305e-05 2.108825e-04 1.832870e-04 7.366277e-05 2.588986e-02
[epoch 00600] 1.310072e-03 1.081258e-04 3.365631e-05 1.059794e-04 3.468987e-06 1.058841e-03
sum gamma1nil_di gamma2nil_di gamma3nil_di gamma4nil_di Dlaplace_equ
[epoch 00700] 2.599242e-03 5.990163e-05 9.679930e-05 1.735135e-04 3.957247e-05 2.229455e-03
[epoch 00700] 2.694587e-03 1.267468e-04 6.266955e-05 9.891923e-05 8.897325e-06 2.397354e-03
sum gamma1nil_di gamma2nil_di gamma3nil_di gamma4nil_di Dlaplace_equ
[epoch 00800] 1.343722e-03 6.899313e-05 4.569854e-05 1.231751e-04 1.892484e-05 1.086931e-03
[epoch 00800] 5.028690e-03 1.435707e-04 5.986574e-06 9.517078e-05 4.583780e-05 4.738124e-03
sum gamma1nil_di gamma2nil_di gamma3nil_di gamma4nil_di Dlaplace_equ
[epoch 00900] 8.533830e-04 6.269138e-05 2.274475e-05 8.422977e-05 1.782445e-05 6.658927e-04
[epoch 01000] 6.219158e-04 5.753698e-05 1.195975e-05 6.105051e-05 1.724382e-05 4.741247e-04
[epoch 00900] 9.997603e-04 9.684711e-05 9.155992e-06 8.875966e-05 1.261154e-05 7.923861e-04
[epoch 01000] 2.362966e-02 1.157872e-04 7.812096e-06 8.004917e-05 9.947084e-05 2.332654e-02
The neural network of course can be saved in a file. In such a way, we
@@ -153,7 +153,7 @@ and the predicted solutions is showed.
.. image:: output_13_0.png
.. image:: tutorial_files/tutorial_13_0.png
The problem solution with extra-features
@@ -209,28 +209,28 @@ new extra feature.
.. parsed-literal::
sum gamma1nil_di gamma2nil_di gamma3nil_di gamma4nil_di Dlaplace_equ
[epoch 00000] 8.334048e-02 1.480584e-02 1.326940e-02 1.505190e-02 1.282023e-02 2.739312e-02
[epoch 00000] 1.309440e-01 2.335824e-02 3.823499e-03 1.878588e-05 2.002613e-03 1.017409e-01
sum gamma1nil_di gamma2nil_di gamma3nil_di gamma4nil_di Dlaplace_equ
[epoch 00001] 2.369340e-02 1.785535e-03 1.441936e-03 1.978278e-03 1.193302e-03 1.729435e-02
[epoch 00001] 5.053994e-02 6.420787e-03 6.924602e-03 4.746807e-03 1.751946e-03 3.069580e-02
sum gamma1nil_di gamma2nil_di gamma3nil_di gamma4nil_di Dlaplace_equ
[epoch 00100] 4.190661e-05 5.259407e-06 2.207154e-06 1.740728e-06 1.258537e-06 3.144078e-05
[epoch 00100] 7.484706e-06 1.889349e-07 4.289622e-07 3.610726e-07 3.611258e-07 6.144610e-06
sum gamma1nil_di gamma2nil_di gamma3nil_di gamma4nil_di Dlaplace_equ
[epoch 00200] 2.964181e-06 3.873027e-08 3.952280e-08 6.926503e-08 4.859637e-08 2.768067e-06
[epoch 00200] 6.941436e-06 4.738185e-07 4.590637e-07 5.098815e-07 5.365398e-07 4.962133e-06
sum gamma1nil_di gamma2nil_di gamma3nil_di gamma4nil_di Dlaplace_equ
[epoch 00300] 2.477657e-06 3.019578e-08 3.888974e-08 5.290904e-08 4.751930e-08 2.308143e-06
[epoch 00300] 6.147081e-06 6.213511e-07 5.576677e-07 6.256337e-07 6.572442e-07 3.685184e-06
sum gamma1nil_di gamma2nil_di gamma3nil_di gamma4nil_di Dlaplace_equ
[epoch 00400] 2.054579e-06 2.595518e-08 3.504910e-08 4.605295e-08 4.163064e-08 1.905891e-06
[epoch 00400] 6.056770e-06 7.646217e-07 6.377599e-07 7.242416e-07 7.616553e-07 3.168491e-06
sum gamma1nil_di gamma2nil_di gamma3nil_di gamma4nil_di Dlaplace_equ
[epoch 00500] 1.716277e-06 2.342572e-08 3.247192e-08 4.101565e-08 3.697489e-08 1.582388e-06
[epoch 00500] 6.751128e-06 8.011474e-07 6.283512e-07 7.652199e-07 7.226305e-07 3.833779e-06
sum gamma1nil_di gamma2nil_di gamma3nil_di gamma4nil_di Dlaplace_equ
[epoch 00600] 1.461072e-06 2.217194e-08 3.119703e-08 3.734558e-08 3.372288e-08 1.336635e-06
[epoch 00600] 2.839740e-05 5.422368e-06 4.058312e-06 4.664194e-06 4.984503e-06 9.268020e-06
sum gamma1nil_di gamma2nil_di gamma3nil_di gamma4nil_di Dlaplace_equ
[epoch 00700] 1.275204e-06 2.180191e-08 3.080508e-08 3.476259e-08 3.154803e-08 1.156287e-06
[epoch 00700] 1.221099e-05 3.654685e-06 3.195583e-07 2.717753e-06 2.381476e-06 3.137519e-06
sum gamma1nil_di gamma2nil_di gamma3nil_di gamma4nil_di Dlaplace_equ
[epoch 00800] 1.141423e-06 2.190318e-08 3.084367e-08 3.297679e-08 3.010750e-08 1.025592e-06
[epoch 00800] 5.423951e-06 6.111856e-07 4.348901e-07 5.353588e-07 5.398895e-07 3.302627e-06
sum gamma1nil_di gamma2nil_di gamma3nil_di gamma4nil_di Dlaplace_equ
[epoch 00900] 1.043816e-06 2.220373e-08 3.104670e-08 3.163695e-08 2.905372e-08 9.298745e-07
[epoch 01000] 9.697858e-07 2.242846e-08 3.111799e-08 3.060282e-08 2.824710e-08 8.573894e-07
[epoch 00900] 6.777007e-06 3.749606e-07 1.421852e-06 4.068826e-08 1.292241e-06 3.647265e-06
[epoch 01000] 6.803403e-05 2.302543e-07 3.886034e-05 4.901193e-06 2.005441e-05 3.987827e-06
The predicted and exact solutions and the error between them are
@@ -244,7 +244,7 @@ order of magnitude in accuracy.
.. image:: output_18_0.png
.. image:: tutorial_files/tutorial_18_0.png
The problem solution with learnable extra-features
@@ -296,28 +296,28 @@ need, and they are managed by ``autograd`` module!
.. parsed-literal::
sum gamma1nil_di gamma2nil_di gamma3nil_di gamma4nil_di Dlaplace_equ
[epoch 00000] 3.918677e-01 2.501913e-02 1.278682e-02 1.963722e-02 1.756839e-02 3.168561e-01
[epoch 00000] 7.147130e-02 1.942330e-03 7.350697e-03 2.868338e-03 1.184232e-03 5.812570e-02
sum gamma1nil_di gamma2nil_di gamma3nil_di gamma4nil_di Dlaplace_equ
[epoch 00001] 1.345929e-01 1.696471e-02 9.475741e-03 1.432935e-02 1.169397e-02 8.212914e-02
[epoch 00001] 2.814954e-01 7.300152e-03 5.510583e-04 2.262258e-03 7.287678e-04 2.706531e-01
sum gamma1nil_di gamma2nil_di gamma3nil_di gamma4nil_di Dlaplace_equ
[epoch 00100] 4.500092e-04 1.441140e-05 9.839978e-06 2.283052e-05 4.087769e-06 3.988396e-04
[epoch 00100] 1.961870e-04 3.066778e-06 5.342949e-07 2.670689e-06 9.807675e-07 1.889345e-04
sum gamma1nil_di gamma2nil_di gamma3nil_di gamma4nil_di Dlaplace_equ
[epoch 00200] 2.102947e-04 1.462936e-05 2.168394e-06 4.655578e-06 4.340448e-07 1.884074e-04
[epoch 00200] 1.208203e-04 3.096610e-06 1.253595e-06 2.603416e-06 1.962141e-06 1.119046e-04
sum gamma1nil_di gamma2nil_di gamma3nil_di gamma4nil_di Dlaplace_equ
[epoch 00300] 1.371512e-04 1.072066e-05 1.284032e-06 2.897264e-06 1.126986e-06 1.211222e-04
[epoch 00300] 3.992990e-05 3.451424e-06 6.415143e-07 1.576505e-06 1.244609e-06 3.301585e-05
sum gamma1nil_di gamma2nil_di gamma3nil_di gamma4nil_di Dlaplace_equ
[epoch 00400] 9.371716e-05 7.952534e-06 1.115802e-06 2.099921e-06 1.375253e-06 8.117365e-05
[epoch 00400] 3.466437e-04 1.722332e-06 1.461791e-05 3.052185e-06 8.755493e-06 3.184958e-04
sum gamma1nil_di gamma2nil_di gamma3nil_di gamma4nil_di Dlaplace_equ
[epoch 00500] 6.719316e-05 5.919826e-06 9.837649e-07 1.510521e-06 1.423588e-06 5.735546e-05
[epoch 00500] 5.242374e-03 3.230991e-05 1.387528e-05 5.379211e-06 3.145076e-06 5.187664e-03
sum gamma1nil_di gamma2nil_di gamma3nil_di gamma4nil_di Dlaplace_equ
[epoch 00600] 5.042886e-05 4.428994e-06 8.414617e-07 1.083298e-06 1.338001e-06 4.273711e-05
[epoch 00600] 1.027368e-03 1.448758e-06 2.165510e-05 5.197179e-05 3.823021e-05 9.140619e-04
sum gamma1nil_di gamma2nil_di gamma3nil_di gamma4nil_di Dlaplace_equ
[epoch 00700] 3.907475e-05 3.327482e-06 7.004838e-07 7.866622e-07 1.162936e-06 3.309719e-05
[epoch 00700] 1.141694e-03 6.998039e-06 2.446730e-05 3.083524e-05 1.376935e-05 1.065624e-03
sum gamma1nil_di gamma2nil_di gamma3nil_di gamma4nil_di Dlaplace_equ
[epoch 00800] 3.086757e-05 2.501366e-06 5.700428e-07 5.815515e-07 9.500203e-07 2.626459e-05
[epoch 00800] 3.619534e-04 3.120772e-06 1.223103e-05 2.211869e-05 9.567964e-06 3.149150e-04
sum gamma1nil_di gamma2nil_di gamma3nil_di gamma4nil_di Dlaplace_equ
[epoch 00900] 2.470110e-05 1.874311e-06 4.546698e-07 4.359081e-07 7.396913e-07 2.119652e-05
[epoch 01000] 1.999130e-05 1.396229e-06 3.562134e-07 3.291411e-07 5.548665e-07 1.735485e-05
[epoch 00900] 3.287693e-04 2.432459e-06 7.569996e-06 1.101516e-05 4.546776e-06 3.032049e-04
[epoch 01000] 5.432598e-04 8.919213e-06 1.991732e-05 2.632461e-05 7.365395e-06 4.807333e-04
Umh, the final loss is not appreciably better than the previous model (with
@@ -346,28 +346,28 @@ removing all the hidden layers in the ``FeedForward``, keeping only the
.. parsed-literal::
sum gamma1nil_di gamma2nil_di gamma3nil_di gamma4nil_di Dlaplace_equ
[epoch 00000] 1.974945e+00 2.002993e-03 7.012323e-02 2.755559e-02 1.584911e-02 1.859414e+00
[epoch 00000] 1.907039e+01 5.862396e-02 5.423664e-01 4.624593e-01 7.118504e-02 1.793576e+01
sum gamma1nil_di gamma2nil_di gamma3nil_di gamma4nil_di Dlaplace_equ
[epoch 00001] 1.761779e+00 3.188374e-03 6.539153e-02 2.452723e-02 1.474262e-02 1.653930e+00
[epoch 00001] 1.698682e+01 3.348809e-02 4.943427e-01 3.972439e-01 6.141453e-02 1.600033e+01
sum gamma1nil_di gamma2nil_di gamma3nil_di gamma4nil_di Dlaplace_equ
[epoch 00100] 4.036187e-03 1.676370e-05 2.384196e-05 1.675912e-05 2.528631e-05 3.953536e-03
[epoch 00100] 8.010766e-02 1.765875e-04 6.100491e-04 1.604862e-04 5.841496e-04 7.857639e-02
sum gamma1nil_di gamma2nil_di gamma3nil_di gamma4nil_di Dlaplace_equ
[epoch 00200] 3.638973e-06 9.148435e-09 5.011525e-09 8.995231e-09 5.055353e-09 3.610763e-06
[epoch 00200] 5.057434e-02 6.479959e-05 6.590948e-05 6.376287e-05 5.975253e-05 5.032011e-02
sum gamma1nil_di gamma2nil_di gamma3nil_di gamma4nil_di Dlaplace_equ
[epoch 00300] 7.258809e-11 2.040413e-13 1.323202e-13 1.966580e-13 1.385408e-13 7.191653e-11
[epoch 00300] 1.974927e-02 3.145394e-05 1.531348e-05 3.037518e-05 1.363940e-05 1.965849e-02
sum gamma1nil_di gamma2nil_di gamma3nil_di gamma4nil_di Dlaplace_equ
[epoch 00400] 1.095777e-13 2.320287e-16 3.792855e-17 2.308433e-16 3.710536e-17 1.090398e-13
[epoch 00400] 1.763019e-03 3.408035e-06 8.902280e-07 3.228933e-06 7.512407e-07 1.754741e-03
sum gamma1nil_di gamma2nil_di gamma3nil_di gamma4nil_di Dlaplace_equ
[epoch 00500] 1.095686e-13 2.238822e-16 4.053546e-17 2.238880e-16 4.054121e-17 1.090398e-13
[epoch 00500] 2.604023e-05 5.248935e-08 1.091775e-08 4.940254e-08 9.077334e-09 2.591834e-05
sum gamma1nil_di gamma2nil_di gamma3nil_di gamma4nil_di Dlaplace_equ
[epoch 00600] 1.095686e-13 2.238991e-16 4.052415e-17 2.238992e-16 4.052421e-17 1.090398e-13
[epoch 00600] 7.279636e-08 1.490485e-10 3.004504e-11 1.392443e-10 2.490262e-11 7.245312e-08
sum gamma1nil_di gamma2nil_di gamma3nil_di gamma4nil_di Dlaplace_equ
[epoch 00700] 1.095686e-13 2.238992e-16 4.052411e-17 2.238992e-16 4.052410e-17 1.090398e-13
[epoch 00700] 2.307051e-11 5.051121e-14 1.083412e-14 4.412749e-14 8.684963e-15 2.295635e-11
sum gamma1nil_di gamma2nil_di gamma3nil_di gamma4nil_di Dlaplace_equ
[epoch 00800] 1.095686e-13 2.238992e-16 4.052411e-17 2.238992e-16 4.052410e-17 1.090398e-13
[epoch 00800] 9.755044e-12 1.745244e-14 3.232219e-15 1.735542e-14 3.347362e-15 9.713657e-12
sum gamma1nil_di gamma2nil_di gamma3nil_di gamma4nil_di Dlaplace_equ
[epoch 00900] 1.095686e-13 2.238992e-16 4.052411e-17 2.238992e-16 4.052410e-17 1.090398e-13
[epoch 01000] 1.095686e-13 2.238992e-16 4.052411e-17 2.238992e-16 4.052410e-17 1.090398e-13
[epoch 00900] 5.909113e-12 1.112281e-14 2.037945e-15 1.107687e-14 2.124603e-15 5.882751e-12
[epoch 01000] 3.220371e-12 5.622761e-15 1.002551e-15 5.519723e-15 9.455284e-16 3.207280e-12
In such a way, the model is able to reach a very high accuracy! Of
@@ -388,7 +388,7 @@ features.
.. image:: output_25_0.png
.. image:: tutorial_files/tutorial_25_0.png
.. code:: ipython3
@@ -406,5 +406,5 @@ features.
.. image:: output_26_0.png
.. image:: tutorial_files/tutorial_26_0.png

Binary file not shown.

After

Width:  |  Height:  |  Size: 23 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 23 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 20 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 54 KiB

View File

@@ -63,12 +63,12 @@ predicted one.
return output_.extract(['u']) - u_expected
conditions = {
'gamma1': Condition(Span({'x': [0, 1], 'y': 1, 't': [0, 1]}), nil_dirichlet),
'gamma2': Condition(Span({'x': [0, 1], 'y': 0, 't': [0, 1]}), nil_dirichlet),
'gamma3': Condition(Span({'x': 1, 'y': [0, 1], 't': [0, 1]}), nil_dirichlet),
'gamma4': Condition(Span({'x': 0, 'y': [0, 1], 't': [0, 1]}), nil_dirichlet),
't0': Condition(Span({'x': [0, 1], 'y': [0, 1], 't': 0}), initial_condition),
'D': Condition(Span({'x': [0, 1], 'y': [0, 1], 't': [0, 1]}), wave_equation),
'gamma1': Condition(location=Span({'x': [0, 1], 'y': 1, 't': [0, 1]}), function=nil_dirichlet),
'gamma2': Condition(location=Span({'x': [0, 1], 'y': 0, 't': [0, 1]}), function=nil_dirichlet),
'gamma3': Condition(location=Span({'x': 1, 'y': [0, 1], 't': [0, 1]}), function=nil_dirichlet),
'gamma4': Condition(location=Span({'x': 0, 'y': [0, 1], 't': [0, 1]}), function=nil_dirichlet),
't0': Condition(location=Span({'x': [0, 1], 'y': [0, 1], 't': 0}), function=initial_condition),
'D': Condition(location=Span({'x': [0, 1], 'y': [0, 1], 't': [0, 1]}), function=wave_equation),
}
def wave_sol(self, pts):
@@ -142,28 +142,28 @@ approximately one minute.
.. parsed-literal::
sum gamma1nil_di gamma2nil_di gamma3nil_di gamma4nil_di t0initial_co Dwave_equati
[epoch 00000] 4.567502e-01 2.847714e-02 1.962997e-02 9.094939e-03 1.247287e-02 3.838658e-01 3.209481e-03
[epoch 00000] 1.021557e-01 1.350026e-02 4.368403e-03 6.463497e-03 1.698729e-03 5.513944e-02 2.098533e-02
sum gamma1nil_di gamma2nil_di gamma3nil_di gamma4nil_di t0initial_co Dwave_equati
[epoch 00001] 4.184132e-01 1.914901e-02 2.436301e-02 8.384322e-03 1.077990e-02 3.530422e-01 2.694697e-03
[epoch 00001] 8.096325e-02 7.543423e-03 2.978407e-03 7.128799e-03 2.084145e-03 3.967418e-02 2.155431e-02
sum gamma1nil_di gamma2nil_di gamma3nil_di gamma4nil_di t0initial_co Dwave_equati
[epoch 00150] 1.694410e-01 9.840883e-03 1.117415e-02 1.140828e-02 1.003646e-02 1.260622e-01 9.190784e-04
[epoch 00150] 4.684930e-02 9.609548e-03 3.093602e-03 7.733506e-03 2.570329e-03 1.896760e-02 4.874712e-03
sum gamma1nil_di gamma2nil_di gamma3nil_di gamma4nil_di t0initial_co Dwave_equati
[epoch 00300] 1.666860e-01 9.847926e-03 1.122043e-02 1.142906e-02 9.706282e-03 1.237589e-01 7.233715e-04
[epoch 00300] 3.519089e-02 6.642059e-03 2.865276e-03 6.399740e-03 2.900236e-03 1.244203e-02 3.941551e-03
sum gamma1nil_di gamma2nil_di gamma3nil_di gamma4nil_di t0initial_co Dwave_equati
[epoch 00450] 1.564735e-01 8.579318e-03 1.203290e-02 1.264551e-02 8.249855e-03 1.136869e-01 1.279038e-03
[epoch 00450] 2.766160e-02 5.089254e-03 2.789679e-03 5.370538e-03 3.071685e-03 7.834940e-03 3.505504e-03
sum gamma1nil_di gamma2nil_di gamma3nil_di gamma4nil_di t0initial_co Dwave_equati
[epoch 00600] 1.281068e-01 5.976059e-03 1.463099e-02 1.191054e-02 7.087692e-03 8.658079e-02 1.920737e-03
[epoch 00600] 2.361075e-02 4.279066e-03 2.785937e-03 4.689044e-03 3.101575e-03 5.907214e-03 2.847910e-03
sum gamma1nil_di gamma2nil_di gamma3nil_di gamma4nil_di t0initial_co Dwave_equati
[epoch 00750] 7.482838e-02 5.880896e-03 1.912235e-02 5.754319e-03 4.252454e-03 3.697925e-02 2.839110e-03
[epoch 00750] 8.005206e-02 3.891625e-03 2.690672e-03 3.808867e-03 3.402538e-03 6.042966e-03 6.021538e-02
sum gamma1nil_di gamma2nil_di gamma3nil_di gamma4nil_di t0initial_co Dwave_equati
[epoch 00900] 3.109156e-02 2.877797e-03 5.560369e-03 3.611543e-03 3.818088e-03 1.117986e-02 4.043903e-03
[epoch 00900] 1.892301e-02 3.592897e-03 2.639081e-03 3.797543e-03 2.988781e-03 3.860098e-03 2.044612e-03
sum gamma1nil_di gamma2nil_di gamma3nil_di gamma4nil_di t0initial_co Dwave_equati
[epoch 01050] 1.969596e-02 2.598281e-03 3.658714e-03 3.426491e-03 3.696677e-03 4.037755e-03 2.278043e-03
[epoch 01050] 1.739456e-02 3.420912e-03 2.557583e-03 3.532733e-03 2.910482e-03 3.114843e-03 1.858010e-03
sum gamma1nil_di gamma2nil_di gamma3nil_di gamma4nil_di t0initial_co Dwave_equati
[epoch 01200] 1.625224e-02 2.496960e-03 3.069649e-03 3.198287e-03 3.420298e-03 2.728654e-03 1.338392e-03
[epoch 01200] 1.663617e-02 3.213567e-03 2.571464e-03 3.355495e-03 2.749454e-03 3.247283e-03 1.498912e-03
sum gamma1nil_di gamma2nil_di gamma3nil_di gamma4nil_di t0initial_co Dwave_equati
[epoch 01350] 1.430180e-02 2.350929e-03 2.700139e-03 2.961276e-03 3.141905e-03 2.189825e-03 9.577314e-04
[epoch 01500] 1.293717e-02 2.182199e-03 2.440975e-03 2.706538e-03 2.904802e-03 1.891113e-03 8.115429e-04
[epoch 01350] 1.551488e-02 3.121611e-03 2.481438e-03 3.141828e-03 2.706321e-03 2.636140e-03 1.427544e-03
[epoch 01500] 1.497287e-02 2.974171e-03 2.475442e-03 2.979754e-03 2.593079e-03 2.723322e-03 1.227099e-03
After the training is completed one can now plot some results using the

Binary file not shown.

Before

Width:  |  Height:  |  Size: 20 KiB

After

Width:  |  Height:  |  Size: 21 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 17 KiB

After

Width:  |  Height:  |  Size: 21 KiB

File diff suppressed because one or more lines are too long

View File

@@ -69,7 +69,7 @@
#
# Once the problem class is initialized we need to write the differential equation in PINA language. For doing this we need to load the pina operators found in `pina.operators` module. Let's again consider the Equation (1) and try to write the PINA model class:
# In[14]:
# In[ ]:
from pina.problem import SpatialProblem
@@ -110,8 +110,8 @@ class SimpleODE(SpatialProblem):
# Conditions to hold
conditions = {
'x0': Condition(Span({'x': 0.}), initial_condition),
'D': Condition(Span({'x': [0, 1]}), ode_equation),
'x0': Condition(location=Span({'x': 0.}), function=initial_condition),
'D': Condition(location=Span({'x': [0, 1]}), function=ode_equation),
}
# defining true solution
@@ -129,7 +129,7 @@ class SimpleODE(SpatialProblem):
# The basics requirements for building a PINN model are a problem and a model. We have already covered the problem definition. For the model one can use the default models provided in PINA or use a custom model. We will not go into the details of model definition, Tutorial2 and Tutorial3 treat the topic in detail.
# In[31]:
# In[ ]:
from pina.model import FeedForward
@@ -157,7 +157,7 @@ pinn = PINN(problem, model)
# Once the `pinn` object is created, we need to generate the points for starting the optimization. For doing this we use the `span_pts` method of the `PINN` class.
# Let's see some methods to sample in $(0, 1)$.
# In[32]:
# In[ ]:
# sampling 20 points in (0, 1) with discrete step
@@ -172,7 +172,7 @@ pinn.span_pts(20, 'random', locations=['D'])
# We can also use a dictionary for specific variables:
# In[33]:
# In[ ]:
pinn.span_pts({'variables': ['x'], 'mode': 'grid', 'n': 20}, locations=['D'])
@@ -180,7 +180,7 @@ pinn.span_pts({'variables': ['x'], 'mode': 'grid', 'n': 20}, locations=['D'])
# We are going to use equispaced points for sampling. We need to sample in all the conditions domains. In our case we sample in `D` and `x0`.
# In[34]:
# In[ ]:
# sampling for training
@@ -192,7 +192,7 @@ pinn.span_pts(20, 'grid', locations=['D'])
#
# Once we have defined the PINA model, created a network and sampled points in the domain, we have everything that is necessary for training a PINN. Here we show a very short training and some method for plotting the results.
# In[35]:
# In[ ]:
# simple training
@@ -201,7 +201,7 @@ final_loss = pinn.train(stop=3000, frequency_print=1000)
# After the training we have saved the final loss in `final_loss`, which we can inspect. By default PINA uses mean square error loss.
# In[36]:
# In[ ]:
# inspecting final loss
@@ -210,7 +210,7 @@ final_loss
# By using the `Plotter` class from PINA we can also do some quantitative plots of the loss function.
# In[37]:
# In[ ]:
from pina.plotter import Plotter

File diff suppressed because one or more lines are too long

View File

@@ -18,11 +18,11 @@
# First of all, some useful imports.
# In[1]:
# In[ ]:
import torch
from torch.nn import ReLU, Tanh, Softplus
from torch.nn import Softplus
from pina.problem import SpatialProblem
from pina.operators import nabla
@@ -33,7 +33,7 @@ from pina import Condition, Span, PINN, LabelTensor, Plotter
# Now, the Poisson problem is written in PINA code as a class. The equations are written as *conditions* that should be satisfied in the corresponding domains. *truth_solution*
# is the exact solution which will be compared with the predicted one.
# In[2]:
# In[ ]:
class Poisson(SpatialProblem):
@@ -51,11 +51,11 @@ class Poisson(SpatialProblem):
return output_.extract(['u']) - value
conditions = {
'gamma1': Condition(Span({'x': [0, 1], 'y': 1}), nil_dirichlet),
'gamma2': Condition(Span({'x': [0, 1], 'y': 0}), nil_dirichlet),
'gamma3': Condition(Span({'x': 1, 'y': [0, 1]}), nil_dirichlet),
'gamma4': Condition(Span({'x': 0, 'y': [0, 1]}), nil_dirichlet),
'D': Condition(Span({'x': [0, 1], 'y': [0, 1]}), laplace_equation),
'gamma1': Condition(location=Span({'x': [0, 1], 'y': 1}), function=nil_dirichlet),
'gamma2': Condition(location=Span({'x': [0, 1], 'y': 0}), function=nil_dirichlet),
'gamma3': Condition(location=Span({'x': 1, 'y': [0, 1]}), function=nil_dirichlet),
'gamma4': Condition(location=Span({'x': 0, 'y': [0, 1]}), function=nil_dirichlet),
'D': Condition(location=Span({'x': [0, 1], 'y': [0, 1]}), function=laplace_equation),
}
def poisson_sol(self, pts):
@@ -75,7 +75,7 @@ class Poisson(SpatialProblem):
# The output of the cell below is the final loss of the training phase of the PINN.
# We highlight that the generation of the sampling points and the training is here encapsulated within the function `generate_samples_and_train`, but only for saving some lines of code in the next cells; that function is not mandatory in the **PINA** framework.
# In[3]:
# In[ ]:
def generate_samples_and_train(model, problem):
@@ -98,7 +98,7 @@ pinn = generate_samples_and_train(model, problem)
# The neural network of course can be saved in a file. In such a way, we can store it after the train, and load it just to infer the field. Here we don't store the model, but for demonstrative purposes we put in the next cell the commented line of code.
# In[4]:
# In[ ]:
# pinn.save_state('pina.poisson')
@@ -107,7 +107,7 @@ pinn = generate_samples_and_train(model, problem)
# Now the *Plotter* class is used to plot the results.
# The solution predicted by the neural network is plotted on the left, the exact one is represented at the center and on the right the error between the exact and the predicted solutions is shown.
# In[5]:
# In[ ]:
plotter = Plotter()
@@ -131,7 +131,7 @@ plotter.plot(pinn)
#
# Finally, we perform the same training as before: the problem is `Poisson`, the network is composed by the same number of neurons and optimizer parameters are equal to previous test, the only change is the new extra feature.
# In[6]:
# In[ ]:
class SinSin(torch.nn.Module):
@@ -158,7 +158,7 @@ pinn_feat = generate_samples_and_train(model_feat, problem)
# The predicted and exact solutions and the error between them are represented below.
# We can easily note that now our network, having almost the same condition as before, is able to reach an additional order of magnitude in accuracy.
# In[7]:
# In[ ]:
plotter.plot(pinn_feat)
@@ -178,7 +178,7 @@ plotter.plot(pinn_feat)
# where $\alpha$ and $\beta$ are the abovementioned parameters.
# Their implementation is quite trivial: by using the class `torch.nn.Parameter` we can define all the learnable parameters we need, and they are managed by `autograd` module!
# In[8]:
# In[ ]:
class SinSinAB(torch.nn.Module):
@@ -209,7 +209,7 @@ pinn_learn = generate_samples_and_train(model_learn, problem)
# Umh, the final loss is not appreciably better than the previous model (with static extra features), despite the usage of learnable parameters. This is mainly due to the over-parametrization of the network: there are many parameters to optimize during the training, and the model is unable to understand automatically that only the parameters of the extra feature (and not the weights/bias of the FFN) should be tuned in order to fit our problem. A longer training can be helpful, but in this case the faster way to reach machine precision for solving the Poisson problem is removing all the hidden layers in the `FeedForward`, keeping only the $\alpha$ and $\beta$ parameters of the extra feature.
# In[9]:
# In[ ]:
model_learn = FeedForward(
@@ -227,13 +227,13 @@ pinn_learn = generate_samples_and_train(model_learn, problem)
#
# We conclude here by showing the graphical comparison of the unknown field and the loss trend for all the test cases presented here: the standard PINN, PINN with extra features, and PINN with learnable extra features.
# In[10]:
# In[ ]:
plotter.plot(pinn_learn)
# In[11]:
# In[ ]:
import matplotlib.pyplot as plt

File diff suppressed because one or more lines are too long

View File

@@ -21,7 +21,7 @@
# First of all, some useful imports.
# In[2]:
# In[1]:
import torch
@@ -34,7 +34,7 @@ from pina import Condition, Span, PINN, Plotter
# Now, the wave problem is written in PINA code as a class, inheriting from `SpatialProblem` and `TimeDependentProblem` since we deal with spatial, and time dependent variables. The equations are written as `conditions` that should be satisfied in the corresponding domains. `truth_solution` is the exact solution which will be compared with the predicted one.
# In[3]:
# In[2]:
class Wave(TimeDependentProblem, SpatialProblem):
@@ -58,12 +58,12 @@ class Wave(TimeDependentProblem, SpatialProblem):
return output_.extract(['u']) - u_expected
conditions = {
'gamma1': Condition(Span({'x': [0, 1], 'y': 1, 't': [0, 1]}), nil_dirichlet),
'gamma2': Condition(Span({'x': [0, 1], 'y': 0, 't': [0, 1]}), nil_dirichlet),
'gamma3': Condition(Span({'x': 1, 'y': [0, 1], 't': [0, 1]}), nil_dirichlet),
'gamma4': Condition(Span({'x': 0, 'y': [0, 1], 't': [0, 1]}), nil_dirichlet),
't0': Condition(Span({'x': [0, 1], 'y': [0, 1], 't': 0}), initial_condition),
'D': Condition(Span({'x': [0, 1], 'y': [0, 1], 't': [0, 1]}), wave_equation),
'gamma1': Condition(location=Span({'x': [0, 1], 'y': 1, 't': [0, 1]}), function=nil_dirichlet),
'gamma2': Condition(location=Span({'x': [0, 1], 'y': 0, 't': [0, 1]}), function=nil_dirichlet),
'gamma3': Condition(location=Span({'x': 1, 'y': [0, 1], 't': [0, 1]}), function=nil_dirichlet),
'gamma4': Condition(location=Span({'x': 0, 'y': [0, 1], 't': [0, 1]}), function=nil_dirichlet),
't0': Condition(location=Span({'x': [0, 1], 'y': [0, 1], 't': 0}), function=initial_condition),
'D': Condition(location=Span({'x': [0, 1], 'y': [0, 1], 't': [0, 1]}), function=wave_equation),
}
def wave_sol(self, pts):
@@ -80,7 +80,7 @@ problem = Wave()
#
# This neural network takes as input the coordinates (in this case $x$, $y$ and $t$) and provides the unknown field of the Wave problem. The residual of the equations are evaluated at several sampling points (which the user can manipulate using the method `span_pts`) and the loss minimized by the neural network is the sum of the residuals.
# In[4]:
# In[3]:
class TorchNet(torch.nn.Module):
@@ -109,7 +109,7 @@ model = Network(model = TorchNet(),
# In this tutorial, the neural network is trained for 2000 epochs with a learning rate of 0.001. These parameters can be modified as desired.
# We highlight that the generation of the sampling points and the training is here encapsulated within the function `generate_samples_and_train`, but only for saving some lines of code in the next cells; that function is not mandatory in the **PINA** framework. The training takes approximately one minute.
# In[5]:
# In[7]:
def generate_samples_and_train(model, problem):
@@ -126,7 +126,7 @@ pinn = generate_samples_and_train(model, problem)
# After the training is completed one can now plot some results using the `Plotter` class of **PINA**.
# In[11]:
# In[8]:
plotter = Plotter()
@@ -137,7 +137,7 @@ plotter.plot(pinn, fixed_variables={'t': 0.6})
# We can also plot the pinn loss during the training to see the decrease.
# In[12]:
# In[9]:
import matplotlib.pyplot as plt