From 578c5bc2f4763e3a7d01507fbc3c815af03cb75c Mon Sep 17 00:00:00 2001 From: "github-actions[bot]" <41898282+github-actions[bot]@users.noreply.github.com> Date: Mon, 7 Apr 2025 10:51:06 +0200 Subject: [PATCH] export tutorials changed in 5aec5f5 (#540) --- docs/source/tutorials/tutorial1/tutorial.html | 70 +- .../source/tutorials/tutorial10/tutorial.html | 16 +- .../source/tutorials/tutorial11/tutorial.html | 68 +- .../source/tutorials/tutorial12/tutorial.html | 7827 +++++++++++++++++ .../source/tutorials/tutorial13/tutorial.html | 34 +- .../source/tutorials/tutorial14/tutorial.html | 16 +- docs/source/tutorials/tutorial2/tutorial.html | 56 +- docs/source/tutorials/tutorial3/tutorial.html | 30 +- docs/source/tutorials/tutorial4/tutorial.html | 102 +- docs/source/tutorials/tutorial5/tutorial.html | 22 +- docs/source/tutorials/tutorial6/tutorial.html | 48 +- docs/source/tutorials/tutorial7/tutorial.html | 10 +- docs/source/tutorials/tutorial8/tutorial.html | 20 +- docs/source/tutorials/tutorial9/tutorial.html | 20 +- tutorials/tutorial1/tutorial.py | 60 +- tutorials/tutorial10/tutorial.py | 78 +- tutorials/tutorial11/tutorial.py | 68 +- tutorials/tutorial12/tutorial.py | 26 +- tutorials/tutorial13/tutorial.py | 44 +- tutorials/tutorial14/tutorial.py | 20 +- tutorials/tutorial2/tutorial.py | 40 +- tutorials/tutorial3/tutorial.py | 32 +- tutorials/tutorial4/tutorial.py | 50 +- tutorials/tutorial5/tutorial.py | 24 +- tutorials/tutorial6/tutorial.py | 24 +- tutorials/tutorial7/tutorial.py | 22 +- tutorials/tutorial8/tutorial.py | 12 +- tutorials/tutorial9/tutorial.py | 36 +- 28 files changed, 8349 insertions(+), 526 deletions(-) create mode 100644 docs/source/tutorials/tutorial12/tutorial.html diff --git a/docs/source/tutorials/tutorial1/tutorial.html b/docs/source/tutorials/tutorial1/tutorial.html index 7dea1b1..d8010e1 100644 --- a/docs/source/tutorials/tutorial1/tutorial.html +++ b/docs/source/tutorials/tutorial1/tutorial.html @@ -7624,7 +7624,7 @@ $$

try: import google.colab - IN_COLAB = True + IN_COLAB = True except: IN_COLAB = False if IN_COLAB: @@ -7843,26 +7843,26 @@ $$

-
Input points: {'x0': LabelTensor([[0.]]), 'D': LabelTensor([[0.3416],
+
Input points: {'x0': LabelTensor([[0.]]), 'D': LabelTensor([[0.9337],
              [0.0857],
-             [0.5368],
-             [0.7287],
-             [0.4425],
-             [0.6176],
-             [0.6806],
-             [0.0268],
-             [0.3685],
-             [0.1342],
-             [0.9353],
-             [0.2686],
-             [0.2114],
-             [0.8439],
-             [0.7916],
-             [0.1877],
-             [0.9715],
-             [0.4534],
-             [0.5888],
-             [0.8793]])}
+             [0.7990],
+             [0.8456],
+             [0.2606],
+             [0.1254],
+             [0.5825],
+             [0.6755],
+             [0.2170],
+             [0.9972],
+             [0.8914],
+             [0.4642],
+             [0.4323],
+             [0.1694],
+             [0.6003],
+             [0.0351],
+             [0.5070],
+             [0.3535],
+             [0.7230],
+             [0.3159]])}
 Input points labels: ['x']
 
@@ -7906,13 +7906,13 @@ Input points labels: ['x']
@@ -8018,12 +8018,12 @@ Input points labels: ['x']
-
+
@@ -8069,9 +8069,9 @@ var element = document.getElementById('b3bf6177-54a9-4f90-9738-c42ea2bff7ce');
@@ -8114,13 +8114,13 @@ var element = document.getElementById('b3bf6177-54a9-4f90-9738-c42ea2bff7ce');
Out[9]:
-
<matplotlib.legend.Legend at 0x7f747eda3af0>
+
<matplotlib.legend.Legend at 0x7fc84f9891c0>
-No description has been provided for this image
+No description has been provided for this image
@@ -8270,12 +8270,12 @@ To visualize the loss you can run tensorboard --logdir 'tutorial_logs' on your t
-
+
@@ -8289,7 +8289,7 @@ var element = document.getElementById('ed678473-ee92-4467-8c6e-e4053a0ab45c');
@@ -8319,6 +8319,6 @@ var element = document.getElementById('ed678473-ee92-4467-8c6e-e4053a0ab45c'); diff --git a/docs/source/tutorials/tutorial10/tutorial.html b/docs/source/tutorials/tutorial10/tutorial.html index a292490..aec6591 100644 --- a/docs/source/tutorials/tutorial10/tutorial.html +++ b/docs/source/tutorials/tutorial10/tutorial.html @@ -7569,7 +7569,7 @@ the operator of time dependent PDEs.

except: IN_COLAB = False if IN_COLAB: - !pip install "pina-mathlab" + !pip install "pina-mathlab" # get the data !mkdir "data" !wget "https://github.com/mathLab/PINA/raw/refs/heads/master/tutorials/tutorial10/data/Data_KS.mat" -O "data/Data_KS.mat" @@ -7917,12 +7917,12 @@ and the AveragingNeuralOperator model. As done in the
-
+
@@ -7973,7 +7973,7 @@ var element = document.getElementById('8de242e0-f62d-4c40-afa5-f45747f31dec');
@@ -8025,8 +8025,8 @@ Let's take a look at the training and testing error:

@@ -8066,6 +8066,6 @@ Testing error: 0.144
diff --git a/docs/source/tutorials/tutorial11/tutorial.html b/docs/source/tutorials/tutorial11/tutorial.html
index ecd5c11..5b4e5b5 100644
--- a/docs/source/tutorials/tutorial11/tutorial.html
+++ b/docs/source/tutorials/tutorial11/tutorial.html
@@ -7878,12 +7878,12 @@ can be initialized by simply passing the PINN solver

- @@ -7917,12 +7917,12 @@ var element = document.getElementById('781ce4a7-f301-442f-979e-b838a86510aa');
- @@ -7956,12 +7956,12 @@ var element = document.getElementById('f396baca-368f-4a3c-8e95-c63fa346c4d4');
- @@ -8147,12 +8147,12 @@ Lightning has a callback system to execute them when needed. Callbacks should ca
- @@ -8197,15 +8197,15 @@ var element = document.getElementById('536dd31a-0bcb-406a-98f0-854457d1fc3b');
@@ -8384,12 +8384,12 @@ We use the
-
+
@@ -8403,7 +8403,7 @@ var element = document.getElementById('291e957a-da06-4fad-8e6e-6944af73f810');
@@ -8489,12 +8489,12 @@ var element = document.getElementById('291e957a-da06-4fad-8e6e-6944af73f810');
- @@ -8515,7 +8515,7 @@ var element = document.getElementById('efe16467-d78a-4824-83b5-b329a8f7dc6c'); @@ -8602,12 +8602,12 @@ This is because by default StochasticWeightAveraging will be activa
- @@ -8628,7 +8628,7 @@ var element = document.getElementById('d9bcd992-ec4e-43d8-9a3b-8b08f9ee246a'); @@ -8658,6 +8658,6 @@ var element = document.getElementById('d9bcd992-ec4e-43d8-9a3b-8b08f9ee246a'); diff --git a/docs/source/tutorials/tutorial12/tutorial.html b/docs/source/tutorials/tutorial12/tutorial.html new file mode 100644 index 0000000..b34b5f3 --- /dev/null +++ b/docs/source/tutorials/tutorial12/tutorial.html @@ -0,0 +1,7827 @@ + + + + + +tutorial + + + + + + + + + + + + +
+ + + + + + + + + + + +
+ + diff --git a/docs/source/tutorials/tutorial13/tutorial.html b/docs/source/tutorials/tutorial13/tutorial.html index e16b822..55fc6e4 100644 --- a/docs/source/tutorials/tutorial13/tutorial.html +++ b/docs/source/tutorials/tutorial13/tutorial.html @@ -7576,7 +7576,7 @@ multi-scale PDEs with physics-informed neural networks.

import warnings from pina import Condition, Trainer -from pina.problem import SpatialProblem +from pina.problem import SpatialProblem from pina.operator import laplacian from pina.solver import PINN, SelfAdaptivePINN as SAPINN from pina.loss import LpLoss @@ -7762,12 +7762,12 @@ approximate a function $u$, given sufficient data inside the computational domai
-
+
@@ -7801,12 +7801,12 @@ var element = document.getElementById('8da50878-400e-44c2-b449-584b20226c9a');
-
+
@@ -7856,13 +7856,13 @@ var element = document.getElementById('7348d301-a14e-48b1-93c4-9ec281da6330');
@@ -7910,8 +7910,8 @@ var element = document.getElementById('7348d301-a14e-48b1-93c4-9ec281da6330');
-
Relative l2 error PINN      3046.36%
-Relative l2 error SAPINN    1797.21%
+
Relative l2 error PINN      2414.08%
+Relative l2 error SAPINN    2857.30%
 
@@ -8049,12 +8049,12 @@ are applied to input coordinates and then passed through the same fully-connecte
-
+
@@ -8106,14 +8106,14 @@ var element = document.getElementById('e4b8a7cd-5aef-4892-ad24-0c8506049c29');
@@ -8144,6 +8144,6 @@ var element = document.getElementById('e4b8a7cd-5aef-4892-ad24-0c8506049c29'); diff --git a/docs/source/tutorials/tutorial14/tutorial.html b/docs/source/tutorials/tutorial14/tutorial.html index 27ee773..e2fcd5e 100644 --- a/docs/source/tutorials/tutorial14/tutorial.html +++ b/docs/source/tutorials/tutorial14/tutorial.html @@ -7560,7 +7560,7 @@ a.anchor-link { except: IN_COLAB = False if IN_COLAB: - !pip install "pina-mathlab" + !pip install "pina-mathlab" %matplotlib inline @@ -7887,8 +7887,8 @@ a.anchor-link {
Error summary for POD-RBF model:
-  Train: 8.186969e-03
-  Test:  5.062145e-02
+  Train: 8.187051e-03
+  Test:  5.066957e-02
 
@@ -7961,7 +7961,7 @@ a.anchor-link {
-No description has been provided for this image
+No description has been provided for this image
@@ -8015,8 +8015,8 @@ a.anchor-link {
Error summary for POD-RBF model:
-  Train: 4.524322e-02
-  Test:  4.513036e+06
+  Train: 4.455698e-02
+  Test:  4.281982e+06
 
@@ -8180,8 +8180,8 @@ a.anchor-link {
Error summary for POD-RBF model:
-  Train: 4.368320e-02
-  Test:  2.537675e-01
+  Train: 2.012740e-01
+  Test:  3.015688e-01
 
diff --git a/docs/source/tutorials/tutorial2/tutorial.html b/docs/source/tutorials/tutorial2/tutorial.html index 3d77619..92225e6 100644 --- a/docs/source/tutorials/tutorial2/tutorial.html +++ b/docs/source/tutorials/tutorial2/tutorial.html @@ -7568,7 +7568,7 @@ a.anchor-link { if IN_COLAB: !pip install "pina-mathlab" -import torch +import torch import matplotlib.pyplot as plt import warnings @@ -7757,12 +7757,12 @@ They are: ['g1', 'g2', 'g3', 'g4', 'D']
-
+
@@ -7857,7 +7857,7 @@ The solution predicted by the neural network is plotted on the left, the exact o
@@ -7991,12 +7991,12 @@ The set of input variables to the neural network is:

-
+
@@ -8043,7 +8043,7 @@ We can easily note that now our network, having almost the same condition as bef
@@ -8161,12 +8161,12 @@ Their implementation is quite trivial: by using the class torch.nn.Paramet
- @@ -8255,12 +8255,12 @@ var element = document.getElementById('ccc2ffd5-1fe6-4c1c-8e95-de8acce1c8df');
- @@ -8331,12 +8331,12 @@ Of course, this is a toy problem for understanding the usage of extra features:
- @@ -8346,7 +8346,7 @@ var element = document.getElementById('83bd3484-efd0-415e-bef2-abdff5aa9a64');
────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────
        Test metric             DataLoader 0
 ────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────
-        test_loss           0.30236542224884033
+        test_loss          0.061407435685396194
 ────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────
 PINN with extra features
 
@@ -8354,12 +8354,12 @@ PINN with extra features
- @@ -8369,7 +8369,7 @@ var element = document.getElementById('d1211dea-768d-46f0-9dec-f04555b7cdcd');
────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────
        Test metric             DataLoader 0
 ────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────
-        test_loss          0.0028569151181727648
+        test_loss          0.002801974304020405
 ────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────
 PINN with learnable extra features
 
@@ -8377,12 +8377,12 @@ PINN with learnable extra features
- @@ -8392,7 +8392,7 @@ var element = document.getElementById('4c591175-663b-4a76-8674-c382743e55bd');
────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────
        Test metric             DataLoader 0
 ────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────
-        test_loss          1.570544562456977e-11
+        test_loss          9.326550816368684e-12
 ────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────
 
@@ -8424,6 +8424,6 @@ var element = document.getElementById('4c591175-663b-4a76-8674-c382743e55bd'); diff --git a/docs/source/tutorials/tutorial3/tutorial.html b/docs/source/tutorials/tutorial3/tutorial.html index 31bbedc..44c63ce 100644 --- a/docs/source/tutorials/tutorial3/tutorial.html +++ b/docs/source/tutorials/tutorial3/tutorial.html @@ -7565,7 +7565,7 @@ a.anchor-link { IN_COLAB = True except: IN_COLAB = False -if IN_COLAB: +if IN_COLAB: !pip install "pina-mathlab" import torch @@ -7839,12 +7839,12 @@ u(x, y, t) = 0 \quad \text{on } \Gamma_1 \cup \Gamma_2 \cup \Gamma_3 \cup \Gamma
- @@ -7896,13 +7896,13 @@ var element = document.getElementById('a383f7d1-105d-483f-bdd1-ec9f1d72d0f0');
@@ -7999,19 +7999,19 @@ var element = document.getElementById('a383f7d1-105d-483f-bdd1-ec9f1d72d0f0');
@@ -8141,12 +8141,12 @@ var element = document.getElementById('a383f7d1-105d-483f-bdd1-ec9f1d72d0f0');
- @@ -8204,13 +8204,13 @@ var element = document.getElementById('f414dcf2-8c2c-4c0a-984d-937ad8fa7ed5');
@@ -8251,6 +8251,6 @@ var element = document.getElementById('f414dcf2-8c2c-4c0a-984d-937ad8fa7ed5'); diff --git a/docs/source/tutorials/tutorial4/tutorial.html b/docs/source/tutorials/tutorial4/tutorial.html index 5874dfb..34a3638 100644 --- a/docs/source/tutorials/tutorial4/tutorial.html +++ b/docs/source/tutorials/tutorial4/tutorial.html @@ -7588,7 +7588,7 @@ a.anchor-link { if IN_COLAB: !pip install "pina-mathlab" -import torch +import torch import matplotlib.pyplot as plt import torchvision # for MNIST dataset import warnings @@ -8004,7 +8004,13 @@ Failed to download (trying next): HTTP Error 404: Not Found Downloading https://ossci-datasets.s3.amazonaws.com/mnist/train-images-idx3-ubyte.gz -Downloading https://ossci-datasets.s3.amazonaws.com/mnist/train-images-idx3-ubyte.gz to ./data/MNIST/raw/train-images-idx3-ubyte.gz + + + +
+ +
@@ -8017,7 +8023,13 @@ Downloading https://ossci-datasets.s3.amazonaws.com/mnist/train-images-idx3-ubyt
+
+
+ +
@@ -8056,20 +8068,7 @@ Downloading https://ossci-datasets.s3.amazonaws.com/mnist/train-labels-idx1-ubyt - @@ -7964,7 +7964,7 @@ tensor([[2.2609, 2.3265],
@@ -8200,7 +8200,7 @@ tensor([[2.2609, 2.3265],
diff --git a/docs/source/tutorials/tutorial7/tutorial.html b/docs/source/tutorials/tutorial7/tutorial.html index b7b0ce3..41ac18b 100644 --- a/docs/source/tutorials/tutorial7/tutorial.html +++ b/docs/source/tutorials/tutorial7/tutorial.html @@ -7605,7 +7605,7 @@ where $\Omega$ is a square domain $[-2, 2] \times [-2, 2]$, and $\partial \Omega
## routine needed to run the notebook on Google Colab
 try:
-    import google.colab 
+    import google.colab
 
     IN_COLAB = True
 except:
@@ -7989,12 +7989,12 @@ The goal is to read the saved parameters after training and plot their trend acr
 
@@ -8099,8 +8099,8 @@ var element = document.getElementById('b7f35f1c-79c6-4f30-b8a0-5cff7734c220');
@@ -8199,7 +8199,7 @@ var element = document.getElementById('b7f35f1c-79c6-4f30-b8a0-5cff7734c220');
@@ -8222,6 +8222,6 @@ var element = document.getElementById('b7f35f1c-79c6-4f30-b8a0-5cff7734c220'); diff --git a/docs/source/tutorials/tutorial9/tutorial.html b/docs/source/tutorials/tutorial9/tutorial.html index 24a5b77..0232215 100644 --- a/docs/source/tutorials/tutorial9/tutorial.html +++ b/docs/source/tutorials/tutorial9/tutorial.html @@ -7573,11 +7573,11 @@ physics-informed neural networks.

!pip install "pina-mathlab" import torch -import matplotlib.pyplot as plt +import matplotlib.pyplot as plt import warnings from pina import Condition, Trainer -from pina.problem import SpatialProblem +from pina.problem import SpatialProblem from pina.operator import laplacian from pina.model import FeedForward from pina.model.block import PeriodicBoundaryEmbedding # The PBC module @@ -7818,12 +7818,12 @@ would indicate a periodicity of $2$ in $x$, $3$ in $y$, and so on...

- @@ -7865,7 +7865,7 @@ var element = document.getElementById('29cc13fa-b22c-40de-bd88-95baba67e10e');
@@ -7907,13 +7907,13 @@ var element = document.getElementById('29cc13fa-b22c-40de-bd88-95baba67e10e');
@@ -7972,7 +7972,7 @@ var element = document.getElementById('29cc13fa-b22c-40de-bd88-95baba67e10e');
@@ -8003,6 +8003,6 @@ var element = document.getElementById('29cc13fa-b22c-40de-bd88-95baba67e10e'); diff --git a/tutorials/tutorial1/tutorial.py b/tutorials/tutorial1/tutorial.py index b6cb93c..42b5483 100644 --- a/tutorials/tutorial1/tutorial.py +++ b/tutorials/tutorial1/tutorial.py @@ -2,21 +2,21 @@ # coding: utf-8 # # Tutorial: Physics Informed Neural Networks on PINA -# +# # [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/mathLab/PINA/blob/master/tutorials/tutorial1/tutorial.ipynb) -# +# -# In this tutorial, we will demonstrate a typical use case of **PINA** on a toy problem, following the standard API procedure. -# +# In this tutorial, we will demonstrate a typical use case of **PINA** on a toy problem, following the standard API procedure. +# #

# PINA API #

-# +# # Specifically, the tutorial aims to introduce the following topics: -# +# # * Explaining how to build **PINA** Problems, # * Showing how to generate data for `PINN` training -# +# # These are the two main steps needed **before** starting the modelling optimization (choose model and solver, and train). We will show each step in detail, and at the end, we will solve a simple Ordinary Differential Equation (ODE) problem using the `PINN` solver. # ## Build a PINA problem @@ -24,7 +24,7 @@ # Problem definition in the **PINA** framework is done by building a python `class`, which inherits from one or more problem classes (`SpatialProblem`, `TimeDependentProblem`, `ParametricProblem`, ...) depending on the nature of the problem. Below is an example: # ### Simple Ordinary Differential Equation # Consider the following: -# +# # $$ # \begin{equation} # \begin{cases} @@ -33,25 +33,25 @@ # \end{cases} # \end{equation} # $$ -# +# # with the analytical solution $u(x) = e^x$. In this case, our ODE depends only on the spatial variable $x\in(0,1)$ , meaning that our `Problem` class is going to be inherited from the `SpatialProblem` class: -# +# # ```python # from pina.problem import SpatialProblem # from pina.domain import CartesianProblem -# +# # class SimpleODE(SpatialProblem): -# +# # output_variables = ['u'] # spatial_domain = CartesianProblem({'x': [0, 1]}) -# +# # # other stuff ... # ``` -# +# # Notice that we define `output_variables` as a list of symbols, indicating the output variables of our equation (in this case only $u$), this is done because in **PINA** the `torch.Tensor`s are labelled, allowing the user maximal flexibility for the manipulation of the tensor. The `spatial_domain` variable indicates where the sample points are going to be sampled in the domain, in this case $x\in[0,1]$. -# +# # What if our equation is also time-dependent? In this case, our `class` will inherit from both `SpatialProblem` and `TimeDependentProblem`: -# +# # In[ ]: @@ -84,7 +84,7 @@ class TimeSpaceODE(SpatialProblem, TimeDependentProblem): # where we have included the `temporal_domain` variable, indicating the time domain wanted for the solution. -# +# # In summary, using **PINA**, we can initialize a problem with a class which inherits from different base classes: `SpatialProblem`, `TimeDependentProblem`, `ParametricProblem`, and so on depending on the type of problem we are considering. Here are some examples (more on the official documentation): # * ``SpatialProblem`` $\rightarrow$ a differential equation with spatial variable(s) ``spatial_domain`` # * ``TimeDependentProblem`` $\rightarrow$ a time-dependent differential equation with temporal variable(s) ``temporal_domain`` @@ -92,7 +92,7 @@ class TimeSpaceODE(SpatialProblem, TimeDependentProblem): # * ``AbstractProblem`` $\rightarrow$ any **PINA** problem inherits from here # ### Write the problem class -# +# # Once the `Problem` class is initialized, we need to represent the differential equation in **PINA**. In order to do this, we need to load the **PINA** operators from `pina.operator` module. Again, we'll consider Equation (1) and represent it in **PINA**: # In[ ]: @@ -146,14 +146,14 @@ problem = SimpleODE() # After we define the `Problem` class, we need to write different class methods, where each method is a function returning a residual. These functions are the ones minimized during PINN optimization, given the initial conditions. For example, in the domain $[0,1]$, the ODE equation (`ode_equation`) must be satisfied. 
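# As a point of reference, a minimal sketch of such a residual method is shown
# below. It is hedged, not the tutorial's verbatim cell: the operator call follows
# the `pina.operator` conventions used elsewhere in these tutorials, and the
# keyword names and the `extract` call should be treated as assumptions.

from pina.operator import grad


def ode_equation(input_, output_):
    # residual of du/dx - u = 0: the gradient of u w.r.t. x minus u itself
    u = output_.extract(["u"])
    u_x = grad(output_, input_, components=["u"], d=["x"])
    return u_x - u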
We represent this by returning the difference between subtracting the variable `u` from its gradient (the residual), which we hope to minimize to 0. This is done for all conditions. Notice that we do not pass directly a `python` function, but an `Equation` object, which is initialized with the `python` function. This is done so that all the computations and internal checks are done inside **PINA**. -# +# # Once we have defined the function, we need to tell the neural network where these methods are to be applied. To do so, we use the `Condition` class. In the `Condition` class, we pass the location points and the equation we want minimized on those points (other possibilities are allowed, see the documentation for reference). -# +# # Finally, it's possible to define a `solution` function, which can be useful if we want to plot the results and see how the real solution compares to the expected (true) solution. Notice that the `solution` function is a method of the `PINN` class, but it is not mandatory for problem definition. -# +# -# ## Generate data -# +# ## Generate data +# # Data for training can come in form of direct numerical simulation results, or points in the domains. In case we perform unsupervised learning, we just need the collocation points for training, i.e. points where we want to evaluate the neural network. Sampling point in **PINA** is very easy, here we show three examples using the `.discretise_domain` method of the `AbstractProblem` class. # In[ ]: @@ -180,7 +180,7 @@ problem.discretise_domain(1, "random", domains=["x0"]) problem.discretise_domain(20, "lh", domains=["D"]) -# The points are saved in a python `dict`, and can be accessed by calling the attribute `input_pts` of the problem +# The points are saved in a python `dict`, and can be accessed by calling the attribute `input_pts` of the problem # In[ ]: @@ -252,7 +252,7 @@ trainer.train() trainer.logged_metrics -# By using `matplotlib` we can also do some qualitative plots of the solution. +# By using `matplotlib` we can also do some qualitative plots of the solution. # In[ ]: @@ -328,13 +328,13 @@ plt.yscale("log") # ## What's next? -# +# # Congratulations on completing the introductory tutorial of **PINA**! There are several directions you can go now: -# +# # 1. Train the network for longer or with different layer sizes and assert the finaly accuracy -# +# # 2. Train the network using other types of models (see `pina.model`) -# +# # 3. GPU training and speed benchmarking -# +# # 4. Many more... diff --git a/tutorials/tutorial10/tutorial.py b/tutorials/tutorial10/tutorial.py index f5f57db..564617d 100644 --- a/tutorials/tutorial10/tutorial.py +++ b/tutorials/tutorial10/tutorial.py @@ -2,17 +2,17 @@ # coding: utf-8 # # Tutorial: Averaging Neural Operator for solving Kuramoto Sivashinsky equation -# +# # [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/mathLab/PINA/blob/master/tutorials/tutorial10/tutorial.ipynb) -# +# # In this tutorial we will build a Neural Operator using the # `AveragingNeuralOperator` model and the `SupervisedSolver`. At the end of the # tutorial you will be able to train a Neural Operator for learning # the operator of time dependent PDEs. -# -# +# +# # First of all, some useful imports. Note we use `scipy` for i/o operations. 
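# As a hedged sketch of that i/o step (the .mat files are downloaded in the
# cell below; the variable names stored inside them are not assumed here):

from scipy import io

data = io.loadmat("data/Data_KS.mat")  # returns a dict of NumPy arrays keyed by variable name
print(data.keys())  # inspect the stored fields before extracting the snapshots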
-# +# # In[ ]: @@ -28,12 +28,8 @@ if IN_COLAB: get_ipython().system('pip install "pina-mathlab"') # get the data get_ipython().system('mkdir "data"') - get_ipython().system( - 'wget "https://github.com/mathLab/PINA/raw/refs/heads/master/tutorials/tutorial10/data/Data_KS.mat" -O "data/Data_KS.mat"' - ) - get_ipython().system( - 'wget "https://github.com/mathLab/PINA/raw/refs/heads/master/tutorials/tutorial10/data/Data_KS2.mat" -O "data/Data_KS2.mat"' - ) + get_ipython().system('wget "https://github.com/mathLab/PINA/raw/refs/heads/master/tutorials/tutorial10/data/Data_KS.mat" -O "data/Data_KS.mat"') + get_ipython().system('wget "https://github.com/mathLab/PINA/raw/refs/heads/master/tutorials/tutorial10/data/Data_KS2.mat" -O "data/Data_KS2.mat"') import torch import matplotlib.pyplot as plt @@ -49,40 +45,40 @@ warnings.filterwarnings("ignore") # ## Data Generation -# +# # We will focus on solving a specific PDE, the **Kuramoto Sivashinsky** (KS) equation. # The KS PDE is a fourth-order nonlinear PDE with the following form: -# +# # $$ # \frac{\partial u}{\partial t}(x,t) = -u(x,t)\frac{\partial u}{\partial x}(x,t)- \frac{\partial^{4}u}{\partial x^{4}}(x,t) - \frac{\partial^{2}u}{\partial x^{2}}(x,t). # $$ -# +# # In the above $x\in \Omega=[0, 64]$ represents a spatial location, $t\in\mathbb{T}=[0,50]$ the time and $u(x, t)$ is the value of the function $u:\Omega \times\mathbb{T}\in\mathbb{R}$. We indicate with $\mathbb{U}$ a suitable space for $u$, i.e. we have that the solution $u\in\mathbb{U}$. -# -# +# +# # We impose Dirichlet boundary conditions on the derivative of $u$ on the border of the domain $\partial \Omega$ # $$ # \frac{\partial u}{\partial x}(x,t)=0 \quad \forall (x,t)\in \partial \Omega\times\mathbb{T}. # $$ -# -# Initial conditions are sampled from a distribution over truncated Fourier series with random coefficients +# +# Initial conditions are sampled from a distribution over truncated Fourier series with random coefficients # $\{A_k, \ell_k, \phi_k\}_k$ as # $$ # u(x,0) = \sum_{k=1}^N A_k \sin(2 \pi \ell_k x / L + \phi_k) \ , # $$ -# -# where $A_k \in [-0.4, -0.3]$, $\ell_k = 2$, $\phi_k = 2\pi \quad \forall k=1,\dots,N$. -# -# +# +# where $A_k \in [-0.4, -0.3]$, $\ell_k = 2$, $\phi_k = 2\pi \quad \forall k=1,\dots,N$. +# +# # We have already generated some data for differenti initial conditions, and our objective will # be to build a Neural Operator that, given $u(x, t)$ will output $u(x, t+\delta)$, where # $\delta$ is a fixed time step. We will come back on the Neural Operator architecture, for now # we first need to import the data. -# +# # **Note:** # *The numerical integration is obtained by using pseudospectral method for spatial derivative discratization and # implicit Runge Kutta 5 for temporal dynamics.* -# +# # In[2]: @@ -110,9 +106,9 @@ print(f" shape solution: {sol_train.shape}") # The data are saved in the form `B \times N \times D`, where `B` is the batch_size # (basically how many initial conditions we sample), `N` the number of points in the mesh -# (which is the product of the discretization in `x` timese the one in `t`), and +# (which is the product of the discretization in `x` timese the one in `t`), and # `D` the dimension of the problem (in this case we have three variables `[u, t, x]`). -# +# # We are now going to plot some trajectories! # In[3]: @@ -178,9 +174,9 @@ plot_trajectory( # As we can see, as the time progresses the solution becomes chaotic, which makes # it really hard to learn! 
We will now focus on building a Neural Operator using the # `SupervisedSolver` class to tackle the problem. -# +# # ## Averaging Neural Operator -# +# # We will build a neural operator $\texttt{NO}$ which takes the solution at time $t=0$ for any $x\in\Omega$, # the time $(t)$ at which we want to compute the solution, and gives back the solution to the KS equation $u(x, t)$, mathematically: # $$ @@ -190,26 +186,26 @@ plot_trajectory( # $$ # \texttt{NO}_\theta[u(t=0)](x, t) \rightarrow u(x, t). # $$ -# +# # There are many ways on approximating the following operator, e.g. by 2D [FNO](https://mathlab.github.io/PINA/_rst/models/fno.html) (for regular meshes), # a [DeepOnet](https://mathlab.github.io/PINA/_rst/models/deeponet.html), [Continuous Convolutional Neural Operator](https://mathlab.github.io/PINA/_rst/layers/convolution.html), -# [MIONet](https://mathlab.github.io/PINA/_rst/models/mionet.html). +# [MIONet](https://mathlab.github.io/PINA/_rst/models/mionet.html). # In this tutorial we will use the *Averaging Neural Operator* presented in [*The Nonlocal Neural Operator: Universal Approximation*](https://arxiv.org/abs/2304.13221) # which is a [Kernel Neural Operator](https://mathlab.github.io/PINA/_rst/models/base_no.html) with integral kernel: -# +# # $$ # K(v) = \sigma\left(Wv(x) + b + \frac{1}{|\Omega|}\int_\Omega v(y)dy\right) # $$ -# +# # where: -# +# # * $v(x)\in\mathbb{R}^{\rm{emb}}$ is the update for a function $v$ with $\mathbb{R}^{\rm{emb}}$ the embedding (hidden) size # * $\sigma$ is a non-linear activation # * $W\in\mathbb{R}^{\rm{emb}\times\rm{emb}}$ is a tunable matrix. # * $b\in\mathbb{R}^{\rm{emb}}$ is a tunable bias. -# +# # If PINA many Kernel Neural Operators are already implemented, and the modular componets of the [Kernel Neural Operator](https://mathlab.github.io/PINA/_rst/models/base_no.html) class permits to create new ones by composing base kernel layers. -# +# # **Note:*** We will use the already built class* `AveragingNeuralOperator`, *as constructive excercise try to use the* [KernelNeuralOperator](https://mathlab.github.io/PINA/_rst/models/base_no.html) *class for building a kernel neural operator from scratch. You might employ the different layers that we have in pina, e.g.* [FeedForward](https://mathlab.github.io/PINA/_rst/models/fnn.html), *and* [AveragingNeuralOperator](https://mathlab.github.io/PINA/_rst/layers/avno_layer.html) *layers*. # In[4]: @@ -236,9 +232,9 @@ model = AveragingNeuralOperator( # Super easy! Notice that we use the `SIREN` activation function, more on [Implicit Neural Representations with Periodic Activation Functions](https://arxiv.org/abs/2006.09661). -# +# # ## Solving the KS problem -# +# # We will now focus on solving the KS equation using the `SupervisedSolver` class # and the `AveragingNeuralOperator` model. As done in the [FNO tutorial](https://github.com/mathLab/PINA/blob/master/tutorials/tutorial5/tutorial.ipynb) we now create the Neural Operator problem class with `SupervisedProblem`. @@ -308,11 +304,11 @@ with torch.no_grad(): # As we can see the error is pretty small, which agrees with what we can see from the previous plots. # ## What's next? -# +# # Now you know how to solve a time dependent neural operator problem in **PINA**! There are multiple directions you can go now: -# +# # 1. Train the network for longer or with different layer sizes and assert the final accuracy -# +# # 2. 
We left a more challenging dataset [Data_KS2.mat](dat/Data_KS2.mat) where $A_k \in [-0.5, 0.5]$, $\ell_k \in [1, 2, 3]$, $\phi_k \in [0, 2\pi]$ for longer training -# +# # 3. Compare the performance between the different neural operators (you can even try to implement your favourite one!) diff --git a/tutorials/tutorial11/tutorial.py b/tutorials/tutorial11/tutorial.py index df36aa1..02c1668 100644 --- a/tutorials/tutorial11/tutorial.py +++ b/tutorials/tutorial11/tutorial.py @@ -1,16 +1,16 @@ #!/usr/bin/env python # coding: utf-8 -# # Tutorial: PINA and PyTorch Lightning, training tips and visualizations -# +# # Tutorial: PINA and PyTorch Lightning, training tips and visualizations +# # [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/mathLab/PINA/blob/master/tutorials/tutorial11/tutorial.ipynb) -# -# In this tutorial, we will delve deeper into the functionality of the `Trainer` class, which serves as the cornerstone for training **PINA** [Solvers](https://mathlab.github.io/PINA/_rst/_code.html#solvers). -# +# +# In this tutorial, we will delve deeper into the functionality of the `Trainer` class, which serves as the cornerstone for training **PINA** [Solvers](https://mathlab.github.io/PINA/_rst/_code.html#solvers). +# # The `Trainer` class offers a plethora of features aimed at improving model accuracy, reducing training time and memory usage, facilitating logging visualization, and more thanks to the amazing job done by the PyTorch Lightning team! -# +# # Our leading example will revolve around solving the `SimpleODE` problem, as outlined in the [*Introduction to PINA for Physics Informed Neural Networks training*](https://github.com/mathLab/PINA/blob/master/tutorials/tutorial1/tutorial.ipynb). If you haven't already explored it, we highly recommend doing so before diving into this tutorial. -# +# # Let's start by importing useful modules, define the `SimpleODE` problem and the `PINN` solver. # In[ ]: @@ -105,7 +105,7 @@ trainer = Trainer(solver=pinn) # ## Trainer Accelerator -# +# # When creating the trainer, **by defualt** the `Trainer` will choose the most performing `accelerator` for training which is available in your system, ranked as follow: # 1. [TPU](https://cloud.google.com/tpu/docs/intro-to-tpu) # 2. [IPU](https://www.graphcore.ai/products/ipu) @@ -114,7 +114,7 @@ trainer = Trainer(solver=pinn) # 5. CPU # For setting manually the `accelerator` run: -# +# # * `accelerator = {'gpu', 'cpu', 'hpu', 'mps', 'cpu', 'ipu'}` sets the accelerator to a specific one # In[4]: @@ -126,13 +126,13 @@ trainer = Trainer(solver=pinn, accelerator="cpu") # as you can see, even if in the used system `GPU` is available, it is not used since we set `accelerator='cpu'`. # ## Trainer Logging -# +# # In **PINA** you can log metrics in different ways. The simplest approach is to use the `MetricTraker` class from `pina.callbacks` as seen in the [*Introduction to PINA for Physics Informed Neural Networks training*](https://github.com/mathLab/PINA/blob/master/tutorials/tutorial1/tutorial.ipynb) tutorial. -# +# # However, expecially when we need to train multiple times to get an average of the loss across multiple runs, `pytorch_lightning.loggers` might be useful. Here we will use `TensorBoardLogger` (more on [logging](https://lightning.ai/docs/pytorch/stable/extensions/logging.html) here), but you can choose the one you prefer (or make your own one). 
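# A hedged sketch of how the logger is typically attached to the PINA `Trainer`
# (the `save_dir` and `max_epochs` values are illustrative, and `pinn` is the
# solver defined earlier in this tutorial):

from pytorch_lightning.loggers import TensorBoardLogger

from pina import Trainer

trainer = Trainer(
    solver=pinn,
    logger=TensorBoardLogger(save_dir="tutorial_logs"),  # inspect with: tensorboard --logdir 'tutorial_logs'
    enable_model_summary=False,  # skip the model summary printout, as noted below
    max_epochs=1000,
)
trainer.train()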
-# +# # We will now import `TensorBoardLogger`, do three runs of training and then visualize the results. Notice we set `enable_model_summary=False` to avoid model summary specifications (e.g. number of parameters), set it to true if needed. -# +# # In[5]: @@ -173,17 +173,17 @@ for _ in range(3): # Whenever we need to access certain steps of the training for logging, do static modifications (i.e. not changing the `Solver`) or updating `Problem` hyperparameters (static variables), we can use `Callabacks`. Notice that `Callbacks` allow you to add arbitrary self-contained programs to your training. At specific points during the flow of execution (hooks), the Callback interface allows you to design programs that encapsulate a full set of functionality. It de-couples functionality that does not need to be in **PINA** `Solver`s. # Lightning has a callback system to execute them when needed. Callbacks should capture NON-ESSENTIAL logic that is NOT required for your lightning module to run. -# +# # The following are best practices when using/designing callbacks. -# +# # * Callbacks should be isolated in their functionality. # * Your callback should not rely on the behavior of other callbacks in order to work properly. # * Do not manually call methods from the callback. # * Directly calling methods (eg. on_validation_end) is strongly discouraged. # * Whenever possible, your callbacks should not depend on the order in which they are executed. -# +# # We will try now to implement a naive version of `MetricTraker` to show how callbacks work. Notice that this is a very easy application of callbacks, fortunately in **PINA** we already provide more advanced callbacks in `pina.callbacks`. -# +# # # In[6]: @@ -207,7 +207,7 @@ class NaiveMetricTracker(Callback): ) -# Let's see the results when applyed to the `SimpleODE` problem. You can define callbacks when initializing the `Trainer` by the `callbacks` argument, which expects a list of callbacks. +# Let's see the results when applyed to the `SimpleODE` problem. You can define callbacks when initializing the `Trainer` by the `callbacks` argument, which expects a list of callbacks. # In[7]: @@ -240,8 +240,8 @@ trainer.train() trainer.callbacks[0].saved_metrics[:3] # only the first three epochs -# PyTorch Lightning also has some built in `Callbacks` which can be used in **PINA**, [here an extensive list](https://lightning.ai/docs/pytorch/stable/extensions/callbacks.html#built-in-callbacks). -# +# PyTorch Lightning also has some built in `Callbacks` which can be used in **PINA**, [here an extensive list](https://lightning.ai/docs/pytorch/stable/extensions/callbacks.html#built-in-callbacks). +# # We can for example try the `EarlyStopping` routine, which automatically stops the training when a specific metric converged (here the `train_loss`). In order to let the training keep going forever set `max_epochs=-1`. # In[ ]: @@ -271,18 +271,18 @@ trainer.train() # As we can see the model automatically stop when the logging metric stopped improving! # ## Trainer Tips to Boost Accuracy, Save Memory and Speed Up Training -# +# # Untill now we have seen how to choose the right `accelerator`, how to log and visualize the results, and how to interface with the program in order to add specific parts of code at specific points by `callbacks`. # Now, we well focus on how boost your training by saving memory and speeding it up, while mantaining the same or even better degree of accuracy! 
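# Before moving on, here is a minimal sketch of the `EarlyStopping` setup
# described above (only the monitored metric name comes from the text; the other
# arguments are assumptions):

from pytorch_lightning.callbacks import EarlyStopping

from pina import Trainer

trainer = Trainer(
    solver=pinn,
    callbacks=[EarlyStopping(monitor="train_loss", patience=50)],  # stop once train_loss stops improving
    max_epochs=-1,  # train indefinitely and let the callback decide when to stop
    accelerator="cpu",
)
trainer.train()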
-# -# +# +# # There are several built in methods developed in PyTorch Lightning which can be applied straight forward in **PINA**, here we report some: -# +# # * [Stochastic Weight Averaging](https://pytorch.org/blog/pytorch-1.6-now-includes-stochastic-weight-averaging/) to boost accuracy # * [Gradient Clippling](https://deepgram.com/ai-glossary/gradient-clipping) to reduce computational time (and improve accuracy) -# * [Gradient Accumulation](https://lightning.ai/docs/pytorch/stable/common/optimization.html#id3) to save memory consumption -# * [Mixed Precision Training](https://lightning.ai/docs/pytorch/stable/common/optimization.html#id3) to save memory consumption -# +# * [Gradient Accumulation](https://lightning.ai/docs/pytorch/stable/common/optimization.html#id3) to save memory consumption +# * [Mixed Precision Training](https://lightning.ai/docs/pytorch/stable/common/optimization.html#id3) to save memory consumption +# # We will just demonstrate how to use the first two, and see the results compared to a standard training. # We use the [`Timer`](https://lightning.ai/docs/pytorch/stable/api/lightning.pytorch.callbacks.Timer.html#lightning.pytorch.callbacks.Timer) callback from `pytorch_lightning.callbacks` to take the times. Let's start by training a simple model without any optimization (train for 2000 epochs). @@ -347,7 +347,7 @@ print(f'Total training time {trainer.callbacks[0].time_elapsed("train"):.5f} s') # As you can see, the training time does not change at all! Notice that around epoch `1600` # the scheduler is switched from the defalut one `ConstantLR` to the Stochastic Weight Average Learning Rate (`SWALR`). # This is because by default `StochasticWeightAveraging` will be activated after `int(swa_epoch_start * max_epochs)` with `swa_epoch_start=0.7` by default. Finally, the final `mean_loss` is lower when `StochasticWeightAveraging` is used. -# +# # We will now now do the same but clippling the gradient to be relatively small. # In[12]: @@ -376,13 +376,13 @@ print(f'Total training time {trainer.callbacks[0].time_elapsed("train"):.5f} s') # As we can see we by applying gradient clipping we were able to even obtain lower error! -# +# # ## What's next? -# +# # Now you know how to use efficiently the `Trainer` class **PINA**! There are multiple directions you can go now: -# -# 1. Explore training times on different devices (e.g.) `TPU` -# +# +# 1. Explore training times on different devices (e.g.) `TPU` +# # 2. Try to reduce memory cost by mixed precision training and gradient accumulation (especially useful when training Neural Operators) -# +# # 3. Benchmark `Trainer` speed for different precisions. diff --git a/tutorials/tutorial12/tutorial.py b/tutorials/tutorial12/tutorial.py index 3007440..213e207 100644 --- a/tutorials/tutorial12/tutorial.py +++ b/tutorials/tutorial12/tutorial.py @@ -2,7 +2,7 @@ # coding: utf-8 # # Tutorial: The `Equation` Class -# +# # [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/mathLab/PINA/blob/master/tutorials/tutorial12/tutorial.ipynb) # In this tutorial, we will show how to use the `Equation` Class in PINA. Specifically, we will see how use the Class and its inherited classes to enforce residuals minimization in PINNs. 
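# In short, the pattern looks like the hedged sketch below: a plain Python
# residual function is wrapped in `Equation` and handed to a `Condition`.
# The import path `pina.equation`, the keyword names, and the `interior_domain`
# / `boundary_domain` placeholders are assumptions used only for illustration.

from pina import Condition
from pina.equation import Equation, FixedValue
from pina.operator import grad


def my_residual(input_, output_):
    # toy residual du/dx - u = 0, just to show how a function is wrapped
    return grad(output_, input_, d=["x"]) - output_


conditions = {
    "interior": Condition(domain=interior_domain, equation=Equation(my_residual)),  # interior_domain: placeholder
    "boundary": Condition(domain=boundary_domain, equation=FixedValue(0.0)),  # Dirichlet value; boundary_domain: placeholder
}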
@@ -10,8 +10,8 @@ # # Example: The Burgers 1D equation # We will start implementing the viscous Burgers 1D problem Class, described as follows: -# -# +# +# # $$ # \begin{equation} # \begin{cases} @@ -21,10 +21,10 @@ # \end{cases} # \end{equation} # $$ -# +# # where we set $ \nu = \frac{0.01}{\pi}$. -# -# In the class that models this problem we will see in action the `Equation` class and one of its inherited classes, the `FixedValue` class. +# +# In the class that models this problem we will see in action the `Equation` class and one of its inherited classes, the `FixedValue` class. # In[1]: @@ -99,18 +99,18 @@ class Burgers1D(TimeDependentProblem, SpatialProblem): } -# -# The `Equation` class takes as input a function (in this case it happens twice, with `initial_condition` and `burger_equation`) which computes a residual of an equation, such as a PDE. In a problem class such as the one above, the `Equation` class with such a given input is passed as a parameter in the specified `Condition`. -# +# +# The `Equation` class takes as input a function (in this case it happens twice, with `initial_condition` and `burger_equation`) which computes a residual of an equation, such as a PDE. In a problem class such as the one above, the `Equation` class with such a given input is passed as a parameter in the specified `Condition`. +# # The `FixedValue` class takes as input a value of same dimensions of the output functions; this class can be used to enforce a fixed value for a specific condition, e.g. Dirichlet boundary conditions, as it happens for instance in our example. -# -# Once the equations are set as above in the problem conditions, the PINN solver will aim to minimize the residuals described in each equation in the training phase. +# +# Once the equations are set as above in the problem conditions, the PINN solver will aim to minimize the residuals described in each equation in the training phase. # Available classes of equations include also: # - `FixedGradient` and `FixedFlux`: they work analogously to `FixedValue` class, where we can require a constant value to be enforced, respectively, on the gradient of the solution or the divergence of the solution; # - `Laplace`: it can be used to enforce the laplacian of the solution to be zero; # - `SystemEquation`: we can enforce multiple conditions on the same subdomain through this class, passing a list of residual equations defined in the problem. -# +# # # Defining a new Equation class @@ -182,7 +182,7 @@ class Burgers1D(TimeDependentProblem, SpatialProblem): # # What's next? -# Congratulations on completing the `Equation` class tutorial of **PINA**! As we have seen, you can build new classes that inherit `Equation` to store more complex equations, as the Burgers 1D equation, only requiring to pass the characteristic coefficients of the problem. +# Congratulations on completing the `Equation` class tutorial of **PINA**! As we have seen, you can build new classes that inherit `Equation` to store more complex equations, as the Burgers 1D equation, only requiring to pass the characteristic coefficients of the problem. # From now on, you can: # - define additional complex equation classes (e.g. `SchrodingerEquation`, `NavierStokeEquation`..) # - define more `FixedOperator` (e.g. 
`FixedCurl`) diff --git a/tutorials/tutorial13/tutorial.py b/tutorials/tutorial13/tutorial.py index 257e795..9f33b5b 100644 --- a/tutorials/tutorial13/tutorial.py +++ b/tutorials/tutorial13/tutorial.py @@ -2,15 +2,15 @@ # coding: utf-8 # # Tutorial: Multiscale PDE learning with Fourier Feature Network -# +# # [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/mathLab/PINA/blob/master/tutorials/tutorial13/tutorial.ipynb) -# +# # This tutorial presents how to solve with Physics-Informed Neural Networks (PINNs) # a PDE characterized by multiscale behaviour, as # presented in [*On the eigenvector bias of Fourier feature networks: From regression to solving # multi-scale PDEs with physics-informed neural networks*]( -# https://doi.org/10.1016/j.cma.2021.113938). -# +# https://doi.org/10.1016/j.cma.2021.113938). +# # First of all, some useful imports. # In[ ]: @@ -44,24 +44,24 @@ warnings.filterwarnings("ignore") # ## Multiscale Problem -# +# # We begin by presenting the problem which also can be found in Section 2 of [*On the eigenvector bias of Fourier feature networks: From regression to solving # multi-scale PDEs with physics-informed neural networks*]( # https://doi.org/10.1016/j.cma.2021.113938). The one-dimensional Poisson problem we aim to solve is mathematically written as: -# +# # \begin{equation} # \begin{cases} # \Delta u (x) + f(x) = 0 \quad x \in [0,1], \\ # u(x) = 0 \quad x \in \partial[0,1], \\ # \end{cases} # \end{equation} -# +# # We impose the solution as $u(x) = \sin(2\pi x) + 0.1 \sin(50\pi x)$ and obtain the force term $f(x) = (2\pi)^2 \sin(2\pi x) + 0.1 (50 \pi)^2 \sin(50\pi x)$. # Though this example is simple and pedagogical, it is worth noting that # the solution exhibits low frequency in the macro-scale and high frequency in the micro-scale, which resembles many # practical scenarios. -# -# +# +# # In **PINA** this problem is written, as always, as a class [see here for a tutorial on the Problem class](https://mathlab.github.io/PINA/_rst/tutorials/tutorial1/tutorial.html). Below you can find the `Poisson` problem which is mathmatically described above. # In[2]: @@ -110,8 +110,8 @@ problem.discretise_domain(1, "grid", domains=["bound_cond0", "bound_cond1"]) # A standard PINN approach would be to fit this model using a Feed Forward (fully connected) Neural Network. For a conventional fully-connected neural network is easy to # approximate a function $u$, given sufficient data inside the computational domain. However solving high-frequency or multi-scale problems presents great challenges to PINNs especially when the number of data cannot capture the different scales. -# -# Below we run a simulation using the `PINN` solver and the self adaptive `SAPINN` solver, using a [`FeedForward`](https://mathlab.github.io/PINA/_modules/pina/model/feed_forward.html#FeedForward) model. +# +# Below we run a simulation using the `PINN` solver and the self adaptive `SAPINN` solver, using a [`FeedForward`](https://mathlab.github.io/PINA/_modules/pina/model/feed_forward.html#FeedForward) model. # In[3]: @@ -177,7 +177,7 @@ plot_solution(sapinn, "Self Adaptive PINN solution") # We can clearly see that the solution has not been learned by the two different solvers. Indeed the big problem is not in the optimization strategy (i.e. the solver), but in the model used to solve the problem. A simple `FeedForward` network can hardly handle multiscales if not enough collocation points are used! 
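# The relative $l_2$ error used in the next cell is the standard metric
# $$ \epsilon_{\rm rel} = \frac{\|u_{\theta} - u\|_2}{\|u\|_2}, $$
# which in plain `torch` reads as the hedged sketch below (`u_pred` and `u_true`
# are placeholders for the network prediction and the exact solution on the
# same sample points; the tutorial's own cell may rely on `LpLoss` instead):

relative_l2 = torch.linalg.norm(u_pred - u_true) / torch.linalg.norm(u_true)
print(f"Relative l2 error {relative_l2.item():.2%}")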
-# +# # We can also compute the $l_2$ relative error for the `PINN` and `SAPINN` solutions: # In[5]: @@ -204,11 +204,11 @@ print( # first introduced in [*On the eigenvector bias of Fourier feature networks: From regression to solving # multi-scale PDEs with physics-informed neural networks*]( # https://doi.org/10.1016/j.cma.2021.113938) showing great results for multiscale problems. The basic idea is to map the input $\mathbf{x}$ into an embedding $\tilde{\mathbf{x}}$ where: -# +# # $$ \tilde{\mathbf{x}} =\left[\cos\left( \mathbf{B} \mathbf{x} \right), \sin\left( \mathbf{B} \mathbf{x} \right)\right] $$ -# -# and $\mathbf{B}_{ij} \sim \mathcal{N}(0, \sigma^2)$. This simple operation allow the network to learn on multiple scales! -# +# +# and $\mathbf{B}_{ij} \sim \mathcal{N}(0, \sigma^2)$. This simple operation allow the network to learn on multiple scales! +# # In PINA we already have implemented the feature as a `layer` called [`FourierFeatureEmbedding`](https://mathlab.github.io/PINA/_rst/layers/fourier_embedding.html). Below we will build the *Multi-scale Fourier Feature Architecture*. In this architecture multiple Fourier feature embeddings (initialized with different $\sigma$) # are applied to input coordinates and then passed through the same fully-connected neural network, before the outputs are finally concatenated with a linear layer. @@ -269,15 +269,15 @@ print( # It is pretty clear that the network has learned the correct solution, with also a very low error. Obviously a longer training and a more expressive neural network could improve the results! -# +# # ## What's next? -# +# # Congratulations on completing the one dimensional Poisson tutorial of **PINA** using `FourierFeatureEmbedding`! There are multiple directions you can go now: -# +# # 1. Train the network for longer or with different layer sizes and assert the finaly accuracy -# +# # 2. Understand the role of `sigma` in `FourierFeatureEmbedding` (see original paper for a nice reference) -# +# # 3. Code the *Spatio-temporal multi-scale Fourier feature architecture* for a more complex time dependent PDE (section 3 of the original reference) -# +# # 4. Many more... diff --git a/tutorials/tutorial14/tutorial.py b/tutorials/tutorial14/tutorial.py index ed423b4..835a50b 100644 --- a/tutorials/tutorial14/tutorial.py +++ b/tutorials/tutorial14/tutorial.py @@ -2,11 +2,11 @@ # coding: utf-8 # # Tutorial: Predicting Lid-driven cavity problem parameters with POD-RBF -# +# # [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/mathLab/PINA/blob/master/tutorials/tutorial14/tutorial.ipynb) -# In this tutorial we will show how to use the **PINA** library to predict the distributions of velocity and pressure the Lid-driven Cavity problem, a benchmark in Computational Fluid Dynamics. The problem consists of a square cavity with a lid on top moving with tangential velocity (by convention to the right), with the addition of no-slip conditions on the walls of the cavity and null static pressure on the lower left angle. -# +# In this tutorial we will show how to use the **PINA** library to predict the distributions of velocity and pressure the Lid-driven Cavity problem, a benchmark in Computational Fluid Dynamics. The problem consists of a square cavity with a lid on top moving with tangential velocity (by convention to the right), with the addition of no-slip conditions on the walls of the cavity and null static pressure on the lower left angle. 
+# # Our goal is to predict the distributions of velocity and pressure of the fluid inside the cavity as the Reynolds number of the inlet fluid varies. To do so we're using a Reduced Order Model (ROM) based on Proper Orthogonal Decomposition (POD). The parametric solution manifold is approximated here with Radial Basis Function (RBF) Interpolation, a common mesh-free interpolation method that doesn't require trainers or solvers as the found radial basis functions are used to interpolate new points. # Let's start with the necessary imports. We're particularly interested in the `PODBlock` and `RBFBlock` classes which will allow us to define the POD-RBF model. @@ -24,7 +24,7 @@ except: if IN_COLAB: get_ipython().system('pip install "pina-mathlab"') -get_ipython().run_line_magic("matplotlib", "inline") +get_ipython().run_line_magic('matplotlib', 'inline') import matplotlib.pyplot as plt import torch @@ -38,7 +38,7 @@ warnings.filterwarnings("ignore") # In this tutorial we're gonna use the `LidCavity` class from the [Smithers](https://github.com/mathLab/Smithers) library, which contains a set of parametric solutions of the Lid-driven cavity problem in a square domain. The dataset consists of 300 snapshots of the parameter fields, which in this case are the magnitude of velocity and the pressure, and the corresponding parameter values $u$ and $p$. Each snapshot corresponds to a different value of the tangential velocity $\mu$ of the lid, which has been sampled uniformly between 0.01 m/s and 1 m/s. -# +# # Let's start by importing the dataset: # In[2]: @@ -252,7 +252,7 @@ print(f" Train: {relative_p_error_train.item():e}") print(f" Test: {relative_p_error_test.item():e}") -# Unfortunately here we obtain a very high relative test error, although this is likely due to the nature of the available data. Looking at the plots we can see that the pressure field is subject to high variations between subsequent snapshots, especially here: +# Unfortunately here we obtain a very high relative test error, although this is likely due to the nature of the available data. Looking at the plots we can see that the pressure field is subject to high variations between subsequent snapshots, especially here: # In[12]: @@ -328,11 +328,11 @@ print(f" Test: {new_relative_p_error_test.item():e}") # ## What's next? -# +# # Congratulations on completing the **PINA** tutorial on building and using a custom POD class! Now you can try: -# +# # 1. Varying the inputs of the model (for a list of the supported RB functions look at the `rbf_layer.py` file in `pina.layers`) -# +# # 2. Changing the POD model, for example using Artificial Neural Networks. For a more in depth overview of POD-NN and a comparison with the POD-RBF model already shown, look at [Tutorial: Reduced order model (POD-RBF or POD-NN) for parametric problems](https://colab.research.google.com/github/mathLab/PINA/blob/master/tutorials/tutorial9/tutorial.ipynb) -# +# # 3. 
Building your own classes or adapt the one shown to other datasets/problems diff --git a/tutorials/tutorial2/tutorial.py b/tutorials/tutorial2/tutorial.py index 622783a..a9b6713 100644 --- a/tutorials/tutorial2/tutorial.py +++ b/tutorials/tutorial2/tutorial.py @@ -2,11 +2,11 @@ # coding: utf-8 # # Tutorial: Two dimensional Poisson problem using Extra Features Learning -# +# # [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/mathLab/PINA/blob/master/tutorials/tutorial2/tutorial.ipynb) -# +# # This tutorial presents how to solve with Physics-Informed Neural Networks (PINNs) a 2D Poisson problem with Dirichlet boundary conditions. We will train with standard PINN's training, and with extrafeatures. For more insights on extrafeature learning please read [*An extended physics informed neural network for preliminary analysis of parametric optimal control problems*](https://www.sciencedirect.com/science/article/abs/pii/S0898122123002018). -# +# # First of all, some useful imports. # In[ ]: @@ -44,10 +44,10 @@ warnings.filterwarnings("ignore") # \end{cases} # \end{equation} # where $D$ is a square domain $[0,1]^2$, and $\Gamma_i$, with $i=1,...,4$, are the boundaries of the square. -# +# # The Poisson problem is written in **PINA** code as a class. The equations are written as *conditions* that should be satisfied in the corresponding domains. The *solution* # is the exact solution which will be compared with the predicted one. If interested in how to write problems see [this tutorial](https://mathlab.github.io/PINA/_rst/tutorials/tutorial1/tutorial.html). -# +# # We will directly import the problem from `pina.problem.zoo`, which contains a vast list of PINN problems and more. # In[2]: @@ -76,7 +76,7 @@ problem.discretise_domain( # ## Solving the problem with standard PINNs # After the problem, the feed-forward neural network is defined, through the class `FeedForward`. This neural network takes as input the coordinates (in this case $x$ and $y$) and provides the unkwown field of the Poisson problem. The residual of the equations are evaluated at several sampling points and the loss minimized by the neural network is the sum of the residuals. -# +# # In this tutorial, the neural network is composed by two hidden layers of 10 neurons each, and it is trained for 1000 epochs with a learning rate of 0.006 and $l_2$ weight regularization set to $10^{-8}$. These parameters can be modified as desired. We set the `train_size` to 0.8 and `test_size` to 0.2, this mean that the discretised points will be divided in a 80%-20% fashion, where 80% will be used for training and the remaining 20% for testing. # In[3]: @@ -112,7 +112,7 @@ trainer_base.train() # Now we plot the results using `matplotlib`. -# The solution predicted by the neural network is plotted on the left, the exact one is represented at the center and on the right the error between the exact and the predicted solutions is showed. +# The solution predicted by the neural network is plotted on the left, the exact one is represented at the center and on the right the error between the exact and the predicted solutions is showed. # In[4]: @@ -157,17 +157,17 @@ plot_solution(solver=pinn) # ## Solving the problem with extra-features PINNs # Now, the same problem is solved in a different way. -# A new neural network is now defined, with an additional input variable, named extra-feature, which coincides with the forcing term in the Laplace equation. 
+# A new neural network is now defined, with an additional input variable, named extra-feature, which coincides with the forcing term in the Laplace equation. # The set of input variables to the neural network is: -# +# # \begin{equation} # [x, y, k(x, y)], \text{ with } k(x, y)= 2\pi^2\sin{(\pi x)}\sin{(\pi y)}, # \end{equation} -# +# # where $x$ and $y$ are the spatial coordinates and $k(x, y)$ is the added feature which is equal to the forcing term. -# +# # This feature is initialized in the class `SinSin`, which is a simple `torch.nn.Module`. After declaring such feature, we can just adjust the `FeedForward` class by creating a subclass `FeedForwardWithExtraFeatures` with an adjusted forward method and the additional attribute `extra_features`. -# +# # Finally, we perform the same training as before: the problem is `Poisson`, the network is composed by the same number of neurons and optimizer parameters are equal to previous test, the only change is the new extra feature. # In[6]: @@ -236,14 +236,14 @@ plot_solution(solver=pinn_feat) # ## Solving the problem with learnable extra-features PINNs # We can still do better! -# +# # Another way to exploit the extra features is the addition of learnable parameter inside them. # In this way, the added parameters are learned during the training phase of the neural network. In this case, we use: -# +# # \begin{equation} # k(x, \mathbf{y}) = \beta \sin{(\alpha x)} \sin{(\alpha y)}, # \end{equation} -# +# # where $\alpha$ and $\beta$ are the abovementioned parameters. # Their implementation is quite trivial: by using the class `torch.nn.Parameter` we cam define all the learnable parameters we need, and they are managed by `autograd` module! @@ -348,13 +348,13 @@ _ = trainer_learn.test() # ## What's next? -# +# # Congratulations on completing the two dimensional Poisson tutorial of **PINA**! There are multiple directions you can go now: -# +# # 1. Train the network for longer or with different layer sizes and assert the finaly accuracy -# +# # 2. Propose new types of extrafeatures and see how they affect the learning -# +# # 3. Exploit extrafeature training in more complex problems -# +# # 4. Many more... diff --git a/tutorials/tutorial3/tutorial.py b/tutorials/tutorial3/tutorial.py index 97ad5ed..8595c89 100644 --- a/tutorials/tutorial3/tutorial.py +++ b/tutorials/tutorial3/tutorial.py @@ -2,11 +2,11 @@ # coding: utf-8 # # Tutorial: Two dimensional Wave problem with hard constraint -# +# # [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/mathLab/PINA/blob/master/tutorials/tutorial3/tutorial.ipynb) -# +# # In this tutorial we present how to solve the wave equation using hard constraint PINNs. For doing so we will build a costum `torch` model and pass it to the `PINN` solver. -# +# # First of all, some useful imports. 
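# Before the imports of the wave tutorial below, it is worth making the extra-feature
# idea from the Poisson tutorial above concrete. The diff only shows the comments
# around those classes, so the following is a hedged sketch of the two variants it
# describes; class names and tensor shapes are illustrative, not the exact tutorial code.

import torch


class SinSin(torch.nn.Module):
    """Fixed extra feature: k(x, y) = 2*pi^2 * sin(pi*x) * sin(pi*y), i.e. the forcing term."""

    def forward(self, pts):
        x, y = pts[..., 0:1], pts[..., 1:2]
        return 2 * torch.pi**2 * torch.sin(torch.pi * x) * torch.sin(torch.pi * y)


class SinSinLearnable(torch.nn.Module):
    """Learnable extra feature: k(x, y) = beta * sin(alpha*x) * sin(alpha*y)."""

    def __init__(self):
        super().__init__()
        # initial values are arbitrary here; alpha and beta are optimised together with the network
        self.alpha = torch.nn.Parameter(torch.tensor(1.0))
        self.beta = torch.nn.Parameter(torch.tensor(1.0))

    def forward(self, pts):
        x, y = pts[..., 0:1], pts[..., 1:2]
        return self.beta * torch.sin(self.alpha * x) * torch.sin(self.alpha * y)


# In both cases the feature is appended to the spatial coordinates, so the network
# receives [x, y, k(x, y)] and its first layer has three inputs instead of two.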
# In[ ]: @@ -37,10 +37,10 @@ from pina.callback import MetricTracker warnings.filterwarnings("ignore") -# ## The problem definition +# ## The problem definition # The problem is written in the following form: -# +# # \begin{equation} # \begin{cases} # \Delta u(x,y,t) = \frac{\partial^2}{\partial t^2} u(x,y,t) \quad \text{in } D, \\\\ @@ -48,7 +48,7 @@ warnings.filterwarnings("ignore") # u(x, y, t) = 0 \quad \text{on } \Gamma_1 \cup \Gamma_2 \cup \Gamma_3 \cup \Gamma_4, # \end{cases} # \end{equation} -# +# # where $D$ is a squared domain $[0,1]^2$, and $\Gamma_i$, with $i=1,...,4$, are the boundaries of the square, and the velocity in the standard wave equation is fixed to one. # Now, the wave problem is written in PINA code as a class, inheriting from `SpatialProblem` and `TimeDependentProblem` since we deal with spatial, and time dependent variables. The equations are written as `conditions` that should be satisfied in the corresponding domains. `solution` is the exact solution which will be compared with the predicted one. @@ -111,9 +111,9 @@ problem = Wave() # ## Hard Constraint Model # After the problem, a **torch** model is needed to solve the PINN. Usually, many models are already implemented in **PINA**, but the user has the possibility to build his/her own model in `torch`. The hard constraint we impose is on the boundary of the spatial domain. Specifically, our solution is written as: -# +# # $$ u_{\rm{pinn}} = xy(1-x)(1-y)\cdot NN(x, y, t), $$ -# +# # where $NN$ is the neural net output. This neural network takes as input the coordinates (in this case $x$, $y$ and $t$) and provides the unknown field $u$. By construction, it is zero on the boundaries. The residuals of the equations are evaluated at several sampling points (which the user can manipulate using the method `discretise_domain`) and the loss minimized by the neural network is the sum of the residuals. # In[3]: @@ -240,11 +240,11 @@ plot_solution(solver=pinn, time=1) # The results are not so great, and we can clearly see that as time progresses the solution gets worse.... Can we do better? -# +# # A valid option is to impose the initial condition as hard constraint as well. Specifically, our solution is written as: -# +# # $$ u_{\rm{pinn}} = xy(1-x)(1-y)\cdot NN(x, y, t)\cdot t + \cos(\sqrt{2}\pi t)\sin(\pi x)\sin(\pi y), $$ -# +# # Let us build the network first # In[8]: @@ -324,13 +324,13 @@ plot_solution(solver=pinn, time=1) # We can see now that the results are way better! This is due to the fact that previously the network was not learning correctly the initial conditon, leading to a poor solution when time evolved. By imposing the initial condition the network is able to correctly solve the problem. # ## What's next? -# +# # Congratulations on completing the two dimensional Wave tutorial of **PINA**! There are multiple directions you can go now: -# +# # 1. Train the network for longer or with different layer sizes and assert the finaly accuracy -# +# # 2. Propose new types of hard constraints in time, e.g. $$ u_{\rm{pinn}} = xy(1-x)(1-y)\cdot NN(x, y, t)(1-\exp(-t)) + \cos(\sqrt{2}\pi t)sin(\pi x)\sin(\pi y), $$ -# +# # 3. Exploit extrafeature training for model 1 and 2 -# +# # 4. Many more... 
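# The model classes of the wave tutorial are not part of the diff above, so here is a
# hedged sketch of the second, fully hard-constrained model it describes. The wrapped
# `net` is any torch module mapping (x, y, t) to a scalar; the class name is illustrative.

import torch


class HardConstraintWave(torch.nn.Module):
    def __init__(self, net):
        super().__init__()
        self.net = net

    def forward(self, pts):
        x, y, t = pts[..., 0:1], pts[..., 1:2], pts[..., 2:3]
        boundary = x * y * (1 - x) * (1 - y)  # vanishes on the boundary of the unit square
        initial = (
            torch.cos(2**0.5 * torch.pi * t)
            * torch.sin(torch.pi * x)
            * torch.sin(torch.pi * y)
        )
        # multiplying the network output by t makes the sum equal `initial` at t = 0
        return boundary * self.net(pts) * t + initial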
diff --git a/tutorials/tutorial4/tutorial.py b/tutorials/tutorial4/tutorial.py index d4db53c..1a1b999 100644 --- a/tutorials/tutorial4/tutorial.py +++ b/tutorials/tutorial4/tutorial.py @@ -2,7 +2,7 @@ # coding: utf-8 # # Tutorial: Unstructured convolutional autoencoder via continuous convolution -# +# # [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/mathLab/PINA/blob/master/tutorials/tutorial4/tutorial.ipynb) # In this tutorial, we will show how to use the Continuous Convolutional Filter, and how to build common Deep Learning architectures with it. The implementation of the filter follows the original work [*A Continuous Convolutional Trainable Filter for Modelling Unstructured Data*](https://arxiv.org/abs/2210.13416). @@ -37,15 +37,15 @@ from pina.model import FeedForward # for building AE and MNIST classification warnings.filterwarnings("ignore") -# The tutorial is structured as follow: +# The tutorial is structured as follow: # * [Continuous filter background](#continuous-filter-background): understand how the convolutional filter works and how to use it. -# * [Building a MNIST Classifier](#building-a-mnist-classifier): show how to build a simple classifier using the MNIST dataset and how to combine a continuous convolutional layer with a feedforward neural network. +# * [Building a MNIST Classifier](#building-a-mnist-classifier): show how to build a simple classifier using the MNIST dataset and how to combine a continuous convolutional layer with a feedforward neural network. # * [Building a Continuous Convolutional Autoencoder](#building-a-continuous-convolutional-autoencoder): show how to use the continuous filter to work with unstructured data for autoencoding and up-sampling. # ## Continuous filter background # As reported by the authors in the original paper: in contrast to discrete convolution, continuous convolution is mathematically defined as: -# +# # $$ # \mathcal{I}_{\rm{out}}(\mathbf{x}) = \int_{\mathcal{X}} \mathcal{I}(\mathbf{x} + \mathbf{\tau}) \cdot \mathcal{K}(\mathbf{\tau}) d\mathbf{\tau}, # $$ @@ -53,7 +53,7 @@ warnings.filterwarnings("ignore") # $$ # \mathcal{I}_{\rm{out}}(\mathbf{\tilde{x}}_i) = \sum_{{\mathbf{x}_i}\in\mathcal{X}} \mathcal{I}(\mathbf{x}_i + \mathbf{\tau}) \cdot \mathcal{K}(\mathbf{x}_i), # $$ -# where $\mathbf{\tau} \in \mathcal{S}$, with $\mathcal{S}$ the set of available strides, corresponds to the current stride position of the filter, and $\mathbf{\tilde{x}}_i$ points are obtained by taking the centroid of the filter position mapped on the $\Omega$ domain. +# where $\mathbf{\tau} \in \mathcal{S}$, with $\mathcal{S}$ the set of available strides, corresponds to the current stride position of the filter, and $\mathbf{\tilde{x}}_i$ points are obtained by taking the centroid of the filter position mapped on the $\Omega$ domain. # We will now try to pratically see how to work with the filter. From the above definition we see that what is needed is: # 1. A domain and a function defined on that domain (the input) @@ -61,16 +61,16 @@ warnings.filterwarnings("ignore") # 3. The filter rectangular domain $\rightarrow$ `filter_dim` variable in `ContinuousConv` # ### Input function -# +# # The input function for the continuous filter is defined as a tensor of shape: $$[B \times N_{in} \times N \times D]$$ where $B$ is the batch_size, $N_{in}$ is the number of input fields, $N$ the number of points in the mesh, $D$ the dimension of the problem. 
In particular: # * $D$ is the number of spatial variables + 1. The last column must contain the field value. For example for 2D problems $D=3$ and the tensor will be something like `[first coordinate, second coordinate, field value]` -# * $N_{in}$ represents the number of vectorial function presented. For example a vectorial function $f = [f_1, f_2]$ will have $N_{in}=2$ -# +# * $N_{in}$ represents the number of vectorial function presented. For example a vectorial function $f = [f_1, f_2]$ will have $N_{in}=2$ +# # Let's see an example to clear the ideas. We will be verbose to explain in details the input form. We wish to create the function: # $$ # f(x, y) = [\sin(\pi x) \sin(\pi y), -\sin(\pi x) \sin(\pi y)] \quad (x,y)\in[0,1]\times[0,1] # $$ -# +# # using a batch size equal to 1. # In[2]: @@ -106,9 +106,9 @@ print(f"Filter input data has shape: {data.shape}") # ### Stride -# +# # The stride is passed as a dictionary `stride` which tells the filter where to go. Here is an example for the $[0,1]\times[0,5]$ domain: -# +# # ```python # # stride definition # stride = {"domain": [1, 5], @@ -122,15 +122,15 @@ print(f"Filter input data has shape: {data.shape}") # 2. `start`: start position of the filter, coordinate $(0, 0)$ # 3. `jump`: the jumps of the centroid of the filter to the next position $(0.1, 0.3)$ # 4. `direction`: the directions of the jump, with `1 = right`, `0 = no jump`, `-1 = left` with respect to the current position -# +# # **Note** -# +# # We are planning to release the possibility to directly pass a list of possible strides! # ### Filter definition -# +# # Having defined all the previous blocks, we are now able to construct the continuous filter. -# +# # Suppose we would like to get an output with only one field, and let us fix the filter dimension to be $[0.1, 0.1]$. # In[3]: @@ -184,8 +184,8 @@ output = cConv(data) print(f"Filter output data has shape: {output.shape}") -# If we don't want to use the default `FeedForward` neural network, we can pass a specified torch model in the `model` keyword as follow: -# +# If we don't want to use the default `FeedForward` neural network, we can pass a specified torch model in the `model` keyword as follow: +# # In[6]: @@ -218,7 +218,7 @@ cConv = ContinuousConvBlock( # Notice that we pass the class and not an already built object! # ## Building a MNIST Classifier -# +# # Let's see how we can build a MNIST classifier using a continuous convolutional filter. We will use the MNIST dataset from PyTorch. In order to keep small training times we use only 6000 samples for training and 1000 samples for testing. # In[7]: @@ -374,7 +374,7 @@ print(f"Accuracy of the network on the test images: {(correct / total):.3%}") # As we can see we have very good performance for having trained only for 1 epoch! Nevertheless, we are still using structured data... Let's see how we can build an autoencoder for unstructured data now. # ## Building a Continuous Convolutional Autoencoder -# +# # Just as toy problem, we will now build an autoencoder for the following function $f(x,y)=\sin(\pi x)\sin(\pi y)$ on the unit circle domain centered in $(0.5, 0.5)$. We will also see the ability to up-sample (once trained) the results without retraining. Let's first create the input and visualize it, we will use firstly a mesh of $100$ points. # In[12]: @@ -488,7 +488,7 @@ class Decoder(torch.nn.Module): return torch.sigmoid(self.convolution.transpose(x, grid)) -# Very good! 
Notice that in the `Decoder` class in the `forward` pass we have used the `.transpose()` method of the `ContinuousConvolution` class. This method accepts the `weights` for upsampling and the `grid` on where to upsample. Let's now build the autoencoder! We set the hidden dimension in the `hidden_dimension` variable. We apply the sigmoid on the output since the field value is between $[0, 1]$. +# Very good! Notice that in the `Decoder` class in the `forward` pass we have used the `.transpose()` method of the `ContinuousConvolution` class. This method accepts the `weights` for upsampling and the `grid` on where to upsample. Let's now build the autoencoder! We set the hidden dimension in the `hidden_dimension` variable. We apply the sigmoid on the output since the field value is between $[0, 1]$. # In[14]: @@ -580,7 +580,7 @@ print(f"l2 error: {l2_error(input_data[0, 0, :, -1], output[0, 0, :, -1]):.2%}") # More or less $4\%$ in $l_2$ error, which is really low considering the fact that we use just **one** convolutional layer and a simple feedforward to decrease the dimension. Let's see now some peculiarity of the filter. # ### Filter for upsampling -# +# # Suppose we have already the hidden representation and we want to upsample on a differen grid with more points. Let's see how to do it: # In[18]: @@ -666,11 +666,11 @@ print( # ## What's next? -# +# # We have shown the basic usage of a convolutional filter. There are additional extensions possible: -# +# # 1. Train using Physics Informed strategies -# +# # 2. Use the filter to build an unstructured convolutional autoencoder for reduced order modelling -# +# # 3. Many more... diff --git a/tutorials/tutorial5/tutorial.py b/tutorials/tutorial5/tutorial.py index 7a835c7..4f2d9b1 100644 --- a/tutorials/tutorial5/tutorial.py +++ b/tutorials/tutorial5/tutorial.py @@ -2,9 +2,9 @@ # coding: utf-8 # # Tutorial: Two dimensional Darcy flow using the Fourier Neural Operator -# +# # [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/mathLab/PINA/blob/master/tutorials/tutorial5/tutorial.ipynb) -# +# # In this tutorial we are going to solve the Darcy flow problem in two dimensions, presented in [*Fourier Neural Operator for # Parametric Partial Differential Equation*](https://openreview.net/pdf?id=c8P9NQVtmnO). First of all we import the modules needed for the tutorial. Importing `scipy` is needed for input-output operations. @@ -21,11 +21,9 @@ except: IN_COLAB = False if IN_COLAB: get_ipython().system('pip install "pina-mathlab"') - get_ipython().system("pip install scipy") + get_ipython().system('pip install scipy') # get the data - get_ipython().system( - "wget https://github.com/mathLab/PINA/raw/refs/heads/master/tutorials/tutorial5/Data_Darcy.mat" - ) + get_ipython().system('wget https://github.com/mathLab/PINA/raw/refs/heads/master/tutorials/tutorial5/Data_Darcy.mat') import torch import matplotlib.pyplot as plt @@ -42,15 +40,15 @@ warnings.filterwarnings("ignore") # ## Data Generation -# +# # We will focus on solving a specific PDE, the **Darcy Flow** equation. The Darcy PDE is a second-order elliptic PDE with the following form: -# +# # $$ # -\nabla\cdot(k(x, y)\nabla u(x, y)) = f(x) \quad (x, y) \in D. # $$ -# +# # Specifically, $u$ is the flow pressure, $k$ is the permeability field and $f$ is the forcing function. The Darcy flow can parameterize a variety of systems including flow through porous media, elastic materials and heat conduction. 
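# The Fourier Neural Operator named in this tutorial's title is used below through
# PINA's `FNO` class, whose internals are not shown in the diff. As background, this
# is a generic, from-scratch sketch of the spectral convolution layer such operators
# are built from (a 1D version for brevity, not PINA's implementation):

import torch


class SpectralConv1d(torch.nn.Module):
    """Keeps the lowest `modes` Fourier modes and applies a learned complex weight to them."""

    def __init__(self, channels, modes):
        super().__init__()
        self.modes = modes  # must not exceed n_points // 2 + 1
        scale = 1.0 / channels
        self.weight = torch.nn.Parameter(
            scale * torch.randn(channels, channels, modes, dtype=torch.cfloat)
        )

    def forward(self, x):  # x: (batch, channels, n_points)
        x_ft = torch.fft.rfft(x)
        out_ft = torch.zeros_like(x_ft)
        out_ft[..., : self.modes] = torch.einsum(
            "bim,iom->bom", x_ft[..., : self.modes], self.weight
        )
        return torch.fft.irfft(out_ft, n=x.size(-1))


# In a full FNO, a few of these layers are interleaved with pointwise linear layers and
# nonlinearities; PINA provides ready-made 1D/2D/3D versions.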
Here you will define the domain as a 2D unit square Dirichlet boundary conditions. The dataset is taken from the authors original reference. -# +# # In[2]: @@ -93,7 +91,7 @@ problem = SupervisedProblem( # ## Solving the problem with a FeedForward Neural Network -# +# # We will first solve the problem using a Feedforward neural network. We will use the `SupervisedSolver` for solving the problem, since we are training using supervised learning. # In[5]: @@ -147,7 +145,7 @@ print(f"Final error testing {err:.2f}%") # ## Solving the problem with a Fourier Neural Operator (FNO) -# +# # We will now move to solve the problem using a FNO. Since we are learning operator this approach is better suited, as we shall see. # In[7]: @@ -207,5 +205,5 @@ print(f"Final error testing {err:.2f}%") # As we can see the loss is way lower! # ## What's next? -# +# # We have made a very simple example on how to use the `FNO` for learning neural operator. Currently in **PINA** we implement 1D/2D/3D cases. We suggest to extend the tutorial using more complex problems and train for longer, to see the full potential of neural operators. diff --git a/tutorials/tutorial6/tutorial.py b/tutorials/tutorial6/tutorial.py index b355184..295b760 100644 --- a/tutorials/tutorial6/tutorial.py +++ b/tutorials/tutorial6/tutorial.py @@ -2,15 +2,15 @@ # coding: utf-8 # # Tutorial: Building custom geometries with PINA `DomainInterface` class -# +# # [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/mathLab/PINA/blob/master/tutorials/tutorial6/tutorial.ipynb) -# +# # In this tutorial we will show how to use geometries in PINA. Specifically, the tutorial will include how to create geometries and how to visualize them. The topics covered are: -# +# # * Creating CartesianDomains and EllipsoidDomains # * Getting the Union and Difference of Geometries # * Sampling points in the domain (and visualize them) -# +# # We import the relevant modules first. # In[ ]: @@ -58,8 +58,8 @@ ellipsoid_border = EllipsoidDomain( ) -# The `{'x': [0, 2], 'y': [0, 2]}` are the bounds of the `CartesianDomain` being created. -# +# The `{'x': [0, 2], 'y': [0, 2]}` are the bounds of the `CartesianDomain` being created. +# # To visualize these shapes, we need to sample points on them. We will use the `sample` method of the `CartesianDomain` and `EllipsoidDomain` classes. This method takes a `n` argument which is the number of points to sample. It also takes different modes to sample, such as `'random'`. # In[ ]: @@ -81,7 +81,7 @@ print(f"Ellipsoid Border Samples: {ellipsoid_border_samples}") # Notice how these are all `LabelTensor` objects. You can read more about these in the [documentation](https://mathlab.github.io/PINA/_rst/label_tensor.html). At a very high level, they are tensors where each element in a tensor has a label that we can access by doing `.labels`. We can also access the values of the tensor by doing `.extract(['x'])`. -# +# # We are now ready to visualize the samples using matplotlib. # In[ ]: @@ -101,7 +101,7 @@ for ax, pts, title in zip(axs, pts_list, title_list): # We have now created, sampled, and visualized our first geometries! We can see that the `EllipsoidDomain` with the border has a border around it. We can also see that the `EllipsoidDomain` without the border is just the ellipse. We can also see that the `CartesianDomain` is just a square. 
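# Before moving on to simplex domains, here is a small recap of the sampling calls used
# above. The import path is an assumption (check the tutorial's import cell); the
# `CartesianDomain` bounds and the `.sample`, `.labels` and `.extract` calls mirror the
# ones already shown.

from pina.domain import CartesianDomain  # assumed import path

cartesian = CartesianDomain({"x": [0, 2], "y": [0, 2]})
pts = cartesian.sample(n=5, mode="random")  # a LabelTensor with one row per point
print(pts.labels)  # ['x', 'y']
print(pts.extract(["x"]))  # only the x-coordinates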
# ### Simplex Domain -# +# # Among the built-in shapes, we quickly show here the usage of `SimplexDomain`, which can be used for polygonal domains! # In[ ]: @@ -135,7 +135,7 @@ for domain, ax in zip([spatial_domain, spatial_domain2], axs): # ## Boolean Operations # To create complex shapes we can use the boolean operations, for example to merge two default geometries. We need to simply use the `Union` class: it takes a list of geometries and returns the union of them. -# +# # Let's create three unions. Firstly, it will be a union of `cartesian` and `ellipsoid_no_border`. Next, it will be a union of `ellipse_no_border` and `ellipse_border`. Lastly, it will be a union of all three geometries. # In[7]: @@ -235,7 +235,7 @@ class Heart(DomainInterface): pass -# Now we have the skeleton for our `Heart` class. Also the `sample` method is where most of the work is done so let's fill it out. +# Now we have the skeleton for our `Heart` class. Also the `sample` method is where most of the work is done so let's fill it out. # In[ ]: @@ -289,5 +289,5 @@ plot_scatter(ax, pts_heart, "Heart Domain") # ## What's next? -# -# We have made a very simple tutorial on how to build custom geometries and use domain operation to compose base geometries. Now you can play around with different geometries and build your own! +# +# We have made a very simple tutorial on how to build custom geometries and use domain operation to compose base geometries. Now you can play around with different geometries and build your own! diff --git a/tutorials/tutorial7/tutorial.py b/tutorials/tutorial7/tutorial.py index 69d51d7..9237e70 100644 --- a/tutorials/tutorial7/tutorial.py +++ b/tutorials/tutorial7/tutorial.py @@ -2,7 +2,7 @@ # coding: utf-8 # # Tutorial: Resolution of an inverse problem -# +# # [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/mathLab/PINA/blob/master/tutorials/tutorial7/tutorial.ipynb) # ### Introduction to the inverse problem @@ -16,11 +16,11 @@ # \end{cases} # \end{equation} # where $\Omega$ is a square domain $[-2, 2] \times [-2, 2]$, and $\partial \Omega=\Gamma_1 \cup \Gamma_2 \cup \Gamma_3 \cup \Gamma_4$ is the union of the boundaries of the domain. -# +# # This kind of problem, namely the "inverse problem", has two main goals: # - find the solution $u$ that satisfies the Poisson equation; # - find the unknown parameters ($\mu_1$, $\mu_2$) that better fit some given data (third equation in the system above). -# +# # In order to achieve both goals we will need to define an `InverseProblem` in PINA. # Let's start with useful imports. @@ -39,12 +39,8 @@ if IN_COLAB: get_ipython().system('pip install "pina-mathlab"') # get the data get_ipython().system('mkdir "data"') - get_ipython().system( - 'wget "https://github.com/mathLab/PINA/raw/refs/heads/master/tutorials/tutorial7/data/pinn_solution_0.5_0.5" -O "data/pinn_solution_0.5_0.5"' - ) - get_ipython().system( - 'wget "https://github.com/mathLab/PINA/raw/refs/heads/master/tutorials/tutorial7/data/pts_0.5_0.5" -O "data/pts_0.5_0.5"' - ) + get_ipython().system('wget "https://github.com/mathLab/PINA/raw/refs/heads/master/tutorials/tutorial7/data/pinn_solution_0.5_0.5" -O "data/pinn_solution_0.5_0.5"') + get_ipython().system('wget "https://github.com/mathLab/PINA/raw/refs/heads/master/tutorials/tutorial7/data/pts_0.5_0.5" -O "data/pts_0.5_0.5"') import matplotlib.pyplot as plt import torch @@ -249,11 +245,11 @@ plt.show() # ## What's next? 
-# +# # We have shown the basic usage PINNs in inverse problem modelling, further extensions include: -# +# # 1. Train using different Physics Informed strategies -# +# # 2. Try on more complex problems -# +# # 3. Many more... diff --git a/tutorials/tutorial8/tutorial.py b/tutorials/tutorial8/tutorial.py index 4f3f5bf..374019f 100644 --- a/tutorials/tutorial8/tutorial.py +++ b/tutorials/tutorial8/tutorial.py @@ -2,11 +2,11 @@ # coding: utf-8 # # Tutorial: Reduced order models (POD-NN and POD-RBF) for parametric problems -# +# # [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/mathLab/PINA/blob/master/tutorials/tutorial9/tutorial.ipynb) # The tutorial aims to show how to employ the **PINA** library in order to apply a reduced order modeling technique [1]. Such methodologies have several similarities with machine learning approaches, since the main goal consists in predicting the solution of differential equations (typically parametric PDEs) in a real-time fashion. -# +# # In particular we are going to use the Proper Orthogonal Decomposition with either Radial Basis Function Interpolation (POD-RBF) or Neural Network (POD-NN) [2]. Here we basically perform a dimensional reduction using the POD approach, approximating the parametric solution manifold (at the reduced space) using a regression technique (NN) and comparing it to an RBF interpolation. In this example, we use a simple multilayer perceptron, but the plenty of different architectures can be plugged as well. # Let's start with the necessary imports. @@ -25,7 +25,7 @@ except: if IN_COLAB: get_ipython().system('pip install "pina-mathlab"') -get_ipython().run_line_magic("matplotlib", "inline") +get_ipython().run_line_magic('matplotlib', 'inline') import matplotlib import matplotlib.pyplot as plt @@ -45,7 +45,7 @@ warnings.filterwarnings("ignore") # We exploit the [Smithers](https://github.com/mathLab/Smithers) library to collect the parametric snapshots. In particular, we use the `NavierStokesDataset` class that contains a set of parametric solutions of the Navier-Stokes equations in a 2D L-shape domain. The parameter is the inflow velocity. # The dataset is composed by 500 snapshots of the velocity (along $x$, $y$, and the magnitude) and pressure fields, and the corresponding parameter values. -# +# # To visually check the snapshots, let's plot also the data points and the reference solution: this is the expected output of our model. # In[83]: @@ -62,7 +62,7 @@ for ax, p, u in zip(axs, dataset.params[:4], dataset.snapshots["mag(v)"][:4]): # The *snapshots* - aka the numerical solutions computed for several parameters - and the corresponding parameters are the only data we need to train the model, in order to predict the solution for any new test parameter. To properly validate the accuracy, we will split the 500 snapshots into the training dataset (90% of the original data) and the testing one (the reamining 10%) inside the `Trainer`. -# +# # It is now time to define the problem! # In[84]: @@ -311,5 +311,5 @@ plt.show() # #### References -# 1. Rozza G., Stabile G., Ballarin F. (2022). Advanced Reduced Order Methods and Applications in Computational Fluid Dynamics, Society for Industrial and Applied Mathematics. +# 1. Rozza G., Stabile G., Ballarin F. (2022). Advanced Reduced Order Methods and Applications in Computational Fluid Dynamics, Society for Industrial and Applied Mathematics. # 2. Hesthaven, J. S., & Ubbiali, S. (2018). 
Non-intrusive reduced order modeling of nonlinear problems using neural networks. Journal of Computational Physics, 363, 55-78. diff --git a/tutorials/tutorial9/tutorial.py b/tutorials/tutorial9/tutorial.py index ae03c18..311e9b7 100644 --- a/tutorials/tutorial9/tutorial.py +++ b/tutorials/tutorial9/tutorial.py @@ -2,16 +2,16 @@ # coding: utf-8 # # Tutorial: One dimensional Helmholtz equation using Periodic Boundary Conditions -# +# # [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/mathLab/PINA/blob/master/tutorials/tutorial9/tutorial.ipynb) -# +# # This tutorial presents how to solve with Physics-Informed Neural Networks (PINNs) # a one dimensional Helmholtz equation with periodic boundary conditions (PBC). # We will train with standard PINN's training by augmenting the input with # periodic expansion as presented in [*An expert’s guide to training # physics-informed neural networks*]( # https://arxiv.org/abs/2308.08468). -# +# # First of all, some useful imports. # In[ ]: @@ -45,7 +45,7 @@ warnings.filterwarnings("ignore") # ## The problem definition -# +# # The one-dimensional Helmholtz problem is mathematically written as: # $$ # \begin{cases} @@ -56,13 +56,13 @@ warnings.filterwarnings("ignore") # In this case we are asking the solution to be $C^{\infty}$ periodic with # period $2$, on the infinite domain $x\in(-\infty, \infty)$. Notice that the # classical PINN would need infinite conditions to evaluate the PBC loss function, -# one for each derivative, which is of course infeasible... +# one for each derivative, which is of course infeasible... # A possible solution, diverging from the original PINN formulation, # is to use *coordinates augmentation*. In coordinates augmentation you seek for # a coordinates transformation $v$ such that $x\rightarrow v(x)$ such that # the periodicity condition $ u^{(m)}(x=0) - u^{(m)}(x=2) = 0 \quad m\in[0, 1, \cdots] $ is # satisfied. -# +# # For demonstration purposes, the problem specifics are $\lambda=-10\pi^2$, # and $f(x)=-6\pi^2\sin(3\pi x)\cos(\pi x)$ which give a solution that can be # computed analytically $u(x) = \sin(\pi x)\cos(3\pi x)$. @@ -104,7 +104,7 @@ problem = Helmholtz() problem.discretise_domain(200, "grid", domains=["phys_cond"]) -# As usual, the Helmholtz problem is written in **PINA** code as a class. +# As usual, the Helmholtz problem is written in **PINA** code as a class. # The equations are written as `conditions` that should be satisfied in the # corresponding domains. The `solution` # is the exact solution which will be compared with the predicted one. We used @@ -121,7 +121,7 @@ problem.discretise_domain(200, "grid", domains=["phys_cond"]) # arbitrary dimension, see [*A method for representing periodic functions and # enforcing exactly periodic boundary conditions with # deep neural networks*](https://arxiv.org/pdf/2007.07442). -# +# # In our case, we rewrite # $v(x) = \left[1, \cos\left(\frac{2\pi}{L} x\right), # \sin\left(\frac{2\pi}{L} x\right)\right]$, i.e @@ -129,10 +129,10 @@ problem.discretise_domain(200, "grid", domains=["phys_cond"]) # network. The resulting neural network obtained by composing $f$ with $v$ gives # the PINN approximate solution, that is # $u(x) \approx u_{\theta}(x)=NN_{\theta}(v(x))$. -# +# # In **PINA** this translates in using the `PeriodicBoundaryEmbedding` layer for $v$, and any -# `pina.model` for $NN_{\theta}$. Let's see it in action! -# +# `pina.model` for $NN_{\theta}$. Let's see it in action! 
+# # In[16]: @@ -151,7 +151,7 @@ model = torch.nn.Sequential( # As simple as that! Notice that in higher dimensions you can specify different periods # for all dimensions using a dictionary, e.g. `periods={'x':2, 'y':3, ...}` # would indicate a periodicity of $2$ in $x$, $3$ in $y$, and so on... -# +# # We will now solve the problem as usual with the `PINN` and `Trainer` classes, then we will look at the losses using the `MetricTracker` callback from `pina.callback`. # In[17]: @@ -232,15 +232,15 @@ with torch.no_grad(): # It is pretty clear that the network is periodic, with the error also following a periodic pattern. Obviously, longer training and a more expressive neural network could improve the results! -# +# # ## What's next? -# +# # Congratulations on completing the one dimensional Helmholtz tutorial of **PINA**! There are multiple directions you can go now: -# +# # 1. Train the network for longer or with different layer sizes and assess the final accuracy -# +# # 2. Apply the `PeriodicBoundaryEmbedding` layer for a time-dependent problem (see reference in the documentation) -# +# # 3. Exploit extrafeature training? -# +# # 4. Many more...
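# As a closing recap of the coordinate augmentation used in this tutorial, here is a
# minimal stand-alone version of the periodic embedding composed with a plain MLP. It
# is a sketch of the idea, not PINA's `PeriodicBoundaryEmbedding` implementation.

import torch


class PeriodicEmbedding1D(torch.nn.Module):
    """Maps x to v(x) = [1, cos(2*pi*x/L), sin(2*pi*x/L)], so any net applied on top is L-periodic."""

    def __init__(self, period):
        super().__init__()
        self.omega = 2.0 * torch.pi / period

    def forward(self, x):  # x: (n_points, 1)
        return torch.cat(
            [torch.ones_like(x), torch.cos(self.omega * x), torch.sin(self.omega * x)],
            dim=-1,
        )


# u_theta(x) = MLP(v(x)): the composition is periodic with period L = 2 by construction
model = torch.nn.Sequential(
    PeriodicEmbedding1D(period=2),
    torch.nn.Linear(3, 64),
    torch.nn.Tanh(),
    torch.nn.Linear(64, 64),
    torch.nn.Tanh(),
    torch.nn.Linear(64, 1),
)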