Tutorials and Doc (#191)
* Tutorial doc update
* update doc tutorial
* doc not compiling

---------

Co-authored-by: Dario Coscia <dcoscia@euclide.maths.sissa.it>
Co-authored-by: Dario Coscia <dariocoscia@Dario-Coscia.local>
tutorials/tutorial5/tutorial.ipynb | 101 lines changed (vendored)
@@ -5,7 +5,7 @@
    "id": "e80567a6",
    "metadata": {},
    "source": [
-    "# Tutorial 5: Fourier Neural Operator Learning"
+    "# Tutorial: Two dimensional Darcy flow using the Fourier Neural Operator"
    ]
   },
   {
@@ -13,8 +13,8 @@
    "id": "8762bbe5",
    "metadata": {},
    "source": [
-    "In this tutorial we are going to solve the Darcy flow 2d problem, presented in [Fourier Neural Operator for\n",
-    "Parametric Partial Differential Equation](https://openreview.net/pdf?id=c8P9NQVtmnO). First of all we import the modules needed for the tutorial. Importing `scipy` is needed for input output operation, run `pip install scipy` for installing it."
+    "In this tutorial we are going to solve the Darcy flow problem in two dimensions, presented in [*Fourier Neural Operator for\n",
+    "Parametric Partial Differential Equations*](https://openreview.net/pdf?id=c8P9NQVtmnO). First of all, we import the modules needed for the tutorial. Importing `scipy` is needed for input/output operations."
    ]
   },
   {
@@ -22,18 +22,9 @@
    "execution_count": 1,
    "id": "5f2744dc",
    "metadata": {},
-   "outputs": [
-    {
-     "name": "stderr",
-     "output_type": "stream",
-     "text": [
-      "/opt/sissa/apps/intelpython/2022.0.2/intelpython/latest/lib/python3.9/site-packages/scipy/__init__.py:138: UserWarning: A NumPy version >=1.16.5 and <1.23.0 is required for this version of SciPy (detected version 1.26.0)\n",
-      " warnings.warn(f\"A NumPy version >={np_minversion} and <{np_maxversion} is required for this version of \"\n"
-     ]
-    }
-   ],
+   "outputs": [],
    "source": [
-    "\n",
+    "# !pip install scipy # install scipy\n",
     "from scipy import io\n",
     "import torch\n",
     "from pina.model import FNO, FeedForward # let's import some models\n",
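For reference, a sketch of how the updated import cell reads after this change, assembled from the `+` and context lines of the hunk above (the cell may contain further imports, e.g. the solver and trainer classes, beyond the truncated diff context):

```python
# !pip install scipy  # install scipy, if it is not already available
from scipy import io  # input/output operations on the .mat dataset
import torch

from pina.model import FNO, FeedForward  # let's import some models
```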
@@ -63,7 +54,7 @@
   },
   {
    "cell_type": "code",
-   "execution_count": 2,
+   "execution_count": 17,
    "id": "2ffb8a4c",
    "metadata": {},
    "outputs": [],
@@ -71,9 +62,9 @@
    "# download the dataset\n",
    "data = io.loadmat(\"Data_Darcy.mat\")\n",
    "\n",
-   "# extract data\n",
-   "k_train = torch.tensor(data['k_train'], dtype=torch.float).unsqueeze(-1)\n",
-   "u_train = torch.tensor(data['u_train'], dtype=torch.float).unsqueeze(-1)\n",
+   "# extract data (we use only 100 samples for training)\n",
+   "k_train = torch.tensor(data['k_train'], dtype=torch.float).unsqueeze(-1)[:100, ...]\n",
+   "u_train = torch.tensor(data['u_train'], dtype=torch.float).unsqueeze(-1)[:100, ...]\n",
    "k_test = torch.tensor(data['k_test'], dtype=torch.float).unsqueeze(-1)\n",
    "u_test = torch.tensor(data['u_test'], dtype=torch.float).unsqueeze(-1)\n",
    "x = torch.tensor(data['x'], dtype=torch.float)[0]\n",
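Put together, the updated data-extraction cell reads roughly as follows; this is a sketch assembled from the hunk above, assuming `io` and `torch` were imported as shown earlier and that `Data_Darcy.mat` is already on disk:

```python
# load the dataset from disk
data = io.loadmat("Data_Darcy.mat")

# extract data (we use only 100 samples for training)
k_train = torch.tensor(data['k_train'], dtype=torch.float).unsqueeze(-1)[:100, ...]
u_train = torch.tensor(data['u_train'], dtype=torch.float).unsqueeze(-1)[:100, ...]
k_test = torch.tensor(data['k_test'], dtype=torch.float).unsqueeze(-1)
u_test = torch.tensor(data['u_test'], dtype=torch.float).unsqueeze(-1)
x = torch.tensor(data['x'], dtype=torch.float)[0]
```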
@@ -90,7 +81,7 @@
   },
   {
    "cell_type": "code",
-   "execution_count": 3,
+   "execution_count": 18,
    "id": "c8501b6f",
    "metadata": {},
    "outputs": [
@@ -125,7 +116,7 @@
   },
   {
    "cell_type": "code",
-   "execution_count": 4,
+   "execution_count": 19,
    "id": "8b27d283",
    "metadata": {},
    "outputs": [],
@@ -152,7 +143,7 @@
   },
   {
    "cell_type": "code",
-   "execution_count": 5,
+   "execution_count": 20,
    "id": "e34f18b0",
    "metadata": {},
    "outputs": [
@@ -160,35 +151,16 @@
    "name": "stderr",
    "output_type": "stream",
    "text": [
-    "/u/n/ndemo/.local/lib/python3.9/site-packages/torch/cuda/__init__.py:611: UserWarning: Can't initialize NVML\n",
-    " warnings.warn(\"Can't initialize NVML\")\n",
-    "GPU available: True (cuda), used: True\n",
+    "GPU available: False, used: False\n",
     "TPU available: False, using: 0 TPU cores\n",
     "IPU available: False, using: 0 IPUs\n",
-    "HPU available: False, using: 0 HPUs\n",
-    "Missing logger folder: /u/n/ndemo/PINA/tutorials/tutorial5/lightning_logs\n",
-    "2023-10-17 10:41:03.316644: I tensorflow/core/util/port.cc:110] oneDNN custom operations are on. You may see slightly different numerical results due to floating-point round-off errors from different computation orders. To turn them off, set the environment variable `TF_ENABLE_ONEDNN_OPTS=0`.\n",
-    "2023-10-17 10:41:03.333768: I tensorflow/tsl/cuda/cudart_stub.cc:28] Could not find cuda drivers on your machine, GPU will not be used.\n",
-    "2023-10-17 10:41:03.383188: I tensorflow/core/platform/cpu_feature_guard.cc:182] This TensorFlow binary is optimized to use available CPU instructions in performance-critical operations.\n",
-    "To enable the following instructions: AVX2 AVX512F AVX512_VNNI FMA, in other operations, rebuild TensorFlow with the appropriate compiler flags.\n",
-    "2023-10-17 10:41:07.712785: W tensorflow/compiler/tf2tensorrt/utils/py_utils.cc:38] TF-TRT Warning: Could not find TensorRT\n",
-    "LOCAL_RANK: 0 - CUDA_VISIBLE_DEVICES: [0]\n",
-    "\n",
-    " | Name | Type | Params\n",
-    "----------------------------------------\n",
-    "0 | _loss | MSELoss | 0 \n",
-    "1 | _neural_net | Network | 481 \n",
-    "----------------------------------------\n",
-    "481 Trainable params\n",
-    "0 Non-trainable params\n",
-    "481 Total params\n",
-    "0.002 Total estimated model params size (MB)\n"
+    "HPU available: False, using: 0 HPUs\n"
    ]
   },
   {
    "data": {
     "application/vnd.jupyter.widget-view+json": {
-     "model_id": "eb573678e5d94f0490ce09817a06f5cb",
+     "model_id": "40f63403b97248a88e49755e8cb096fc",
      "version_major": 2,
      "version_minor": 0
     },
@@ -203,22 +175,20 @@
    "name": "stderr",
    "output_type": "stream",
    "text": [
-    "/u/n/ndemo/.local/lib/python3.9/site-packages/torch/_tensor.py:1386: UserWarning: The use of `x.T` on tensors of dimension other than 2 to reverse their shape is deprecated and it will throw an error in a future release. Consider `x.mT` to transpose batches of matrices or `x.permute(*torch.arange(x.ndim - 1, -1, -1))` to reverse the dimensions of a tensor. (Triggered internally at ../aten/src/ATen/native/TensorShape.cpp:3614.)\n",
-    " ret = func(*args, **kwargs)\n",
     "`Trainer.fit` stopped: `max_epochs=100` reached.\n"
    ]
   }
  ],
  "source": [
   "# make model\n",
-  "model=FeedForward(input_dimensions=1, output_dimensions=1)\n",
+  "model = FeedForward(input_dimensions=1, output_dimensions=1)\n",
   "\n",
   "\n",
   "# make solver\n",
   "solver = SupervisedSolver(problem=problem, model=model)\n",
   "\n",
   "# make the trainer and train\n",
-  "trainer = Trainer(solver=solver, max_epochs=100)\n",
+  "trainer = Trainer(solver=solver, max_epochs=100, accelerator='cpu', enable_model_summary=False) # we train on CPU and avoid model summary at beginning of training (optional)\n",
   "trainer.train()\n"
  ]
 },
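The updated training cell for the feed-forward baseline then reads roughly as below, a sketch assembled from the hunk above; `problem`, `SupervisedSolver`, and `Trainer` are defined or imported elsewhere in the notebook, outside this diff:

```python
# make model
model = FeedForward(input_dimensions=1, output_dimensions=1)

# make solver
solver = SupervisedSolver(problem=problem, model=model)

# make the trainer and train on CPU, skipping the model summary (optional)
trainer = Trainer(solver=solver, max_epochs=100, accelerator='cpu', enable_model_summary=False)
trainer.train()
```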
@@ -232,7 +202,7 @@
   },
   {
    "cell_type": "code",
-   "execution_count": 6,
+   "execution_count": 21,
    "id": "0e2a6aa4",
    "metadata": {},
    "outputs": [
@@ -240,8 +210,8 @@
    "name": "stdout",
    "output_type": "stream",
    "text": [
-    "Final error training 56.86%\n",
-    "Final error testing 56.82%\n"
+    "Final error training 56.24%\n",
+    "Final error testing 55.95%\n"
    ]
   }
  ],
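The cell that prints these percentages is not part of this hunk; a plausible sketch of such a relative error metric (the function name and the exact norm are assumptions, not the notebook's actual code) would be:

```python
import torch

def relative_error(pred: torch.Tensor, target: torch.Tensor) -> float:
    # hypothetical helper: relative L2 error between prediction and
    # ground truth, reported in percent
    return (torch.linalg.norm(pred - target) / torch.linalg.norm(target) * 100).item()
```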
@@ -271,7 +241,7 @@
   },
   {
    "cell_type": "code",
-   "execution_count": 7,
+   "execution_count": 22,
    "id": "9af523a5",
    "metadata": {},
    "outputs": [
@@ -279,27 +249,16 @@
    "name": "stderr",
    "output_type": "stream",
    "text": [
-    "GPU available: True (cuda), used: True\n",
+    "GPU available: False, used: False\n",
     "TPU available: False, using: 0 TPU cores\n",
     "IPU available: False, using: 0 IPUs\n",
-    "HPU available: False, using: 0 HPUs\n",
-    "LOCAL_RANK: 0 - CUDA_VISIBLE_DEVICES: [0]\n",
-    "\n",
-    " | Name | Type | Params\n",
-    "----------------------------------------\n",
-    "0 | _loss | MSELoss | 0 \n",
-    "1 | _neural_net | Network | 591 K \n",
-    "----------------------------------------\n",
-    "591 K Trainable params\n",
-    "0 Non-trainable params\n",
-    "591 K Total params\n",
-    "2.364 Total estimated model params size (MB)\n"
+    "HPU available: False, using: 0 HPUs\n"
    ]
   },
   {
    "data": {
     "application/vnd.jupyter.widget-view+json": {
-     "model_id": "0f7225d39f7241e692c6027c72adfd5f",
+     "model_id": "5328859a5d9344ddb818622fd058d2a5",
      "version_major": 2,
      "version_minor": 0
     },
@@ -314,7 +273,7 @@
    "name": "stderr",
    "output_type": "stream",
    "text": [
-    "`Trainer.fit` stopped: `max_epochs=20` reached.\n"
+    "`Trainer.fit` stopped: `max_epochs=100` reached.\n"
    ]
   }
  ],
@@ -334,7 +293,7 @@
   "solver = SupervisedSolver(problem=problem, model=model)\n",
   "\n",
   "# make the trainer and train\n",
-  "trainer = Trainer(solver=solver, max_epochs=20)\n",
+  "trainer = Trainer(solver=solver, max_epochs=100, accelerator='cpu', enable_model_summary=False) # we train on CPU and avoid model summary at beginning of training (optional)\n",
   "trainer.train()\n"
  ]
 },
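The corresponding cell for the FNO model follows the same pattern; a sketch assembled from the hunk above (the FNO construction itself, e.g. its mode and width arguments, sits outside this diff context, so `model` here stands for the already-built `FNO` instance):

```python
# make solver for the FNO model built earlier in the notebook
solver = SupervisedSolver(problem=problem, model=model)

# make the trainer and train: same epoch budget and settings as the baseline
trainer = Trainer(solver=solver, max_epochs=100, accelerator='cpu', enable_model_summary=False)
trainer.train()
```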
@@ -343,12 +302,12 @@
    "id": "84964cb9",
    "metadata": {},
    "source": [
-    "We can clearly see that with 1/3 of the total epochs the loss is lower. Let's see in testing.. Notice that the number of parameters is way higher than a `FeedForward` network. We suggest to use GPU or TPU for a speed up in training."
+    "We can clearly see that the final loss is lower. Let's check the testing error as well. Notice that the number of parameters is far higher than in a `FeedForward` network; when many data samples are used, we suggest training on a GPU or TPU for a speed-up."
    ]
   },
   {
    "cell_type": "code",
-   "execution_count": 8,
+   "execution_count": 23,
    "id": "58e2db89",
    "metadata": {},
    "outputs": [
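Following the suggestion in the rewritten cell, moving training onto an accelerator should only require changing the `accelerator` flag; a sketch, assuming PINA's `Trainer` forwards the standard Lightning accelerator strings:

```python
# train on GPU instead of CPU (assumes a CUDA device is visible);
# 'gpu' is the standard Lightning accelerator value
trainer = Trainer(solver=solver, max_epochs=100, accelerator='gpu', enable_model_summary=False)
trainer.train()
```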
@@ -356,8 +315,8 @@
    "name": "stdout",
    "output_type": "stream",
    "text": [
-    "Final error training 26.19%\n",
-    "Final error testing 25.89%\n"
+    "Final error training 10.86%\n",
+    "Final error testing 12.77%\n"
    ]
   }
  ],