Update tutorials 1 through 12 to current version 0.2

This commit is contained in:
Matteo Bertocchi
2025-02-26 16:21:12 +01:00
committed by Nicola Demo
parent 8b797d589a
commit d83ca3af6e
82 changed files with 1074 additions and 1224 deletions

View File

@@ -80,7 +80,7 @@
},
{
"cell_type": "code",
"execution_count": 27,
"execution_count": 1,
"id": "2373a925",
"metadata": {},
"outputs": [],
@@ -134,13 +134,21 @@
},
{
"cell_type": "code",
"execution_count": 28,
"execution_count": null,
"id": "f2608e2e",
"metadata": {},
"outputs": [],
"outputs": [
{
"name": "stderr",
"output_type": "stream",
"text": [
"/home/matte_b/PINA/pina/operators.py: DeprecationWarning: 'pina.operators' is deprecated and will be removed in future versions. Please use 'pina.operator' instead.\n"
]
}
],
"source": [
"from pina.problem import SpatialProblem\n",
"from pina.operators import grad\n",
"from pina.operator import grad\n",
"from pina import Condition\n",
"from pina.domain import CartesianDomain\n",
"from pina.equation import Equation, FixedValue\n",
@@ -209,20 +217,20 @@
},
{
"cell_type": "code",
"execution_count": 29,
"execution_count": 3,
"id": "09ce5c3a",
"metadata": {},
"outputs": [],
"source": [
"# sampling 20 points in [0, 1] through discretization in all locations\n",
"problem.discretise_domain(n=20, mode='grid', variables=['x'], domains='all')\n",
"problem.discretise_domain(n=20, mode='grid', domains='all')\n",
"\n",
"# sampling 20 points in (0, 1) through latin hypercube sampling in D, and 1 point in x0\n",
"problem.discretise_domain(n=20, mode='latin', variables=['x'], domains=['D'])\n",
"problem.discretise_domain(n=1, mode='random', variables=['x'], domains=['x0'])\n",
"problem.discretise_domain(n=20, mode='latin', domains=['D'])\n",
"problem.discretise_domain(n=1, mode='random', domains=['x0'])\n",
"\n",
"# sampling 20 points in (0, 1) randomly\n",
"problem.discretise_domain(n=20, mode='random', variables=['x'])"
"problem.discretise_domain(n=20, mode='random')"
]
},
{
@@ -235,7 +243,7 @@
},
{
"cell_type": "code",
"execution_count": 30,
"execution_count": 4,
"id": "329962b6",
"metadata": {},
"outputs": [],
@@ -255,7 +263,7 @@
},
{
"cell_type": "code",
"execution_count": 31,
"execution_count": 5,
"id": "d6ed9aaf",
"metadata": {},
"outputs": [
@@ -282,26 +290,26 @@
" [0.],\n",
" [0.],\n",
" [0.],\n",
" [0.]]), 'D': LabelTensor([[0.4156],\n",
" [0.8975],\n",
" [0.5223],\n",
" [0.5617],\n",
" [0.3636],\n",
" [0.2104],\n",
" [0.0502],\n",
" [0.4684],\n",
" [0.6188],\n",
" [0.9159],\n",
" [0.7120],\n",
" [0.1375],\n",
" [0.8148],\n",
" [0.0322],\n",
" [0.3204],\n",
" [0.1807],\n",
" [0.2869],\n",
" [0.7945],\n",
" [0.6901],\n",
" [0.9740]])}\n",
" [0.]]), 'D': LabelTensor([[0.1420],\n",
" [0.3743],\n",
" [0.7738],\n",
" [0.2501],\n",
" [0.5195],\n",
" [0.1846],\n",
" [0.8313],\n",
" [0.0020],\n",
" [0.0973],\n",
" [0.6215],\n",
" [0.4345],\n",
" [0.6944],\n",
" [0.2031],\n",
" [0.5723],\n",
" [0.9332],\n",
" [0.7015],\n",
" [0.4865],\n",
" [0.3176],\n",
" [0.8969],\n",
" [0.9800]])}\n",
"Input points labels: ['x']\n"
]
}
@@ -321,7 +329,7 @@
},
{
"cell_type": "code",
"execution_count": 32,
"execution_count": 6,
"id": "33cc80bc",
"metadata": {},
"outputs": [],
@@ -352,7 +360,7 @@
},
{
"cell_type": "code",
"execution_count": 33,
"execution_count": 7,
"id": "3bb4dc9b",
"metadata": {},
"outputs": [
@@ -360,7 +368,9 @@
"name": "stderr",
"output_type": "stream",
"text": [
"GPU available: True (mps), used: False\n",
"/home/matte_b/PINA/pina/solvers/__init__.py: DeprecationWarning: 'pina.solvers' is deprecated and will be removed in future versions. Please use 'pina.solver' instead.\n",
"/home/matte_b/PINA/pina/callbacks/__init__.py: DeprecationWarning: 'pina.callbacks' is deprecated and will be removed in future versions. Please use 'pina.callback' instead.\n",
"GPU available: False, used: False\n",
"TPU available: False, using: 0 TPU cores\n",
"HPU available: False, using: 0 HPUs\n"
]
@@ -369,7 +379,7 @@
"name": "stdout",
"output_type": "stream",
"text": [
"Epoch 1499: 100%|██████████| 1/1 [00:00<00:00, 67.42it/s, v_num=2, train_loss_step=0.00468, val_loss=0.00466, train_loss_epoch=0.00468] "
"Epoch 1499: 100%|██████████| 1/1 [00:00<00:00, 20.24it/s, v_num=90, val_loss=0.0191, bound_cond_loss=4.18e-5, phys_cond_loss=0.00118, train_loss=0.00122] "
]
},
{
@@ -383,7 +393,7 @@
"name": "stdout",
"output_type": "stream",
"text": [
"Epoch 1499: 100%|██████████| 1/1 [00:00<00:00, 56.83it/s, v_num=2, train_loss_step=0.00468, val_loss=0.00466, train_loss_epoch=0.00468]\n"
"Epoch 1499: 100%|██████████| 1/1 [00:00<00:00, 16.69it/s, v_num=90, val_loss=0.0191, bound_cond_loss=4.18e-5, phys_cond_loss=0.00118, train_loss=0.00122]\n"
]
}
],
@@ -422,19 +432,20 @@
},
{
"cell_type": "code",
"execution_count": 34,
"execution_count": 8,
"id": "f5fbf362",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"{'train_loss_step': tensor(0.0047),\n",
" 'val_loss': tensor(0.0047),\n",
" 'train_loss_epoch': tensor(0.0047)}"
"{'val_loss': tensor(0.0191),\n",
" 'bound_cond_loss': tensor(4.1773e-05),\n",
" 'phys_cond_loss': tensor(0.0012),\n",
" 'train_loss': tensor(0.0012)}"
]
},
"execution_count": 34,
"execution_count": 8,
"metadata": {},
"output_type": "execute_result"
}
@@ -454,7 +465,7 @@
},
{
"cell_type": "code",
"execution_count": 35,
"execution_count": 9,
"id": "19078eb5",
"metadata": {},
"outputs": [],
@@ -473,7 +484,7 @@
},
{
"cell_type": "code",
"execution_count": 36,
"execution_count": 10,
"id": "bf6211e6",
"metadata": {},
"outputs": [],
@@ -509,11 +520,8 @@
}
],
"metadata": {
"interpreter": {
"hash": "aee8b7b246df8f9039afb4144a1f6fd8d2ca17a180786b69acc140d282b71a49"
},
"kernelspec": {
"display_name": "Python 3 (ipykernel)",
"display_name": "Python 3",
"language": "python",
"name": "python3"
},
@@ -527,7 +535,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.11.7"
"version": "3.12.3"
}
},
"nbformat": 4,

View File

@@ -53,7 +53,7 @@
# What if our equation is also time-dependent? In this case, our `class` will inherit from both `SpatialProblem` and `TimeDependentProblem`:
#
# In[ ]:
# In[1]:
## routine needed to run the notebook on Google Colab
@@ -89,11 +89,11 @@ class TimeSpaceODE(SpatialProblem, TimeDependentProblem):
#
# Once the `Problem` class is initialized, we need to represent the differential equation in **PINA**. In order to do this, we need to load the **PINA** operators from the `pina.operator` module. Again, we'll consider Equation (1) and represent it in **PINA**:
# In[2]:
# In[ ]:
from pina.problem import SpatialProblem
from pina.operators import grad
from pina.operator import grad
from pina import Condition
from pina.domain import CartesianDomain
from pina.equation import Equation, FixedValue
@@ -106,6 +106,11 @@ class SimpleODE(SpatialProblem):
output_variables = ['u']
spatial_domain = CartesianDomain({'x': [0, 1]})
domains ={
'x0': CartesianDomain({'x': 0.}),
'D': CartesianDomain({'x': [0, 1]})
}
# defining the ode equation
def ode_equation(input_, output_):
@@ -120,13 +125,10 @@ class SimpleODE(SpatialProblem):
# conditions to hold
conditions = {
'x0': Condition(location=CartesianDomain({'x': 0.}), equation=FixedValue(1)), # We fix initial condition to value 1
'D': Condition(location=CartesianDomain({'x': [0, 1]}), equation=Equation(ode_equation)), # We wrap the python equation using Equation
'bound_cond': Condition(domain='x0', equation=FixedValue(1.)),
'phys_cond': Condition(domain='D', equation=Equation(ode_equation))
}
# sampled points (see below)
input_pts = None
# defining the true solution
def truth_solution(self, pts):
return torch.exp(pts.extract(['x']))
@@ -149,14 +151,14 @@ problem = SimpleODE()
# sampling 20 points in [0, 1] through discretization in all locations
problem.discretise_domain(n=20, mode='grid', variables=['x'], locations='all')
problem.discretise_domain(n=20, mode='grid', domains='all')
# sampling 20 points in (0, 1) through latin hypercube sampling in D, and 1 point in x0
problem.discretise_domain(n=20, mode='latin', variables=['x'], locations=['D'])
problem.discretise_domain(n=1, mode='random', variables=['x'], locations=['x0'])
problem.discretise_domain(n=20, mode='latin', domains=['D'])
problem.discretise_domain(n=1, mode='random', domains=['x0'])
# sampling 20 points in (0, 1) randomly
problem.discretise_domain(n=20, mode='random', variables=['x'])
problem.discretise_domain(n=20, mode='random')
# We are going to use latin hypercube points for sampling. We need to sample in all the conditions domains. In our case we sample in `D` and `x0`.
@@ -165,8 +167,8 @@ problem.discretise_domain(n=20, mode='random', variables=['x'])
# sampling for training
problem.discretise_domain(1, 'random', locations=['x0'])
problem.discretise_domain(20, 'lh', locations=['D'])
problem.discretise_domain(20, 'random', domains=['x0']) # TODO check
problem.discretise_domain(20, 'lh', domains=['D'])
# The points are saved in a python `dict`, and can be accessed by calling the attribute `input_pts` of the problem
@@ -174,26 +176,26 @@ problem.discretise_domain(20, 'lh', locations=['D'])
# In[5]:
print('Input points:', problem.input_pts)
print('Input points labels:', problem.input_pts['D'].labels)
print('Input points:', problem.discretised_domains)
print('Input points labels:', problem.discretised_domains['D'].labels)
# To visualize the sampled points we can use the `.plot_samples` method of the `Plotter` class
# In[5]:
# In[6]:
from pina import Plotter
#from pina import Plotter
pl = Plotter()
pl.plot_samples(problem=problem)
#pl = Plotter()
#pl.plot_samples(problem=problem)
# ## Perform a small training
# Once we have defined the problem and generated the data we can start the modelling. Here we will choose a `FeedForward` neural network available in `pina.model`, and we will train using the `PINN` solver from `pina.solvers`. We highlight that this training is fairly simple, for more advanced stuff consider the tutorials in the ***Physics Informed Neural Networks*** section of ***Tutorials***. For training we use the `Trainer` class from `pina.trainer`. Here we show a very short training and some methods for plotting the results. Notice that by default all relevant metrics (e.g. MSE error during training) are going to be tracked using a `lightning` logger, by default `CSVLogger`. If you want to track the metric by yourself without a logger, use `pina.callbacks.MetricTracker`.
# In[ ]:
# In[7]:
from pina import Trainer
@@ -222,7 +224,7 @@ trainer.train()
# After the training we can inspect trainer logged metrics (by default **PINA** logs mean square error residual loss). The logged metrics can be accessed online using one of the `Lightning` loggers. The final loss can be accessed by `trainer.logged_metrics`
# In[7]:
# In[8]:
# inspecting final loss
@@ -231,19 +233,19 @@ trainer.logged_metrics
# By using the `Plotter` class from **PINA** we can also do some quantitative plots of the solution.
# In[8]:
# In[9]:
# plotting the solution
pl.plot(solver=pinn)
#pl.plot(solver=pinn)
# The solution is overlapped with the actual one, and they are barely indistinguishable. We can also plot easily the loss:
# In[9]:
# In[10]:
pl.plot_loss(trainer=trainer, label = 'mean_loss', logy=True)
#pl.plot_loss(trainer=trainer, label = 'mean_loss', logy=True)
# As we can see the loss has not reached a minimum, suggesting that we could train for longer