Updates to tutorial and run post codacy changes

This commit is contained in:
Matteo Bertocchi
2025-03-10 17:18:48 +01:00
committed by Nicola Demo
parent 9e55746546
commit b38b0894b1
27 changed files with 952 additions and 393 deletions

View File

@@ -26,6 +26,7 @@ if IN_COLAB:
get_ipython().system('pip install "pina-mathlab"')
import torch
import warnings
from pina import Condition, Trainer
from pina.solver import PINN
@@ -35,6 +36,8 @@ from pina.operator import grad
from pina.domain import CartesianDomain
from pina.equation import Equation, FixedValue
warnings.filterwarnings('ignore')
class SimpleODE(SpatialProblem):
output_variables = ['u']
@@ -131,8 +134,11 @@ for _ in range(3):
pinn = PINN(problem, model)
trainer = Trainer(solver=pinn,
accelerator='cpu',
logger=TensorBoardLogger(save_dir='simpleode'),
enable_model_summary=False)
logger=TensorBoardLogger(save_dir='training_log'),
enable_model_summary=False,
train_size=1.0,
val_size=0.0,
test_size=0.0)
trainer.train()
@@ -193,8 +199,12 @@ model = FeedForward(
pinn = PINN(problem, model)
trainer = Trainer(solver=pinn,
accelerator='cpu',
logger=True,
callbacks=[NaiveMetricTracker()], # adding a callback
enable_model_summary=False,
callbacks=[NaiveMetricTracker()]) # adding a callback
train_size=1.0,
val_size=0.0,
test_size=0.0)
trainer.train()
@@ -210,7 +220,7 @@ trainer.callbacks[0].saved_metrics[:3] # only the first three epochs
#
# We can for example try the `EarlyStopping` routine, which automatically stops the training when a specific metric has converged (here the `train_loss`). In order to let the training keep going forever, set `max_epochs=-1`.
# In[ ]:
# In[8]:
# ~5 mins