Updates to tutorial and run post codacy changes

committed by Nicola Demo
parent 9e55746546
commit b38b0894b1
32  tutorials/tutorial5/tutorial.ipynb
@@ -45,6 +45,7 @@
 "\n",
 "import torch\n",
 "import matplotlib.pyplot as plt\n",
+"import warnings\n",
 "\n",
 "# !pip install scipy # install scipy\n",
 "from scipy import io\n",
@@ -52,7 +53,9 @@
 "from pina import Condition, LabelTensor\n",
 "from pina.solver import SupervisedSolver\n",
 "from pina.trainer import Trainer\n",
-"from pina.problem import AbstractProblem"
+"from pina.problem import AbstractProblem\n",
+"\n",
+"warnings.filterwarnings('ignore')"
 ]
 },
 {
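For readability, the notebook's updated import cell, reassembled in plain Python from the two hunks above (import lines that fall outside the shown hunks are elided), is roughly:

    import torch
    import matplotlib.pyplot as plt
    import warnings

    # !pip install scipy # install scipy
    from scipy import io
    # ... (unchanged imports not shown in the diff)
    from pina import Condition, LabelTensor
    from pina.solver import SupervisedSolver
    from pina.trainer import Trainer
    from pina.problem import AbstractProblem

    warnings.filterwarnings('ignore')  # suppress warning output in the rendered notebook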
@@ -202,7 +205,7 @@
 "name": "stdout",
 "output_type": "stream",
 "text": [
-"Epoch 9: 100%|██████████| 70/70 [00:01<00:00, 69.54it/s, v_num=14, data_loss_step=0.109, train_loss_step=0.109, val_loss_step=0.109, data_loss_epoch=0.105, val_loss_epoch=0.104, train_loss_epoch=0.105] "
+"Epoch 9: 100%|██████████| 100/100 [00:01<00:00, 85.78it/s, v_num=18, data_loss_step=0.104, train_loss_step=0.104, data_loss_epoch=0.105, train_loss_epoch=0.105] "
 ]
 },
 {
@@ -216,7 +219,7 @@
 "name": "stdout",
 "output_type": "stream",
 "text": [
-"Epoch 9: 100%|██████████| 70/70 [00:01<00:00, 69.13it/s, v_num=14, data_loss_step=0.109, train_loss_step=0.109, val_loss_step=0.109, data_loss_epoch=0.105, val_loss_epoch=0.104, train_loss_epoch=0.105]\n"
+"Epoch 9: 100%|██████████| 100/100 [00:01<00:00, 85.24it/s, v_num=18, data_loss_step=0.104, train_loss_step=0.104, data_loss_epoch=0.105, train_loss_epoch=0.105]\n"
 ]
 }
 ],
@@ -229,8 +232,11 @@
 "solver = SupervisedSolver(problem=problem, model=model)\n",
 "\n",
 "# make the trainer and train\n",
-"trainer = Trainer(solver=solver, max_epochs=10, accelerator='cpu', enable_model_summary=False, batch_size=10) \n",
+"trainer = Trainer(solver=solver, max_epochs=10, accelerator='cpu', enable_model_summary=False, batch_size=10,\n",
 "# We train on CPU and avoid model summary at the beginning of training (optional)\n",
+"train_size=1.0,\n",
+"val_size=0.0,\n",
+"test_size=0.0)\n",
 "trainer.train()"
 ]
 },
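Reassembled in plain Python, the updated trainer construction reads as below; the same pattern is applied to the FNO trainer later in this diff. Reading train_size, val_size and test_size as dataset-split fractions is an assumption, not something stated in the diff itself.

    # Sketch assembled from the added lines above; assumes `solver` is the SupervisedSolver built earlier.
    trainer = Trainer(solver=solver,
                      max_epochs=10,
                      accelerator='cpu',           # train on CPU
                      enable_model_summary=False,  # skip the model summary printed at the start of training (optional)
                      batch_size=10,
                      train_size=1.0,              # presumably the full dataset goes to training...
                      val_size=0.0,                # ...with no validation split...
                      test_size=0.0)               # ...and no test split
    trainer.train()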
@@ -257,8 +263,8 @@
 "name": "stdout",
 "output_type": "stream",
 "text": [
-"Final error training 56.26%\n",
-"Final error testing 56.15%\n"
+"Final error training 56.14%\n",
+"Final error testing 56.02%\n"
 ]
 }
 ],
@@ -310,7 +316,7 @@
 "name": "stdout",
 "output_type": "stream",
 "text": [
-"Epoch 9: 100%|██████████| 70/70 [00:02<00:00, 26.49it/s, v_num=15, data_loss_step=0.00535, train_loss_step=0.00358, val_loss_step=0.00535, data_loss_epoch=0.00372, val_loss_epoch=0.00392, train_loss_epoch=0.00372]"
+"Epoch 9: 100%|██████████| 100/100 [00:03<00:00, 32.20it/s, v_num=19, data_loss_step=0.00203, train_loss_step=0.00203, data_loss_epoch=0.00234, train_loss_epoch=0.00234]"
 ]
 },
 {
@@ -324,7 +330,7 @@
 "name": "stdout",
 "output_type": "stream",
 "text": [
-"Epoch 9: 100%|██████████| 70/70 [00:02<00:00, 26.33it/s, v_num=15, data_loss_step=0.00535, train_loss_step=0.00358, val_loss_step=0.00535, data_loss_epoch=0.00372, val_loss_epoch=0.00392, train_loss_epoch=0.00372]\n"
+"Epoch 9: 100%|██████████| 100/100 [00:03<00:00, 32.08it/s, v_num=19, data_loss_step=0.00203, train_loss_step=0.00203, data_loss_epoch=0.00234, train_loss_epoch=0.00234]\n"
 ]
 }
 ],
@@ -344,7 +350,11 @@
 "solver = SupervisedSolver(problem=problem, model=model)\n",
 "\n",
 "# make the trainer and train\n",
-"trainer = Trainer(solver=solver, max_epochs=10, accelerator='cpu', enable_model_summary=False, batch_size=10) # we train on CPU and avoid model summary at beginning of training (optional)\n",
+"trainer = Trainer(solver=solver, max_epochs=10, accelerator='cpu', enable_model_summary=False, # We train on CPU and avoid model summary at the beginning of training (optional)\n",
+"batch_size=10,\n",
+"train_size=1.0,\n",
+"val_size=0.0,\n",
+"test_size=0.0)\n",
 "trainer.train()"
 ]
 },
@@ -371,8 +381,8 @@
 "name": "stdout",
 "output_type": "stream",
 "text": [
-"Final error training 9.37%\n",
-"Final error testing 9.25%\n"
+"Final error training 6.98%\n",
+"Final error testing 7.26%\n"
 ]
 }
 ],
14  tutorials/tutorial5/tutorial.py
@@ -26,6 +26,7 @@ if IN_COLAB:
 
 import torch
 import matplotlib.pyplot as plt
+import warnings
 
 # !pip install scipy # install scipy
 from scipy import io
@@ -35,6 +36,8 @@ from pina.solver import SupervisedSolver
 from pina.trainer import Trainer
 from pina.problem import AbstractProblem
+
+warnings.filterwarnings('ignore')
 
 
 # ## Data Generation
 #
@@ -109,8 +112,11 @@ model = FeedForward(input_dimensions=1, output_dimensions=1)
 solver = SupervisedSolver(problem=problem, model=model)
 
 # make the trainer and train
-trainer = Trainer(solver=solver, max_epochs=10, accelerator='cpu', enable_model_summary=False, batch_size=10)
+trainer = Trainer(solver=solver, max_epochs=10, accelerator='cpu', enable_model_summary=False, batch_size=10,
 # We train on CPU and avoid model summary at the beginning of training (optional)
+train_size=1.0,
+val_size=0.0,
+test_size=0.0)
 trainer.train()
 
 
@@ -154,7 +160,11 @@ model = FNO(lifting_net=lifting_net,
 solver = SupervisedSolver(problem=problem, model=model)
 
 # make the trainer and train
-trainer = Trainer(solver=solver, max_epochs=10, accelerator='cpu', enable_model_summary=False, batch_size=10) # we train on CPU and avoid model summary at beginning of training (optional)
+trainer = Trainer(solver=solver, max_epochs=10, accelerator='cpu', enable_model_summary=False, # We train on CPU and avoid model summary at the beginning of training (optional)
+batch_size=10,
+train_size=1.0,
+val_size=0.0,
+test_size=0.0)
 trainer.train()
 
 