Update tutorials 1 through 7
committed by Nicola Demo
parent 7ef39f1e3b
commit 17792caa34
tutorials/tutorial1/tutorial.ipynb (vendored, 201 lines changed)
File diff suppressed because one or more lines are too long
tutorials/tutorial1/tutorial.py (vendored, 52 lines changed)
@@ -53,7 +53,7 @@
 # What if our equation is also time-dependent? In this case, our `class` will inherit from both `SpatialProblem` and `TimeDependentProblem`:
 #

-# In[1]:
+# In[10]:


 ## routine needed to run the notebook on Google Colab
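For context, a minimal sketch of such a dual-inheritance problem class (the domain bounds and the empty condition set below are illustrative assumptions, not the tutorial's exact values):

from pina.problem import SpatialProblem, TimeDependentProblem
from pina.domain import CartesianDomain

class TimeSpaceODE(SpatialProblem, TimeDependentProblem):
    output_variables = ['u']
    spatial_domain = CartesianDomain({'x': [0, 1]})   # assumed bounds
    temporal_domain = CartesianDomain({'t': [0, 1]})  # assumed bounds
    # initial/boundary conditions would be declared here, as in the spatial-only case
    conditions = {}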
@@ -87,9 +87,9 @@ class TimeSpaceODE(SpatialProblem, TimeDependentProblem):

 # ### Write the problem class
 #
-# Once the `Problem` class is initialized, we need to represent the differential equation in **PINA**. In order to do this, we need to load the **PINA** operators from `pina.operators` module. Again, we'll consider Equation (1) and represent it in **PINA**:
+# Once the `Problem` class is initialized, we need to represent the differential equation in **PINA**. In order to do this, we need to load the **PINA** operators from `pina.operator` module. Again, we'll consider Equation (1) and represent it in **PINA**:

-# In[2]:
+# In[1]:


 from pina.problem import SpatialProblem
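As a rough sketch of what representing the equation with PINA operators looks like (the residual below assumes the simple ODE df/dx = f from this tutorial; the `grad` call follows the `pina.operator` convention):

from pina.operator import grad

def ode_equation(input_, output_):
    # compute du/dx and return the residual du/dx - u = 0
    u_x = grad(output_, input_, components=['u'], d=['x'])
    u = output_.extract(['u'])
    return u_x - u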
@@ -99,7 +99,7 @@ from pina.domain import CartesianDomain
 from pina.equation import Equation, FixedValue

 import torch
+import matplotlib.pyplot as plt

 class SimpleODE(SpatialProblem):
@@ -147,7 +147,7 @@ problem = SimpleODE()
 #
 # Data for training can come in the form of direct numerical simulation results, or points in the domains. In case we perform unsupervised learning, we just need the collocation points for training, i.e. points where we want to evaluate the neural network. Sampling points in **PINA** is very easy; here we show three examples using the `.discretise_domain` method of the `AbstractProblem` class.

-# In[3]:
+# In[2]:


 # sampling 20 points in [0, 1] through discretization in all locations
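The three sampling modes referred to above, as a short sketch (the mode names are taken from the hunks in this diff):

problem.discretise_domain(n=20, mode='grid')    # equispaced grid
problem.discretise_domain(n=20, mode='random')  # uniform random sampling
problem.discretise_domain(n=20, mode='lh')      # latin hypercube sampling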
@@ -163,7 +163,7 @@ problem.discretise_domain(n=20, mode='random')

 # We are going to use latin hypercube points for sampling. We need to sample in all the condition domains. In our case we sample in `D` and `x0`.

-# In[4]:
+# In[3]:


 # sampling for training
@@ -173,7 +173,7 @@ problem.discretise_domain(20, 'lh', domains=['D'])

 # The points are saved in a Python `dict`, and can be accessed by calling the attribute `input_pts` of the problem.

-# In[5]:
+# In[4]:


 print('Input points:', problem.discretised_domains)
@@ -182,10 +182,9 @@ print('Input points labels:', problem.discretised_domains['D'].labels)

 # To visualize the sampled points we can use `matplotlib.pyplot`:

-# In[6]:
+# In[5]:


-import matplotlib.pyplot as plt
 variables=problem.spatial_variables
 fig = plt.figure()
 proj = "3d" if len(variables) == 3 else None
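A hedged completion of the plotting cell above for the 1D case of this tutorial (the loop body is an assumption; the diff only shows the setup lines and the `for location in problem.input_pts:` context of the next hunk):

ax = fig.add_subplot(projection=proj)
for location in problem.input_pts:
    # extract the sampled x-coordinates for this condition and scatter them on a line
    pts = problem.input_pts[location].extract(['x']).flatten().detach()
    ax.scatter(pts, torch.zeros_like(pts), s=15, label=location)
ax.legend()
plt.show()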
@@ -197,15 +196,15 @@ for location in problem.input_pts:

 # ## Perform a small training

-# Once we have defined the problem and generated the data we can start the modelling. Here we will choose a `FeedForward` neural network available in `pina.model`, and we will train using the `PINN` solver from `pina.solver`. We highlight that this training is fairly simple, for more advanced stuff consider the tutorials in the ***Physics Informed Neural Networks*** section of ***Tutorials***. For training we use the `Trainer` class from `pina.trainer`. Here we show a very short training and some method for plotting the results. Notice that by default all relevant metrics (e.g. MSE error during training) are going to be tracked using a `lightning` logger, by default `CSVLogger`. If you want to track the metric by yourself without a logger, use `pina.callbacks.MetricTracker`.
+# Once we have defined the problem and generated the data we can start the modelling. Here we will choose a `FeedForward` neural network available in `pina.model`, and we will train using the `PINN` solver from `pina.solver`. We highlight that this training is fairly simple, for more advanced stuff consider the tutorials in the ***Physics Informed Neural Networks*** section of ***Tutorials***. For training we use the `Trainer` class from `pina.trainer`. Here we show a very short training and some method for plotting the results. Notice that by default all relevant metrics (e.g. MSE error during training) are going to be tracked using a `lightning` logger, by default `CSVLogger`. If you want to track the metric by yourself without a logger, use `pina.callback.MetricTracker`.

-# In[7]:
+# In[6]:


 from pina import Trainer
 from pina.solver import PINN
 from pina.model import FeedForward
-from pina.callback import MetricTracker
+from lightning.pytorch.loggers import TensorBoardLogger


 # build the model
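The model construction elided between these hunks presumably mirrors the `FeedForward` call shown for tutorial 2 later in this diff; a sketch (the layer sizes are an assumption):

model = FeedForward(
    input_dimensions=len(problem.input_variables),
    output_dimensions=len(problem.output_variables),
    layers=[10, 10],  # assumed hidden-layer sizes; the tutorial's values may differ
)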
@@ -220,15 +219,15 @@ model = FeedForward(
 pinn = PINN(problem, model)

 # create the trainer
-trainer = Trainer(solver=pinn, max_epochs=1500, callbacks=[MetricTracker()], accelerator='cpu', enable_model_summary=False) # we train on CPU and avoid model summary at beginning of training (optional)
+trainer = Trainer(solver=pinn, max_epochs=1500, logger=TensorBoardLogger('tutorial_logs'), accelerator='cpu', enable_model_summary=False) # we train on CPU and avoid model summary at beginning of training (optional)

 # train
 trainer.train()


-# After the training we can inspect trainer logged metrics (by default **PINA** logs mean square error residual loss). The logged metrics can be accessed online using one of the `Lightinig` loggers. The final loss can be accessed by `trainer.logged_metrics`
+# After the training we can inspect trainer logged metrics (by default **PINA** logs mean square error residual loss). The logged metrics can be accessed online using one of the `Lightning` loggers. The final loss can be accessed by `trainer.logged_metrics`

-# In[8]:
+# In[7]:


 # inspecting final loss
@@ -237,7 +236,7 @@ trainer.logged_metrics

 # By using `matplotlib` we can also do some qualitative plots of the solution.

-# In[9]:
+# In[8]:


 pts = pinn.problem.spatial_domain.sample(256, 'grid', variables='x')
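A hedged completion of this plotting cell (the diff shows only the first and last lines; the true solution e^x is assumed from the tutorial's ODE df/dx = f with f(0) = 1):

with torch.no_grad():
    prediction = pinn(pts)
true_output = torch.exp(pts.extract(['x']))  # assumed analytical solution
fig, ax = plt.subplots()
ax.plot(pts.extract(['x']), prediction, label='PINN solution')
ax.plot(pts.extract(['x']), true_output, label='True solution')
plt.legend()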
@@ -250,24 +249,15 @@ ax.plot(pts.extract(['x']), true_output, label='True solution')
 plt.legend()


-# The solution is overlapped with the actual one, and they are barely indistinguishable. We can also plot easily the loss:
+# The solution is overlapped with the actual one, and they are barely indistinguishable. We can also take a look at the loss using `TensorBoard`:

-# In[10]:
+# In[9]:


-list_ = [
-    idx for idx, s in enumerate(trainer.callbacks)
-    if isinstance(s, MetricTracker)
-]
-trainer_metrics = trainer.callbacks[list_[0]].metrics
-
-loss = trainer_metrics['val_loss']
-epochs = range(len(loss))
-plt.plot(epochs, loss.cpu())
-# plotting
-plt.xlabel('epoch')
-plt.ylabel('loss')
-plt.yscale('log')
+# Load the TensorBoard extension
+get_ipython().run_line_magic('load_ext', 'tensorboard')
+# Show saved losses
+get_ipython().run_line_magic('tensorboard', "--logdir 'tutorial_logs'")


 # As we can see the loss has not reached a minimum, suggesting that we could train for longer
tutorials/tutorial2/tutorial.ipynb (vendored, 184 lines changed)
File diff suppressed because one or more lines are too long
tutorials/tutorial2/tutorial.py (vendored, 116 lines changed)
@@ -9,7 +9,7 @@
 #
 # First of all, some useful imports.

-# In[1]:
+# In[4]:


 ## routine needed to run the notebook on Google Colab
@@ -23,6 +23,8 @@ if IN_COLAB:

 import torch
 from torch.nn import Softplus
+import matplotlib.pyplot as plt
+import warnings

 from pina.problem import SpatialProblem
 from pina.operator import laplacian
@@ -31,9 +33,13 @@ from pina.solver import PINN
 from pina.trainer import Trainer
 from pina.domain import CartesianDomain
 from pina.equation import Equation, FixedValue
-from pina import Condition, LabelTensor#,Plotter
+from pina import Condition, LabelTensor
 from pina.callback import MetricTracker
+
+from lightning.pytorch.loggers import TensorBoardLogger
+
+warnings.filterwarnings('ignore')


 # ## The problem definition
@@ -49,7 +55,7 @@
 # The Poisson problem is written in **PINA** code as a class. The equations are written as *conditions* that should be satisfied in the corresponding domains. The *truth_solution*
 # is the exact solution which will be compared with the predicted one.

-# In[2]:
+# In[5]:


 class Poisson(SpatialProblem):
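For readers without the full file, a rough sketch of what the `Poisson` class body might contain (the forcing term, boundary layout, and closed-form solution below are assumptions; the condition names match the `discretise_domain` call in the next hunk, and the imports are those shown above):

def laplace_equation(input_, output_):
    # residual of the Poisson equation with an assumed sinusoidal forcing term
    force_term = torch.sin(input_.extract(['x']) * torch.pi) * torch.sin(input_.extract(['y']) * torch.pi)
    return laplacian(output_, input_, components=['u'], d=['x', 'y']) + force_term

class Poisson(SpatialProblem):
    output_variables = ['u']
    spatial_domain = CartesianDomain({'x': [0, 1], 'y': [0, 1]})
    conditions = {
        'bound_cond1': Condition(domain=CartesianDomain({'x': [0, 1], 'y': 1}), equation=FixedValue(0.0)),
        # ... analogous fixed-value conditions on the remaining edges ...
        'laplace_D': Condition(domain=CartesianDomain({'x': [0, 1], 'y': [0, 1]}), equation=Equation(laplace_equation)),
    }

    def truth_solution(self, pts):
        # assumed closed-form solution used for the comparison plots
        return -torch.sin(pts.extract(['x']) * torch.pi) * torch.sin(pts.extract(['y']) * torch.pi) / (2 * torch.pi ** 2)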
@@ -90,33 +96,68 @@ problem.discretise_domain(25, 'grid', domains=['bound_cond1', 'bound_cond2', 'bo
 # After the problem, the feed-forward neural network is defined, through the class `FeedForward`. This neural network takes as input the coordinates (in this case $x$ and $y$) and provides the unknown field of the Poisson problem. The residuals of the equations are evaluated at several sampling points (which the user can manipulate using the method `CartesianDomain_pts`) and the loss minimized by the neural network is the sum of the residuals.
 #
-# In this tutorial, the neural network is composed by two hidden layers of 10 neurons each, and it is trained for 1000 epochs. We use the `MetricTracker` class to track the metrics during training.
+# In this tutorial, the neural network is composed by two hidden layers of 10 neurons each, and it is trained for 1000 epochs with a learning rate of 0.006 and $l_2$ weight regularization set to $10^{-8}$. These parameters can be modified as desired.

-# In[4]:
+# In[6]:


 # make model + solver + trainer
+from pina.optim import TorchOptimizer
 model = FeedForward(
     layers=[10, 10],
     func=Softplus,
     output_dimensions=len(problem.output_variables),
     input_dimensions=len(problem.input_variables)
 )
-pinn = PINN(problem, model)
-trainer = Trainer(pinn, max_epochs=1000, callbacks=[MetricTracker()], accelerator='cpu', enable_model_summary=False) # we train on CPU and avoid model summary at beginning of training (optional)
+pinn = PINN(problem, model, optimizer=TorchOptimizer(torch.optim.Adam, lr=0.006, weight_decay=1e-8))
+trainer = Trainer(pinn, max_epochs=1000, accelerator='cpu', enable_model_summary=False,
+                  train_size=1.0,
+                  val_size=0.0,
+                  test_size=0.0,
+                  logger=TensorBoardLogger("tutorial_logs")
+                  ) # we train on CPU and avoid model summary at beginning of training (optional)

 # train
 trainer.train()


-# Now the `Plotter` class is used to plot the results.
+# Now we plot the results using `matplotlib`.
 # The solution predicted by the neural network is plotted on the left, the exact one is represented at the center and on the right the error between the exact and the predicted solutions is shown.

-# In[5]:
+# In[7]:


-#plotter = Plotter()
-#plotter.plot(solver=pinn)
+@torch.no_grad()
+def plot_solution(solver):
+    # get the problem
+    problem = solver.problem
+    # get spatial points
+    spatial_samples = problem.spatial_domain.sample(30, "grid")
+    # compute pinn solution, true solution and absolute difference
+    data = {
+        "PINN solution": solver(spatial_samples),
+        "True solution": problem.truth_solution(spatial_samples),
+        "Absolute Difference": torch.abs(
+            solver(spatial_samples) - problem.truth_solution(spatial_samples)
+        )
+    }
+    # plot the solution
+    for idx, (title, field) in enumerate(data.items()):
+        plt.subplot(1, 3, idx + 1)
+        plt.title(title)
+        plt.tricontourf(  # convert to torch tensor + flatten
+            spatial_samples.extract("x").tensor.flatten(),
+            spatial_samples.extract("y").tensor.flatten(),
+            field.tensor.flatten(),
+        )
+        plt.colorbar(), plt.tight_layout()
+
+
+# In[8]:
+
+
+plt.figure(figsize=(12, 6))
+plot_solution(solver=pinn)


 # ## Solving the problem with extra-features PINNs
@@ -135,7 +176,7 @@ trainer.train()
 #
 # Finally, we perform the same training as before: the problem is `Poisson`, the network is composed of the same number of neurons, and the optimizer parameters are equal to the previous test; the only change is the new extra feature.

-# In[6]:
+# In[9]:


 class SinSin(torch.nn.Module):
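The `SinSin` body is cut off by the hunk; a plausible sketch, assuming the feature is the product sin(pi*x)*sin(pi*y), a natural choice for this forcing term:

class SinSin(torch.nn.Module):
    """Extra feature: k(x, y) = sin(pi*x) * sin(pi*y) (assumed form)."""
    def forward(self, x):
        t = torch.sin(x.extract(['x']) * torch.pi) * torch.sin(x.extract(['y']) * torch.pi)
        return LabelTensor(t, ['sin(x)sin(y)'])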
@@ -170,8 +211,12 @@ model_feat = FeedForwardWithExtraFeatures(
     layers=[10, 10],
     extra_features=[SinSin()])

-pinn_feat = PINN(problem, model_feat)
-trainer_feat = Trainer(pinn_feat, max_epochs=1000, callbacks=[MetricTracker()], accelerator='cpu', enable_model_summary=False) # we train on CPU and avoid model summary at beginning of training (optional)
+pinn_feat = PINN(problem, model_feat, optimizer=TorchOptimizer(torch.optim.Adam, lr=0.006, weight_decay=1e-8))
+trainer_feat = Trainer(pinn_feat, max_epochs=1000, accelerator='cpu', enable_model_summary=False,
+                       train_size=1.0,
+                       val_size=0.0,
+                       test_size=0.0,
+                       logger=TensorBoardLogger("tutorial_logs")) # we train on CPU and avoid model summary at beginning of training (optional)

 trainer_feat.train()
@@ -179,10 +224,11 @@ trainer_feat.train()
 # The predicted and exact solutions and the error between them are represented below.
 # We can easily note that now our network, having almost the same configuration as before, is able to reach additional orders of magnitude in accuracy.

-# In[7]:
+# In[10]:


-#plotter.plot(solver=pinn_feat)
+plt.figure(figsize=(12, 6))
+plot_solution(solver=pinn_feat)


 # ## Solving the problem with learnable extra-features PINNs
@@ -199,7 +245,7 @@ trainer_feat.train()
 # where $\alpha$ and $\beta$ are the abovementioned parameters.
 # Their implementation is quite trivial: by using the class `torch.nn.Parameter` we can define all the learnable parameters we need, and they are managed by the `autograd` module!

-# In[8]:
+# In[11]:


 class SinSinAB(torch.nn.Module):
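A sketch of how `SinSinAB` might implement the learnable feature beta*sin(alpha*pi*x)*sin(alpha*pi*y) (the exact parametrization is an assumption):

class SinSinAB(torch.nn.Module):
    def __init__(self):
        super().__init__()
        # alpha and beta are registered as learnable parameters, handled by autograd
        self.alpha = torch.nn.Parameter(torch.tensor([1.0]))
        self.beta = torch.nn.Parameter(torch.tensor([1.0]))

    def forward(self, x):
        t = self.beta * (torch.sin(self.alpha * x.extract(['x']) * torch.pi)
                         * torch.sin(self.alpha * x.extract(['y']) * torch.pi))
        return LabelTensor(t, ['b*sin(a*x)sin(a*y)'])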
@@ -219,15 +265,19 @@ class SinSinAB(torch.nn.Module):


 # make model + solver + trainer
-model_lean = FeedForwardWithExtraFeatures(
+model_learn = FeedForwardWithExtraFeatures(
     input_dimensions=len(problem.input_variables) + 1, # we add one, as we also consider the extra feature dimension
     output_dimensions=len(problem.output_variables),
     func=Softplus,
     layers=[10, 10],
     extra_features=[SinSinAB()])

-pinn_lean = PINN(problem, model_lean)
-trainer_learn = Trainer(pinn_lean, max_epochs=1000, accelerator='cpu', enable_model_summary=False) # we train on CPU and avoid model summary at beginning of training (optional)
+pinn_learn = PINN(problem, model_learn, optimizer=TorchOptimizer(torch.optim.Adam, lr=0.006, weight_decay=1e-8))
+trainer_learn = Trainer(pinn_learn, max_epochs=1000, enable_model_summary=False,
+                        train_size=1.0,
+                        val_size=0.0,
+                        test_size=0.0,
+                        logger=TensorBoardLogger("tutorial_logs")) # we train on CPU and avoid model summary at beginning of training (optional)

 # train
 trainer_learn.train()
@@ -235,18 +285,22 @@ trainer_learn.train()

 # Umh, the final loss is not appreciably better than the previous model (with static extra features), despite the usage of learnable parameters. This is mainly due to the over-parametrization of the network: there are many parameters to optimize during the training, and the model is unable to understand automatically that only the parameters of the extra feature (and not the weights/bias of the FFN) should be tuned in order to fit our problem. A longer training can be helpful, but in this case the faster way to reach machine precision for solving the Poisson problem is removing all the hidden layers in the `FeedForward`, keeping only the $\alpha$ and $\beta$ parameters of the extra feature.

-# In[9]:
+# In[12]:


 # make model + solver + trainer
-model_lean = FeedForwardWithExtraFeatures(
+model_learn = FeedForwardWithExtraFeatures(
     layers=[],
     func=Softplus,
     output_dimensions=len(problem.output_variables),
     input_dimensions=len(problem.input_variables)+1,
     extra_features=[SinSinAB()])
-pinn_learn = PINN(problem, model_lean)
-trainer_learn = Trainer(pinn_learn, max_epochs=1000, callbacks=[MetricTracker()], accelerator='cpu', enable_model_summary=False) # we train on CPU and avoid model summary at beginning of training (optional)
+pinn_learn = PINN(problem, model_learn, optimizer=TorchOptimizer(torch.optim.Adam, lr=0.006, weight_decay=1e-8))
+trainer_learn = Trainer(pinn_learn, max_epochs=1000, accelerator='cpu', enable_model_summary=False,
+                        train_size=1.0,
+                        val_size=0.0,
+                        test_size=0.0,
+                        logger=TensorBoardLogger("tutorial_logs")) # we train on CPU and avoid model summary at beginning of training (optional)

 # train
 trainer_learn.train()
@@ -257,20 +311,14 @@ trainer_learn.train()
 #
 # We conclude here by showing the graphical comparison of the unknown field and the loss trend for all the test cases presented here: the standard PINN, PINN with extra features, and PINN with learnable extra features.

-# In[10]:
-
-
-#plotter.plot(solver=pinn_learn)
-
-
 # Let us compare the training losses for the various types of training

-# In[11]:
+# In[13]:


-#plotter.plot_loss(trainer, logy=True, label='Standard')
-#plotter.plot_loss(trainer_feat, logy=True,label='Static Features')
-#plotter.plot_loss(trainer_learn, logy=True, label='Learnable Features')
+# Load the TensorBoard extension
+get_ipython().run_line_magic('load_ext', 'tensorboard')
+get_ipython().run_line_magic('tensorboard', "--logdir 'tutorial_logs'")


 # ## What's next?
tutorials/tutorial4/tutorial.ipynb (vendored, 234 lines changed)
File diff suppressed because one or more lines are too long
tutorials/tutorial4/tutorial.py (vendored, 6 lines changed)
@@ -23,13 +23,13 @@ if IN_COLAB:

 import torch
 import matplotlib.pyplot as plt
 plt.style.use('tableau-colorblind10')
+import torchvision # for MNIST dataset

 from pina.problem import AbstractProblem
 from pina.solver import SupervisedSolver
 from pina.trainer import Trainer
 from pina import Condition, LabelTensor
 from pina.model.block import ContinuousConvBlock
-import torchvision # for MNIST dataset
 from pina.model import FeedForward # for building AE and MNIST classification
@@ -508,7 +508,7 @@ class CircleProblem(AbstractProblem):
     input_variables = ['x', 'y', 'f']
     output_variables = input_variables
     al = LabelTensor(input_data, input_variables)
-    conditions = {'data' : Condition(input_points=LabelTensor(input_data, input_variables), output_points=LabelTensor(input_data, output_variables))}
+    conditions = {'data' : Condition(input=LabelTensor(input_data, input_variables), target=LabelTensor(input_data, output_variables))}

 # define the solver
 solver = SupervisedSolver(problem=CircleProblem(), model=net, loss=torch.nn.MSELoss(), use_lt=True)
tutorials/tutorial5/tutorial.ipynb (vendored, 27 lines changed)
@@ -43,17 +43,16 @@
     " # get the data\n",
     " !wget https://github.com/mathLab/PINA/raw/refs/heads/master/tutorials/tutorial5/Data_Darcy.mat\n",
-    "\n",
+    " \n",
+    "import torch\n",
+    "import matplotlib.pyplot as plt\n",
     "\n",
     "# !pip install scipy # install scipy\n",
     "from scipy import io\n",
-    "import torch\n",
     "from pina.model import FNO, FeedForward # let's import some models\n",
     "from pina import Condition, LabelTensor\n",
     "from pina.solver import SupervisedSolver\n",
     "from pina.trainer import Trainer\n",
-    "from pina.problem import AbstractProblem\n",
-    "import matplotlib.pyplot as plt\n",
-    "plt.style.use('tableau-colorblind10')"
+    "from pina.problem import AbstractProblem"
    ]
   },
   {
@@ -150,7 +149,7 @@
   },
   {
    "cell_type": "code",
-   "execution_count": null,
+   "execution_count": 4,
    "id": "8b27d283",
    "metadata": {
     "ExecuteTime": {
@@ -203,7 +202,7 @@
     "name": "stdout",
     "output_type": "stream",
     "text": [
-     "Epoch 9: 100%|██████████| 70/70 [00:01<00:00, 40.29it/s, v_num=8, data_loss_step=0.103, train_loss_step=0.0993, val_loss_step=0.103, data_loss_epoch=0.105, val_loss_epoch=0.102, train_loss_epoch=0.105] "
+     "Epoch 9: 100%|██████████| 70/70 [00:01<00:00, 69.54it/s, v_num=14, data_loss_step=0.109, train_loss_step=0.109, val_loss_step=0.109, data_loss_epoch=0.105, val_loss_epoch=0.104, train_loss_epoch=0.105] "
    ]
   },
   {
@@ -217,7 +216,7 @@
     "name": "stdout",
     "output_type": "stream",
     "text": [
-     "Epoch 9: 100%|██████████| 70/70 [00:01<00:00, 40.09it/s, v_num=8, data_loss_step=0.103, train_loss_step=0.0993, val_loss_step=0.103, data_loss_epoch=0.105, val_loss_epoch=0.102, train_loss_epoch=0.105]\n"
+     "Epoch 9: 100%|██████████| 70/70 [00:01<00:00, 69.13it/s, v_num=14, data_loss_step=0.109, train_loss_step=0.109, val_loss_step=0.109, data_loss_epoch=0.105, val_loss_epoch=0.104, train_loss_epoch=0.105]\n"
    ]
   }
  ],
@@ -258,8 +257,8 @@
     "name": "stdout",
     "output_type": "stream",
     "text": [
-     "Final error training 56.17%\n",
-     "Final error testing 56.07%\n"
+     "Final error training 56.26%\n",
+     "Final error testing 56.15%\n"
    ]
   }
  ],
@@ -311,7 +310,7 @@
     "name": "stdout",
     "output_type": "stream",
     "text": [
-     "Epoch 9: 100%|██████████| 70/70 [00:03<00:00, 20.06it/s, v_num=9, data_loss_step=0.00303, train_loss_step=0.00401, val_loss_step=0.00303, data_loss_epoch=0.00338, val_loss_epoch=0.00363, train_loss_epoch=0.00338]"
+     "Epoch 9: 100%|██████████| 70/70 [00:02<00:00, 26.49it/s, v_num=15, data_loss_step=0.00535, train_loss_step=0.00358, val_loss_step=0.00535, data_loss_epoch=0.00372, val_loss_epoch=0.00392, train_loss_epoch=0.00372]"
    ]
   },
   {
@@ -325,7 +324,7 @@
     "name": "stdout",
     "output_type": "stream",
     "text": [
-     "Epoch 9: 100%|██████████| 70/70 [00:03<00:00, 19.94it/s, v_num=9, data_loss_step=0.00303, train_loss_step=0.00401, val_loss_step=0.00303, data_loss_epoch=0.00338, val_loss_epoch=0.00363, train_loss_epoch=0.00338]\n"
+     "Epoch 9: 100%|██████████| 70/70 [00:02<00:00, 26.33it/s, v_num=15, data_loss_step=0.00535, train_loss_step=0.00358, val_loss_step=0.00535, data_loss_epoch=0.00372, val_loss_epoch=0.00392, train_loss_epoch=0.00372]\n"
    ]
   }
  ],
@@ -372,8 +371,8 @@
     "name": "stdout",
     "output_type": "stream",
     "text": [
-     "Final error training 9.14%\n",
-     "Final error testing 9.15%\n"
+     "Final error training 9.37%\n",
+     "Final error testing 9.25%\n"
    ]
   }
  ],
tutorials/tutorial5/tutorial.py (vendored, 9 lines changed)
@@ -24,17 +24,16 @@ if IN_COLAB:
 # get the data
 get_ipython().system('wget https://github.com/mathLab/PINA/raw/refs/heads/master/tutorials/tutorial5/Data_Darcy.mat')

+import torch
+import matplotlib.pyplot as plt

 # !pip install scipy # install scipy
 from scipy import io
-import torch
 from pina.model import FNO, FeedForward # let's import some models
 from pina import Condition, LabelTensor
 from pina.solver import SupervisedSolver
 from pina.trainer import Trainer
 from pina.problem import AbstractProblem
-import matplotlib.pyplot as plt
-plt.style.use('tableau-colorblind10')


 # ## Data Generation
@@ -89,8 +88,8 @@ plt.show()
 class NeuralOperatorSolver(AbstractProblem):
     input_variables = k_train.full_labels[3]['dof']
     output_variables = u_train.full_labels[3]['dof']
-    conditions = {'data' : Condition(input_points=k_train,
-                                     output_points=u_train)}
+    conditions = {'data' : Condition(input=k_train,
+                                     target=u_train)}

 # make problem
 problem = NeuralOperatorSolver()
tutorials/tutorial6/tutorial.ipynb (vendored, 48 lines changed)
File diff suppressed because one or more lines are too long
tutorials/tutorial6/tutorial.py (vendored, 2 lines changed)
@@ -26,7 +26,7 @@ if IN_COLAB:
 get_ipython().system('pip install "pina-mathlab"')

 import matplotlib.pyplot as plt
-
+plt.style.use('tableau-colorblind10')

 from pina.domain import EllipsoidDomain, Difference, CartesianDomain, Union, SimplexDomain, DomainInterface
 from pina.label_tensor import LabelTensor
BIN tutorials/tutorial7/data/pinn_solution_0.5_0.5 (vendored, new file): binary file not shown.
BIN tutorials/tutorial7/data/pts_0.5_0.5 (vendored, new file): binary file not shown.
tutorials/tutorial7/tutorial.ipynb (vendored, 49 lines changed)
File diff suppressed because one or more lines are too long
tutorials/tutorial7/tutorial.py (vendored, 33 lines changed)
@@ -25,7 +25,7 @@

 # Let's start with useful imports.

-# In[1]:
+# In[9]:


 ## routine needed to run the notebook on Google Colab
@@ -42,8 +42,8 @@ if IN_COLAB:
 get_ipython().system('wget "https://github.com/mathLab/PINA/raw/refs/heads/master/tutorials/tutorial7/data/pts_0.5_0.5" -O "data/pts_0.5_0.5"')

 import matplotlib.pyplot as plt
-
+plt.style.use('tableau-colorblind10')
 import torch

 from pina.problem import SpatialProblem, InverseProblem
 from pina.operator import laplacian
 from pina.model import FeedForward
@@ -53,18 +53,18 @@ from pina.solver import PINN
 from pina.domain import CartesianDomain


-# Then, we import the pre-saved data, for ($\mu_1$, $\mu_2$)=($0.5$, $0.5$). These two values are the optimal parameters that we want to find through the neural network training. In particular, we import the `input_points`(the spatial coordinates), and the `output_points` (the corresponding $u$ values evaluated at the `input_points`).
+# Then, we import the pre-saved data, for ($\mu_1$, $\mu_2$)=($0.5$, $0.5$). These two values are the optimal parameters that we want to find through the neural network training. In particular, we import the `input` points (the spatial coordinates), and the `target` points (the corresponding $u$ values evaluated at the `input`).

-# In[2]:
+# In[10]:


-data_output = torch.load('data/pinn_solution_0.5_0.5').detach()
-data_input = torch.load('data/pts_0.5_0.5')
+data_output = torch.load('data/pinn_solution_0.5_0.5', weights_only=False).detach()
+data_input = torch.load('data/pts_0.5_0.5', weights_only=False)


 # Moreover, let's plot also the data points and the reference solution: this is the expected output of the neural network.

-# In[3]:
+# In[11]:


 points = data_input.extract(['x', 'y']).detach().numpy()
@@ -80,7 +80,7 @@ plt.show()

 # Then, we initialize the Poisson problem, which inherits from the `SpatialProblem` and the `InverseProblem` classes. Here we have to define all the variables, and the domain where our unknown parameters ($\mu_1$, $\mu_2$) belong. Notice that the Laplace equation takes as inputs also the unknown variables, that will be treated as parameters that the neural network optimizes during the training process.

-# In[4]:
+# In[12]:


 ### Define ranges of variables
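A condensed sketch of the class skeleton this section builds up to (the parameter bounds are assumptions; `unknown_parameter_domain` is the attribute used in PINA's inverse-problem examples, treated here as an assumption):

class Poisson(SpatialProblem, InverseProblem):
    output_variables = ['u']
    x_min, x_max, y_min, y_max = -2, 2, -2, 2  # assumed ranges
    spatial_domain = CartesianDomain({'x': [x_min, x_max], 'y': [y_min, y_max]})
    # domain of the unknown parameters (mu_1, mu_2) that training will optimize over
    unknown_parameter_domain = CartesianDomain({'mu1': [-1, 1], 'mu2': [-1, 1]})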
@@ -126,7 +126,7 @@ class Poisson(SpatialProblem, InverseProblem):
        'phys_cond': Condition(domain=CartesianDomain({'x': [x_min, x_max], 'y': [y_min, y_max]
            }),
            equation=Equation(laplace_equation)),
-       'data': Condition(input_points=data_input.extract(['x', 'y']), output_points=data_output)
+       'data': Condition(input=data_input.extract(['x', 'y']), target=data_output)
    }

 problem = Poisson()
@@ -134,7 +134,7 @@ problem = Poisson()

 # Then, we define the neural network model we want to use. Here we use a model which imposes hard constraints on the boundary conditions, as also done in the Wave tutorial!

-# In[5]:
+# In[13]:


 model = FeedForward(
@@ -147,7 +147,7 @@ model = FeedForward(

 # After that, we discretize the spatial domain.

-# In[6]:
+# In[14]:


 problem.discretise_domain(20, 'grid', domains=['phys_cond'])
@@ -158,7 +158,7 @@ problem.discretise_domain(1000, 'random', domains=['bound_cond1', 'bound_cond2',

 # Here, we define a simple callback for the trainer. We use this callback to save the parameters predicted by the neural network during the training. The parameters are saved every 100 epochs as `torch` tensors in a specified directory (`tmp_dir` in our case).
 # The goal is to read the saved parameters after training and plot their trend across the epochs.

-# In[7]:
+# In[15]:


 from lightning.pytorch.callbacks import Callback
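The callback body is elided by this hunk; a minimal sketch consistent with the saving pattern read back later in the tutorial (files named `parameters_epoch{N}` every 100 epochs; the `unknown_parameters` attribute is assumed from PINA's inverse-problem API):

class SaveParameters(Callback):
    def on_train_epoch_end(self, trainer, solver):
        if (trainer.current_epoch + 1) % 100 == 0:
            # save a detached copy of the current estimates of (mu_1, mu_2)
            params = {k: v.detach().clone() for k, v in solver.problem.unknown_parameters.items()}
            torch.save(params, '{}/parameters_epoch{}'.format(tmp_dir, trainer.current_epoch))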
@@ -176,12 +176,13 @@ class SaveParameters(Callback):

 # Then, we define the `PINN` object and train the solver using the `Trainer`.

-# In[8]:
+# In[16]:


 ### train the problem with PINN
+from pina.optim import TorchOptimizer
 max_epochs = 5000
-pinn = PINN(problem, model)
+pinn = PINN(problem, model, optimizer=TorchOptimizer(torch.optim.Adam, lr=0.005))
 # define the trainer for the solver
 trainer = Trainer(solver=pinn, accelerator='cpu', max_epochs=max_epochs,
                   default_root_dir=tmp_dir, enable_model_summary=False, callbacks=[SaveParameters()])
@@ -190,13 +191,13 @@ trainer.train()

 # One can now see how the parameters vary during the training by reading the saved solution and plotting them. The plot shows that the parameters stabilize to their true values before reaching epoch $1000$!

-# In[9]:
+# In[17]:


 epochs_saved = range(99, max_epochs, 100)
 parameters = torch.empty((int(max_epochs/100), 2))
 for i, epoch in enumerate(epochs_saved):
-    params_torch = torch.load('{}/parameters_epoch{}'.format(tmp_dir, epoch))
+    params_torch = torch.load('{}/parameters_epoch{}'.format(tmp_dir, epoch), weights_only=False)
     for e, var in enumerate(pinn.problem.unknown_variables):
         parameters[i, e] = params_torch[var].data