fix Supervised/PINN solvers forward + fix tut5
committed by Nicola Demo
parent 4977d55507
commit 4844640727
@@ -44,8 +44,8 @@ taken from the authors' original reference.
     data = io.loadmat("Data_Darcy.mat")
 
     # extract data (we use only 100 data for train)
-    k_train = torch.tensor(data['k_train'], dtype=torch.float).unsqueeze(-1)[:100, ...]
-    u_train = torch.tensor(data['u_train'], dtype=torch.float).unsqueeze(-1)[:100, ...]
+    k_train = torch.tensor(data['k_train'], dtype=torch.float).unsqueeze(-1)
+    u_train = torch.tensor(data['u_train'], dtype=torch.float).unsqueeze(-1)
     k_test = torch.tensor(data['k_test'], dtype=torch.float).unsqueeze(-1)
     u_test = torch.tensor(data['u_test'], dtype=torch.float).unsqueeze(-1)
     x = torch.tensor(data['x'], dtype=torch.float)[0]
@@ -77,7 +77,7 @@ inheriting from ``AbstractProblem``.
     input_variables = ['u_0']
     output_variables = ['u']
     conditions = {'data' : Condition(input_points=LabelTensor(k_train, input_variables),
-                                     output_points=LabelTensor(u_train, input_variables))}
+                                     output_points=LabelTensor(u_train, output_variables))}
 
     # make problem
     problem = NeuralOperatorSolver()
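The fix above is the heart of the tutorial change: the output points must carry the output variable labels, not the input ones. A minimal standalone sketch of the corrected labeling, assuming `k_train`/`u_train` are the tensors loaded earlier and that `Condition` and `LabelTensor` are importable from the top-level `pina` package (the tutorial's import lines are outside this diff):

    # hedged sketch of the corrected Condition labeling
    from pina import Condition, LabelTensor  # import path is an assumption

    input_variables = ['u_0']
    output_variables = ['u']

    conditions = {'data': Condition(
        input_points=LabelTensor(k_train, input_variables),     # inputs labeled 'u_0'
        output_points=LabelTensor(u_train, output_variables))}  # outputs labeled 'u' (the fix)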
@@ -99,7 +99,7 @@ training using supervised learning.
     solver = SupervisedSolver(problem=problem, model=model)
 
     # make the trainer and train
-    trainer = Trainer(solver=solver, max_epochs=100, accelerator='cpu', enable_model_summary=False) # we train on CPU and avoid model summary at beginning of training (optional)
+    trainer = Trainer(solver=solver, max_epochs=10, accelerator='cpu', enable_model_summary=False, batch_size=10) # we train on CPU and avoid model summary at beginning of training (optional)
     trainer.train()
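A note on the new Trainer settings: once batching is enabled, iterations per epoch equal the number of training samples divided by batch_size. A back-of-the-envelope check, assuming the full Darcy training set of 1000 samples as in the original reference (plausible, since the [:100] slice was removed above):

    # sketch: iterations per epoch implied by the settings above (sizes assumed)
    n_train = 1000   # assumption: full training set size
    batch_size = 10
    print(n_train // batch_size, 'iterations per epoch')  # 100, matching "100it" in the logs below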
@@ -112,15 +112,18 @@ training using supervised learning.
     HPU available: False, using: 0 HPUs
 
 
 .. parsed-literal::
 
+    Epoch 9: : 100it [00:00, 383.36it/s, v_num=36, mean_loss=0.108]
+
+
+.. parsed-literal::
+
     Training: 0it [00:00, ?it/s]
+    `Trainer.fit` stopped: `max_epochs=10` reached.
 
 
 .. parsed-literal::
 
-    `Trainer.fit` stopped: `max_epochs=100` reached.
+    Epoch 9: : 100it [00:00, 380.57it/s, v_num=36, mean_loss=0.108]
@@ -143,8 +146,8 @@ The final loss is pretty high… We can calculate the error by importing ``LpLoss``.
 
 .. parsed-literal::
 
-    Final error training 56.24%
-    Final error testing 55.95%
+    Final error training 56.04%
+    Final error testing 56.01%
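For reference, a sketch of how these percentages are computed with ``LpLoss``; the construction of ``metric_err`` is not shown in this diff, so the default constructor below is an assumption, while the evaluation line follows the tutorial code further down:

    # hedged sketch: relative error in percent, following the tutorial code
    from pina.loss import LpLoss

    metric_err = LpLoss()  # assumption: defaults; the tutorial's exact arguments are outside this diff

    err = float(metric_err(u_train.squeeze(-1),
                           solver.models[0](k_train).squeeze(-1)).mean()) * 100
    print(f'Final error training {err:.2f}%')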
Solving the problem with a Fourier Neural Operator (FNO)
@@ -170,7 +173,7 @@ operator, this approach is better suited, as we shall see.
     solver = SupervisedSolver(problem=problem, model=model)
 
     # make the trainer and train
-    trainer = Trainer(solver=solver, max_epochs=100, accelerator='cpu', enable_model_summary=False) # we train on CPU and avoid model summary at beginning of training (optional)
+    trainer = Trainer(solver=solver, max_epochs=10, accelerator='cpu', enable_model_summary=False, batch_size=10) # we train on CPU and avoid model summary at beginning of training (optional)
     trainer.train()
@@ -183,15 +186,18 @@ operator, this approach is better suited, as we shall see.
     HPU available: False, using: 0 HPUs
 
 
 .. parsed-literal::
 
+    Epoch 9: : 100it [00:04, 22.13it/s, v_num=37, mean_loss=0.000952]
+
+
+.. parsed-literal::
+
     Training: 0it [00:00, ?it/s]
+    `Trainer.fit` stopped: `max_epochs=10` reached.
 
 
 .. parsed-literal::
 
-    `Trainer.fit` stopped: `max_epochs=100` reached.
+    Epoch 9: : 100it [00:04, 22.07it/s, v_num=37, mean_loss=0.000952]
We can clearly see that the final loss is lower. Let’s see in testing…
@@ -210,8 +216,8 @@ training, when many data samples are used.
 
 .. parsed-literal::
 
-    Final error training 10.86%
-    Final error testing 12.77%
+    Final error training 4.45%
+    Final error testing 4.91%
 
 As we can see the loss is way lower!
Binary file not shown (image; size 14 KiB before, 15 KiB after).
@@ -83,11 +83,11 @@ class PINN(SolverInterface):
         :return: PINN solution.
         :rtype: torch.Tensor
         """
-        # extract labels
-        x = x.extract(self.problem.input_variables)
-        # perform forward pass
+        # extract torch.Tensor from corresponding label
+        x = x.extract(self.problem.input_variables).as_subclass(torch.Tensor)
+        # perform forward pass (using torch.Tensor) + converting to LabelTensor
         output = self.neural_net(x).as_subclass(LabelTensor)
-        # set the labels
+        # set the labels for LabelTensor
         output.labels = self.problem.output_variables
         return output
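The pattern of this fix: strip the LabelTensor subclass before the network call so that plain torch operations see a torch.Tensor, then re-wrap and re-label the output. A self-contained sketch of the same round trip (the LabelTensor import path is an assumption; the method body mirrors the hunk above):

    import torch
    from pina import LabelTensor  # assumption: top-level import

    def forward(self, x):
        # extract the input columns and drop the subclass -> plain torch.Tensor
        x = x.extract(self.problem.input_variables).as_subclass(torch.Tensor)
        # forward pass, then re-wrap the result as a LabelTensor
        output = self.neural_net(x).as_subclass(LabelTensor)
        # attach the output variable names
        output.labels = self.problem.output_variables
        return output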
@@ -72,11 +72,11 @@ class SupervisedSolver(SolverInterface):
         :return: Solver solution.
         :rtype: torch.Tensor
         """
-        # extract labels
-        x = x.extract(self.problem.input_variables)
-        # perform forward pass
+        # extract torch.Tensor from corresponding label
+        x = x.extract(self.problem.input_variables).as_subclass(torch.Tensor)
+        # perform forward pass (using torch.Tensor) + converting to LabelTensor
        output = self.neural_net(x).as_subclass(LabelTensor)
-        # set the labels
+        # set the labels for LabelTensor
         output.labels = self.problem.output_variables
         return output
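A quick usage sketch of what the fixed forward round trip looks like from outside the solver (toy tensor; the label names come from the tutorial problem, and the pointwise (5, 1) shape is an assumption for illustration):

    # hedged example: the solver consumes and returns LabelTensors
    pts = LabelTensor(torch.rand(5, 1), ['u_0'])  # labeled input points
    out = solver.forward(pts)                     # the net runs on a plain torch.Tensor inside
    print(type(out).__name__, out.labels)         # LabelTensor ['u']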
@@ -99,6 +99,44 @@ class SupervisedSolver(SolverInterface):
         :rtype: LabelTensor
         """
+
+        dataloader = self.trainer.train_dataloader
+        condition_idx = batch['condition']
+
+        for condition_id in range(condition_idx.min(), condition_idx.max()+1):
+
+            condition_name = dataloader.condition_names[condition_id]
+            condition = self.problem.conditions[condition_name]
+            pts = batch['pts']
+            out = batch['output']
+
+            if condition_name not in self.problem.conditions:
+                raise RuntimeError('Something wrong happened.')
+
+            # for data driven mode
+            if not hasattr(condition, 'output_points'):
+                raise NotImplementedError('Supervised solver works only in data-driven mode.')
+
+            output_pts = out[condition_idx == condition_id]
+            input_pts = pts[condition_idx == condition_id]
+
+            loss = self.loss(self.forward(input_pts), output_pts) * condition.data_weight
+            loss = loss.as_subclass(torch.Tensor)
+
+        self.log('mean_loss', float(loss), prog_bar=True, logger=True)
+        return loss
+
+    def training_step_(self, batch, batch_idx):
+        """Solver training step.
+
+        :param batch: The batch element in the dataloader.
+        :type batch: tuple
+        :param batch_idx: The batch index.
+        :type batch_idx: int
+        :return: The sum of the loss functions.
+        :rtype: LabelTensor
+        """
+
         for condition_name, samples in batch.items():
 
             if condition_name not in self.problem.conditions:
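The new training_step groups the rows of a batch by an integer condition index and computes a weighted loss per condition. A standalone toy illustration of the grouping logic (random tensors, not PINA's actual dataloader):

    import torch

    # a fake batch in the layout the new training_step expects
    batch = {
        'pts':       torch.randn(6, 2),
        'output':    torch.randn(6, 1),
        'condition': torch.tensor([0, 0, 1, 1, 1, 0]),
    }

    # iterate over condition ids and select the matching rows, as above
    for cid in range(int(batch['condition'].min()), int(batch['condition'].max()) + 1):
        mask = batch['condition'] == cid
        input_pts, output_pts = batch['pts'][mask], batch['output'][mask]
        print(cid, input_pts.shape, output_pts.shape)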
tutorials/tutorial5/tutorial.ipynb (vendored, 86 changes): file diff suppressed because one or more lines are too long.

tutorials/tutorial5/tutorial.py (vendored, 26 changes):
@@ -6,7 +6,7 @@
 # In this tutorial we are going to solve the Darcy flow problem in two dimensions, presented in [*Fourier Neural Operator for
 # Parametric Partial Differential Equations*](https://openreview.net/pdf?id=c8P9NQVtmnO). First of all we import the modules needed for the tutorial. Importing `scipy` is needed for input/output operations.
 
-# In[1]:
+# In[11]:
 
 
 # !pip install scipy # install scipy
@@ -32,15 +32,15 @@ import matplotlib.pyplot as plt
 # Specifically, $u$ is the flow pressure, $k$ is the permeability field and $f$ is the forcing function. The Darcy flow can parameterize a variety of systems, including flow through porous media, elastic materials and heat conduction. Here you will define the domain as a 2D unit square with Dirichlet boundary conditions. The dataset is taken from the authors' original reference.
 #
 
-# In[17]:
+# In[12]:
 
 
 # download the dataset
 data = io.loadmat("Data_Darcy.mat")
 
 # extract data (we use only 100 data for train)
-k_train = torch.tensor(data['k_train'], dtype=torch.float).unsqueeze(-1)[:100, ...]
-u_train = torch.tensor(data['u_train'], dtype=torch.float).unsqueeze(-1)[:100, ...]
+k_train = torch.tensor(data['k_train'], dtype=torch.float).unsqueeze(-1)
+u_train = torch.tensor(data['u_train'], dtype=torch.float).unsqueeze(-1)
 k_test = torch.tensor(data['k_test'], dtype=torch.float).unsqueeze(-1)
 u_test = torch.tensor(data['u_test'], dtype=torch.float).unsqueeze(-1)
 x = torch.tensor(data['x'], dtype=torch.float)[0]
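A small sanity check on the loaded tensors can save debugging time here; the trailing singleton channel comes from the unsqueeze(-1) above, while the exact grid sizes depend on Data_Darcy.mat:

    # hedged sketch: verify shapes after loading
    print('k_train:', k_train.shape, 'u_train:', u_train.shape)
    print('k_test: ', k_test.shape,  'u_test: ', u_test.shape)
    assert k_train.shape == u_train.shape  # permeability/pressure pairs must align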
@@ -49,7 +49,7 @@ y = torch.tensor(data['y'], dtype=torch.float)[0]
 
 # Let's visualize some data
 
-# In[18]:
+# In[13]:
 
 
 plt.subplot(1, 2, 1)
@@ -63,14 +63,14 @@ plt.show()
 
 # We now create the neural operator class. It is a very simple class, inheriting from `AbstractProblem`.
 
-# In[19]:
+# In[14]:
 
 
 class NeuralOperatorSolver(AbstractProblem):
     input_variables = ['u_0']
     output_variables = ['u']
     conditions = {'data' : Condition(input_points=LabelTensor(k_train, input_variables),
-                                     output_points=LabelTensor(u_train, input_variables))}
+                                     output_points=LabelTensor(u_train, output_variables))}
 
 # make problem
 problem = NeuralOperatorSolver()
@@ -80,7 +80,7 @@ problem = NeuralOperatorSolver()
 #
 # We will first solve the problem using a Feedforward neural network. We will use the `SupervisedSolver` for solving the problem, since we are training using supervised learning.
 
-# In[20]:
+# In[15]:
 
 
 # make model
@@ -91,13 +91,13 @@ model = FeedForward(input_dimensions=1, output_dimensions=1)
 solver = SupervisedSolver(problem=problem, model=model)
 
 # make the trainer and train
-trainer = Trainer(solver=solver, max_epochs=100, accelerator='cpu', enable_model_summary=False) # we train on CPU and avoid model summary at beginning of training (optional)
+trainer = Trainer(solver=solver, max_epochs=10, accelerator='cpu', enable_model_summary=False, batch_size=10) # we train on CPU and avoid model summary at beginning of training (optional)
 trainer.train()
 
 
 # The final loss is pretty high... We can calculate the error by importing `LpLoss`.
 
-# In[21]:
+# In[16]:
 
 
 from pina.loss import LpLoss
@@ -117,7 +117,7 @@ print(f'Final error testing {err:.2f}%')
 #
 # We will now move to solve the problem using an FNO. Since we are learning an operator, this approach is better suited, as we shall see.
 
-# In[22]:
+# In[17]:
 
 
 # make model
@@ -135,13 +135,13 @@ model = FNO(lifting_net=lifting_net,
 solver = SupervisedSolver(problem=problem, model=model)
 
 # make the trainer and train
-trainer = Trainer(solver=solver, max_epochs=100, accelerator='cpu', enable_model_summary=False) # we train on CPU and avoid model summary at beginning of training (optional)
+trainer = Trainer(solver=solver, max_epochs=10, accelerator='cpu', enable_model_summary=False, batch_size=10) # we train on CPU and avoid model summary at beginning of training (optional)
 trainer.train()
 
 
 # We can clearly see that the final loss is lower. Let's see in testing... Notice that the number of parameters is way higher than in a `FeedForward` network. We suggest using a GPU or TPU for a speed-up in training, when many data samples are used.
 
-# In[23]:
+# In[18]:
 
 
 err = float(metric_err(u_train.squeeze(-1), solver.models[0](k_train).squeeze(-1)).mean())*100
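To quantify the claim that the FNO has far more parameters than the `FeedForward` network, a generic count over any torch module works; calling it on each of the two models built above is left as a usage note, and the variable name is a placeholder:

    # hedged sketch: count trainable parameters of a torch model
    def n_params(module):
        return sum(p.numel() for p in module.parameters())

    print('parameters:', n_params(model))  # call once per model built above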