fix Supervised/PINN solvers forward + fix tut5

Dario Coscia
2023-11-09 11:24:00 +01:00
committed by Nicola Demo
parent 4977d55507
commit 4844640727
6 changed files with 123 additions and 79 deletions

File diff suppressed because one or more lines are too long


@@ -6,7 +6,7 @@
# In this tutorial we are going to solve the Darcy flow problem in two dimensions, presented in [*Fourier Neural Operator for
# Parametric Partial Differential Equations*](https://openreview.net/pdf?id=c8P9NQVtmnO). First of all we import the modules needed for the tutorial. Importing `scipy` is needed for input/output operations.
-# In[1]:
+# In[11]:
# !pip install scipy # install scipy
@@ -32,15 +32,15 @@ import matplotlib.pyplot as plt
# Specifically, $u$ is the flow pressure, $k$ is the permeability field and $f$ is the forcing function. The Darcy flow can parameterize a variety of systems, including flow through porous media, elastic materials and heat conduction. Here you will define the domain as a 2D unit square with Dirichlet boundary conditions. The dataset is taken from the authors' original reference.
#
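# For reference, the governing equation (as stated in the FNO paper; reproduced here as a sketch, with homogeneous Dirichlet conditions assumed) is
# $$-\nabla \cdot (k(x)\,\nabla u(x)) = f(x), \quad x \in (0,1)^2, \qquad u(x) = 0, \quad x \in \partial(0,1)^2.$$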
-# In[17]:
+# In[12]:
# download the dataset
data = io.loadmat("Data_Darcy.mat")
# extract data (we use only 100 data for train)
-k_train = torch.tensor(data['k_train'], dtype=torch.float).unsqueeze(-1)[:100, ...]
-u_train = torch.tensor(data['u_train'], dtype=torch.float).unsqueeze(-1)[:100, ...]
+k_train = torch.tensor(data['k_train'], dtype=torch.float).unsqueeze(-1)
+u_train = torch.tensor(data['u_train'], dtype=torch.float).unsqueeze(-1)
k_test = torch.tensor(data['k_test'], dtype=torch.float).unsqueeze(-1)
u_test = torch.tensor(data['u_test'], dtype=torch.float).unsqueeze(-1)
x = torch.tensor(data['x'], dtype=torch.float)[0]
@@ -49,7 +49,7 @@ y = torch.tensor(data['y'], dtype=torch.float)[0]
# Let's visualize some data
-# In[18]:
+# In[13]:
plt.subplot(1, 2, 1)
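# the body of this plotting cell is elided from the diff; a sketch of what follows
# (panel titles are illustrative assumptions, not taken from the diff):
plt.imshow(k_train.squeeze(-1)[0])
plt.title('permeability')
plt.subplot(1, 2, 2)
plt.imshow(u_train.squeeze(-1)[0])
plt.title('field solution')
plt.show()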
@@ -63,14 +63,14 @@ plt.show()
# We now create the neural operator class. It is a very simple class, inheriting from `AbstractProblem`.
-# In[19]:
+# In[14]:
class NeuralOperatorSolver(AbstractProblem):
    input_variables = ['u_0']
    output_variables = ['u']
    conditions = {'data' : Condition(input_points=LabelTensor(k_train, input_variables),
-                                     output_points=LabelTensor(u_train, input_variables))}
+                                     output_points=LabelTensor(u_train, output_variables))}
# make problem
problem = NeuralOperatorSolver()
@@ -80,7 +80,7 @@ problem = NeuralOperatorSolver()
#
# We will first solve the problem using a `FeedForward` neural network. We will use the `SupervisedSolver`, since we are training with supervised learning.
-# In[20]:
+# In[15]:
# make model
@@ -91,13 +91,13 @@ model = FeedForward(input_dimensions=1, output_dimensions=1)
solver = SupervisedSolver(problem=problem, model=model)
# make the trainer and train
-trainer = Trainer(solver=solver, max_epochs=100, accelerator='cpu', enable_model_summary=False) # we train on CPU and avoid model summary at beginning of training (optional)
+trainer = Trainer(solver=solver, max_epochs=10, accelerator='cpu', enable_model_summary=False, batch_size=10) # we train on CPU and avoid model summary at beginning of training (optional)
trainer.train()
# The final loss is pretty high... We can calculate the error by importing `LpLoss`.
-# In[21]:
+# In[16]:
from pina.loss import LpLoss
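# the construction of `metric_err` is elided from the diff; a minimal sketch, assuming the
# `LpLoss` constructor exposes a `relative` flag:
metric_err = LpLoss(relative=True)  # relative L2 error between prediction and ground truth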
@@ -117,7 +117,7 @@ print(f'Final error testing {err:.2f}%')
#
# We will now move on to solving the problem with an FNO. Since we are learning an operator, this approach is better suited, as we shall see.
-# In[22]:
+# In[17]:
# make model
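# the lifting/projecting networks and the FNO constructor are elided from this hunk;
# a minimal sketch (layer sizes and keyword names are illustrative assumptions, not taken from the diff):
lifting_net = torch.nn.Linear(1, 24)     # lift the scalar input into a wider channel space
projecting_net = torch.nn.Linear(24, 1)  # project back to the scalar output
model = FNO(lifting_net=lifting_net,
            projecting_net=projecting_net,
            n_modes=8,      # assumed: number of retained Fourier modes
            dimensions=2)   # assumed: 2D spatial problem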
@@ -135,13 +135,13 @@ model = FNO(lifting_net=lifting_net,
solver = SupervisedSolver(problem=problem, model=model)
# make the trainer and train
-trainer = Trainer(solver=solver, max_epochs=100, accelerator='cpu', enable_model_summary=False) # we train on CPU and avoid model summary at beginning of training (optional)
+trainer = Trainer(solver=solver, max_epochs=10, accelerator='cpu', enable_model_summary=False, batch_size=10) # we train on CPU and avoid model summary at beginning of training (optional)
trainer.train()
# We can clearly see that the final loss is lower. Let's see how it does in testing. Notice that the number of parameters is far higher than in a `FeedForward` network. We suggest using a GPU or TPU for a speed-up in training when many data samples are used.
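# For instance, a hypothetical GPU run would only change the `accelerator` flag (sketch; assumes a CUDA device is available):
# trainer = Trainer(solver=solver, max_epochs=10, accelerator='gpu', enable_model_summary=False, batch_size=10)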
-# In[23]:
+# In[18]:
err = float(metric_err(u_train.squeeze(-1), solver.models[0](k_train).squeeze(-1)).mean())*100
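# the line above measures the training error; the held-out test error would mirror it
# (a sketch using the `k_test`/`u_test` tensors loaded earlier):
err = float(metric_err(u_test.squeeze(-1), solver.models[0](k_test).squeeze(-1)).mean())*100
print(f'Final error testing {err:.2f}%')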