Fix SupervisedSolver GPU bug and implement GraphSolver (#346)

* Fix some bugs
* Solve bug with GPU and model_summary parameters in SupervisedSolver class
* Implement GraphSolver class
* Fix Tutorial 5

committed by Nicola Demo
parent 30f865d912
commit 2be57944ba
@@ -4,6 +4,7 @@ from .sample_dataset import SamplePointDataset
 from .data_dataset import DataPointDataset
 from .pina_batch import Batch


 class SamplePointLoader:
     """
     This class is used to create a dataloader to use during the training.
@@ -95,7 +96,7 @@ class SamplePointLoader:
         self.batch_output_pts = torch.tensor_split(
             dataset.output_pts, batch_num
         )
-        print(input_labels)
+        #print(input_labels)
         for i in range(len(self.batch_input_pts)):
             self.batch_input_pts[i].labels = input_labels
             self.batch_output_pts[i].labels = output_labels
@@ -161,7 +162,6 @@ class SamplePointLoader:
                 self.batch_input_pts,
                 self.batch_output_pts,
                 self.batch_data_conditions)
-            print(batch.input.labels)

             self.batches.append(batch)

@@ -425,7 +425,7 @@ class LabelTensor(torch.Tensor):
             raise NotImplementedError
         labels = [tensor.labels for tensor in tensors]
-        print(labels)

     def requires_grad_(self, mode=True):
         lt = super().requires_grad_(mode)
@@ -436,7 +436,6 @@ class LabelTensor(torch.Tensor):
     def dtype(self):
         return super().dtype


     def to(self, *args, **kwargs):
         """
         Performs Tensor dtype and/or device conversion. For more details, see
@@ -447,7 +446,6 @@ class LabelTensor(torch.Tensor):
         new.data = tmp.data
         return new


     def clone(self, *args, **kwargs):
         """
         Clone the LabelTensor. For more details, see
@@ -269,4 +269,7 @@ class FNO(KernelNeuralOperator):
         :return: The output tensor obtained from FNO.
         :rtype: torch.Tensor
         """
+        if isinstance(x, LabelTensor):
+            x = x.as_subclass(torch.Tensor)
         return super().forward(x)
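Note on the FNO change: the new cast strips the LabelTensor subclass, and with it the label bookkeeping, before the tensor enters the spectral layers. A minimal standalone sketch of what torch.Tensor.as_subclass does (Tagged is a made-up subclass for illustration, not part of PINA):

    import torch

    class Tagged(torch.Tensor):
        pass  # stand-in for LabelTensor

    t = torch.rand(3).as_subclass(Tagged)
    print(type(t).__name__)          # Tagged
    u = t.as_subclass(torch.Tensor)  # same storage, no copy
    print(type(u).__name__)          # Tensor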
@@ -17,3 +17,4 @@ from .pinns import *
 from .supervised import SupervisedSolver
 from .rom import ReducedOrderModelSolver
 from .garom import GAROM
+from .graph import GraphSupervisedSolver
34  pina/solvers/graph.py  (new file)
@@ -0,0 +1,34 @@
+from .supervised import SupervisedSolver
+from ..graph import Graph
+
+
+class GraphSupervisedSolver(SupervisedSolver):
+
+    def __init__(
+            self,
+            problem,
+            model,
+            nodes_coordinates,
+            nodes_data,
+            loss=None,
+            optimizer=None,
+            scheduler=None):
+        super().__init__(problem, model, loss, optimizer, scheduler)
+        if isinstance(nodes_coordinates, str):
+            self._nodes_coordinates = [nodes_coordinates]
+        else:
+            self._nodes_coordinates = nodes_coordinates
+        if isinstance(nodes_data, str):
+            self._nodes_data = [nodes_data]
+        else:
+            self._nodes_data = nodes_data
+
+    def forward(self, input):
+        input_coords = input.extract(self._nodes_coordinates)
+        input_data = input.extract(self._nodes_data)
+
+        if not isinstance(input, Graph):
+            input = Graph.build('radius', nodes_coordinates=input_coords, nodes_data=input_data, radius=0.2)
+        g = self.model(input.data, edge_index=input.data.edge_index)
+        g.labels = {1: {'name': 'output', 'dof': ['u']}}
+        return g
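For orientation, the new solver is constructed like a SupervisedSolver plus two label groups that split the input columns into graph coordinates and node features. A usage sketch mirroring the updated test_graph further down (problem_graph, GraphModel and LpLoss are defined there):

    from pina.solvers import GraphSupervisedSolver
    from pina.trainer import Trainer

    solver = GraphSupervisedSolver(problem=problem_graph,         # inputs: 'x', 'y', 'u_0', 'u_1'
                                   model=GraphModel(2, 1),        # GNN called as model(data, edge_index=...)
                                   loss=LpLoss(),
                                   nodes_coordinates=['x', 'y'],  # columns used to build the radius graph
                                   nodes_data=['u_0', 'u_1'])     # columns used as node features
    trainer = Trainer(solver=solver, max_epochs=30, accelerator='cpu', batch_size=20)
    trainer.train()

Note that forward hard-codes a radius graph with radius=0.2 whenever the input is not already a Graph.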
@@ -82,7 +82,10 @@ class SupervisedSolver(SolverInterface):

         # check consistency
         check_consistency(loss, (LossInterface, _Loss), subclass=False)
-        self.loss = loss
+        self._loss = loss
+        self._model = self._pina_model[0]
+        self._optimizer = self._pina_optimizer[0]
+        self._scheduler = self._pina_scheduler[0]

     def forward(self, x):
         """Forward pass implementation for the solver.
@@ -92,7 +95,7 @@ class SupervisedSolver(SolverInterface):
         :rtype: torch.Tensor
         """

-        output = self._pina_model[0](x)
+        output = self._model(x)

         output.labels = {
             1: {
@@ -108,11 +111,11 @@ class SupervisedSolver(SolverInterface):
         :return: The optimizers and the schedulers
         :rtype: tuple(list, list)
         """
-        self._pina_optimizer[0].hook(self._pina_model[0].parameters())
-        self._pina_scheduler[0].hook(self._pina_optimizer[0])
+        self._optimizer.hook(self._model.parameters())
+        self._scheduler.hook(self._optimizer)
         return (
-            [self._pina_optimizer[0].optimizer_instance],
-            [self._pina_scheduler[0].scheduler_instance]
+            [self._optimizer.optimizer_instance],
+            [self._scheduler.scheduler_instance]
         )

     def training_step(self, batch, batch_idx):
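The single-element lists returned above are the contract PyTorch Lightning expects from configure_optimizers. A generic Lightning-style sketch of that contract, using a plain torch optimizer instead of PINA's wrapper objects (a sketch of the pattern, not the PINA implementation):

    import torch

    def configure_optimizers(self):
        # Lightning accepts a tuple ([optimizers], [schedulers])
        optimizer = torch.optim.Adam(self._model.parameters(), lr=1e-3)
        scheduler = torch.optim.lr_scheduler.ConstantLR(optimizer)
        return [optimizer], [scheduler]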
@@ -170,28 +173,28 @@ class SupervisedSolver(SolverInterface):
         :return: The residual loss averaged on the input coordinates
         :rtype: torch.Tensor
         """
-        return self.loss(self.forward(input_pts), output_pts)
+        return self._loss(self.forward(input_pts), output_pts)

     @property
     def scheduler(self):
         """
         Scheduler for training.
         """
-        return self._pina_scheduler
+        return self._scheduler

     @property
     def optimizer(self):
         """
         Optimizer for training.
         """
-        return self._pina_optimizer
+        return self._optimizer

     @property
     def model(self):
         """
         Neural network for training.
         """
-        return self._pina_model
+        return self._model

     @property
     def loss(self):
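Taken together, these renames move SupervisedSolver to the usual private-attribute-plus-read-only-property layout: state lives in _loss, _model, _optimizer and _scheduler, and the public names are properties. Plausibly this is what fixed the GPU/model_summary failures, since the old self.loss = loss assigned to a name that is also a property. A generic sketch of the pattern, nothing PINA-specific:

    class Solver:
        def __init__(self, loss):
            self._loss = loss   # stored privately

        @property
        def loss(self):
            return self._loss   # exposed read-only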
2  setup.py
@@ -15,7 +15,7 @@ VERSION = meta['__version__']
 KEYWORDS = 'machine-learning deep-learning modeling pytorch ode neural-networks differential-equations pde hacktoberfest pinn physics-informed physics-informed-neural-networks neural-operators equation-learning lightining'

 REQUIRED = [
-    'numpy<2.0', 'matplotlib', 'torch', 'lightning', 'pytorch_lightning'
+    'numpy', 'matplotlib', 'torch', 'lightning', 'pytorch_lightning', 'torch_geometric', 'torch-cluster'
 ]

 EXTRAS = {
@@ -6,7 +6,7 @@ from pina.solvers import SupervisedSolver
 from pina.trainer import Trainer
 from pina.model import FeedForward
 from pina.loss import LpLoss
+from pina.solvers import GraphSupervisedSolver

 class NeuralOperatorProblem(AbstractProblem):
     input_variables = ['u_0', 'u_1']
@@ -27,6 +27,25 @@ class NeuralOperatorProblem(AbstractProblem):
         )
     }

+class NeuralOperatorProblemGraph(AbstractProblem):
+    input_variables = ['x', 'y', 'u_0', 'u_1']
+    output_variables = ['u']
+    domains = {
+        'pts': LabelTensor(
+            torch.rand(100, 4),
+            labels={1: {'name': 'space', 'dof': ['x', 'y', 'u_0', 'u_1']}}
+        )
+    }
+    conditions = {
+        'data' : Condition(
+            domain='pts',
+            output_points=LabelTensor(
+                torch.rand(100, 1),
+                labels={1: {'name': 'output', 'dof': ['u']}}
+            )
+        )
+    }
+
 class myFeature(torch.nn.Module):
     """
     Feature: sin(x)
@@ -42,6 +61,7 @@ class myFeature(torch.nn.Module):


 problem = NeuralOperatorProblem()
+problem_graph = NeuralOperatorProblemGraph()
 # make the problem + extra feats
 extra_feats = [myFeature()]
 model = FeedForward(len(problem.input_variables),
@@ -58,7 +78,7 @@ def test_constructor():
 # def test_constructor_extra_feats():
 #     SupervisedSolver(problem=problem, model=model_extra_feats, extra_features=extra_feats)

+'''
 class AutoSolver(SupervisedSolver):

     def forward(self, input):
@@ -70,12 +90,13 @@ class AutoSolver(SupervisedSolver):
         print(input)
         print(input.data.edge_index)
         print(input.data)
-        g = self.model[0](input.data, edge_index=input.data.edge_index)
+        g = self._model(input.data, edge_index=input.data.edge_index)
         g.labels = {1: {'name': 'output', 'dof': ['u']}}
         return g
-        du_dt_new = LabelTensor(self.model[0](graph).reshape(-1,1), labels = ['du'])
+        du_dt_new = LabelTensor(self.model(graph).reshape(-1,1), labels = ['du'])

         return du_dt_new
+'''

 class GraphModel(torch.nn.Module):
     def __init__(self, in_channels, out_channels):
@@ -94,7 +115,8 @@ class GraphModel(torch.nn.Module):
         return x

 def test_graph():
-    solver = AutoSolver(problem = problem, model=GraphModel(2, 1), loss=LpLoss())
+    solver = GraphSupervisedSolver(problem=problem_graph, model=GraphModel(2, 1), loss=LpLoss(),
+                                   nodes_coordinates=['x', 'y'], nodes_data=['u_0', 'u_1'])
     trainer = Trainer(solver=solver, max_epochs=30, accelerator='cpu', batch_size=20)
     trainer.train()
@@ -105,7 +127,6 @@ def test_train_cpu():
     trainer.train()



 # def test_train_restore():
 #     tmpdir = "tests/tmp_restore"
 #     solver = SupervisedSolver(problem=problem,
@@ -153,3 +174,4 @@ def test_train_cpu():
 #                            extra_features=extra_feats)
 #     trainer = Trainer(solver=pinn, max_epochs=5, accelerator='cpu')
 #     trainer.train()
+test_graph()
329  tutorials/tutorial5/tutorial.ipynb  (vendored)
File diff suppressed because one or more lines are too long

64  tutorials/tutorial5/tutorial.py  (vendored)
@@ -48,24 +48,28 @@ plt.style.use('tableau-colorblind10')
 # Specifically, $u$ is the flow pressure, $k$ is the permeability field and $f$ is the forcing function. The Darcy flow can parameterize a variety of systems including flow through porous media, elastic materials and heat conduction. Here you will define the domain as a 2D unit square with Dirichlet boundary conditions. The dataset is taken from the authors' original reference.
 #

-# In[12]:
+# In[2]:


 # download the dataset
 data = io.loadmat("Data_Darcy.mat")

 # extract data (we use only 100 data for train)
-k_train = LabelTensor(torch.tensor(data['k_train'], dtype=torch.float).unsqueeze(-1), ['u0'])
-u_train = LabelTensor(torch.tensor(data['u_train'], dtype=torch.float).unsqueeze(-1), ['u'])
-k_test = LabelTensor(torch.tensor(data['k_test'], dtype=torch.float).unsqueeze(-1), ['u0'])
-u_test= LabelTensor(torch.tensor(data['u_test'], dtype=torch.float).unsqueeze(-1), ['u'])
+k_train = LabelTensor(torch.tensor(data['k_train'], dtype=torch.float).unsqueeze(-1),
+                      labels={3: {'dof': ['u0'], 'name': 'k_train'}})
+u_train = LabelTensor(torch.tensor(data['u_train'], dtype=torch.float).unsqueeze(-1),
+                      labels={3: {'dof': ['u'], 'name': 'u_train'}})
+k_test = LabelTensor(torch.tensor(data['k_test'], dtype=torch.float).unsqueeze(-1),
+                     labels={3: {'dof': ['u0'], 'name': 'k_test'}})
+u_test = LabelTensor(torch.tensor(data['u_test'], dtype=torch.float).unsqueeze(-1),
+                     labels={3: {'dof': ['u'], 'name': 'u_test'}})
 x = torch.tensor(data['x'], dtype=torch.float)[0]
 y = torch.tensor(data['y'], dtype=torch.float)[0]


 # Let's visualize some data

-# In[13]:
+# In[3]:


 plt.subplot(1, 2, 1)
@@ -77,15 +81,24 @@ plt.imshow(u_train.squeeze(-1)[0])
 plt.show()


+# In[4]:
+
+
+u_train.labels[3]['dof']
+
+
 # We now create the neural operator class. It is a very simple class, inheriting from `AbstractProblem`.

-# In[17]:
+# In[5]:


 class NeuralOperatorSolver(AbstractProblem):
-    input_variables = k_train.labels
-    output_variables = u_train.labels
-    conditions = {'data' : Condition(input_points=k_train,
+    input_variables = k_train.labels[3]['dof']
+    output_variables = u_train.labels[3]['dof']
+    domains = {
+        'pts': k_train
+    }
+    conditions = {'data' : Condition(domain='pts',
                                      output_points=u_train)}

 # make problem
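The tutorial now uses the dict form of LabelTensor labels, keyed by tensor dimension: after unsqueeze(-1) the data tensors have four dimensions, so the metadata hangs off dimension 3, the trailing channel axis. A small recap of how the pieces above fit together (values follow the definitions in this diff):

    # labels maps a dimension index to its metadata dict
    k_train.labels                # {3: {'dof': ['u0'], 'name': 'k_train'}}
    k_train.labels[3]['dof']      # ['u0']  -> used as problem input_variables
    u_train.labels[3]['dof']      # ['u']   -> used as problem output_variables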
@@ -96,7 +109,7 @@ problem = NeuralOperatorSolver()
 #
 # We will first solve the problem using a Feedforward neural network. We will use the `SupervisedSolver` for solving the problem, since we are training using supervised learning.

-# In[18]:
+# In[6]:


 # make model
@@ -107,25 +120,26 @@ model = FeedForward(input_dimensions=1, output_dimensions=1)
 solver = SupervisedSolver(problem=problem, model=model)

 # make the trainer and train
-trainer = Trainer(solver=solver, max_epochs=10, accelerator='cpu', enable_model_summary=False, batch_size=10) # we train on CPU and avoid model summary at beginning of training (optional)
+trainer = Trainer(solver=solver, max_epochs=10, accelerator='cpu', enable_model_summary=False, batch_size=10)
+# We train on CPU and avoid model summary at the beginning of training (optional)
 trainer.train()


 # The final loss is pretty high... We can calculate the error by importing `LpLoss`.

-# In[19]:
+# In[7]:


-from pina.loss.loss_interface import LpLoss
+from pina.loss import LpLoss

 # make the metric
 metric_err = LpLoss(relative=True)

-err = float(metric_err(u_train.squeeze(-1), solver.neural_net(k_train).squeeze(-1)).mean())*100
+model = solver.models[0]
+err = float(metric_err(u_train.squeeze(-1), model(k_train).squeeze(-1)).mean())*100
 print(f'Final error training {err:.2f}%')

-err = float(metric_err(u_test.squeeze(-1), solver.neural_net(k_test).squeeze(-1)).mean())*100
+err = float(metric_err(u_test.squeeze(-1), model(k_test).squeeze(-1)).mean())*100
 print(f'Final error testing {err:.2f}%')
@@ -133,7 +147,7 @@ print(f'Final error testing {err:.2f}%')
 #
 # We will now move to solve the problem using a FNO. Since we are learning an operator, this approach is better suited, as we shall see.

-# In[24]:
+# In[8]:


 # make model
@@ -157,13 +171,15 @@ trainer.train()

 # We can clearly see that the final loss is lower. Let's see in testing... Notice that the number of parameters is way higher than for a `FeedForward` network. We suggest using GPU or TPU for a speed-up in training, when many data samples are used.

-# In[25]:
+# In[9]:


+model = solver.models[0]
+
-err = float(metric_err(u_train.squeeze(-1), solver.neural_net(k_train).squeeze(-1)).mean())*100
+err = float(metric_err(u_train.squeeze(-1), model(k_train).squeeze(-1)).mean())*100
 print(f'Final error training {err:.2f}%')

-err = float(metric_err(u_test.squeeze(-1), solver.neural_net(k_test).squeeze(-1)).mean())*100
+err = float(metric_err(u_test.squeeze(-1), model(k_test).squeeze(-1)).mean())*100
 print(f'Final error testing {err:.2f}%')
@@ -172,3 +188,9 @@ print(f'Final error testing {err:.2f}%')
 # ## What's next?
 #
 # We have made a very simple example on how to use the `FNO` for learning a neural operator. Currently in **PINA** we implement 1D/2D/3D cases. We suggest extending the tutorial using more complex problems and training for longer, to see the full potential of neural operators.
+
+# In[ ]:
+
+
+
+