Update tutorials 8 through 14
committed by Nicola Demo
parent 17792caa34
commit 9e55746546
BIN tutorials/tutorial10/data/Data_KS.mat (vendored, new file)
Binary file not shown.
BIN tutorials/tutorial10/data/Data_KS2.mat (vendored, new file)
Binary file not shown.
tutorials/tutorial10/tutorial.ipynb (vendored, 14 changes)
File diff suppressed because one or more lines are too long
tutorials/tutorial10/tutorial.py (vendored, 6 changes)
@@ -32,7 +32,7 @@ if IN_COLAB:
 
 import torch
 import matplotlib.pyplot as plt
-plt.style.use('tableau-colorblind10')
+
 from scipy import io
 from pina import Condition, LabelTensor
 from pina.problem import AbstractProblem
@@ -211,8 +211,8 @@ model = AveragingNeuralOperator(lifting_net=lifting_net,
 class NeuralOperatorProblem(AbstractProblem):
     input_variables = initial_cond_train.labels
     output_variables = sol_train.labels
-    conditions = {'data' : Condition(input_points=initial_cond_train,
-                                     output_points=sol_train)}
+    conditions = {'data' : Condition(input=initial_cond_train,
+                                     target=sol_train)}


 # initialize problem
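Aside: the `Condition` keyword rename (`input_points`/`output_points` to `input`/`target`) recurs throughout this commit. A minimal sketch of the updated usage, with toy tensors that are purely illustrative:

    import torch
    from pina import Condition, LabelTensor

    # toy data; shapes and labels are illustrative only
    initial_cond = LabelTensor(torch.rand(10, 1), labels=['u0'])
    solution = LabelTensor(torch.rand(10, 1), labels=['u'])

    # old API: Condition(input_points=initial_cond, output_points=solution)
    # new API used by the updated tutorials:
    data_condition = Condition(input=initial_cond, target=solution)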
BIN tutorials/tutorial11/logging.png (vendored, new file)
Binary file not shown. (added image, 204 KiB)
tutorials/tutorial13/tutorial.ipynb (vendored, 123 changes)
File diff suppressed because one or more lines are too long
tutorials/tutorial13/tutorial.py (vendored, 95 changes)
@@ -26,17 +26,21 @@ if IN_COLAB:
 get_ipython().system('pip install "pina-mathlab"')

 import torch
+import matplotlib.pyplot as plt
+import warnings

-from pina import Condition, Plotter, Trainer, Plotter
+from pina import Condition, Trainer
 from pina.problem import SpatialProblem
-from pina.operators import laplacian
-from pina.solvers import PINN, SAPINN
-from pina.model.layers import FourierFeatureEmbedding
+from pina.operator import laplacian
+from pina.solver import PINN, SelfAdaptivePINN as SAPINN
+from pina.model.block import FourierFeatureEmbedding
 from pina.loss import LpLoss
 from pina.domain import CartesianDomain
 from pina.equation import Equation, FixedValue
 from pina.model import FeedForward

+warnings.filterwarnings('ignore')
+

 # ## Multiscale Problem
 #
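Aside: the module renames above apply across all updated tutorials. A sketch of the old to new import mapping, taken directly from the hunks in this commit:

    # from pina.operators import laplacian                   (old)
    from pina.operator import laplacian
    # from pina.solvers import PINN, SAPINN                   (old)
    from pina.solver import PINN, SelfAdaptivePINN as SAPINN
    # from pina.model.layers import FourierFeatureEmbedding   (old)
    from pina.model.block import FourierFeatureEmbedding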
@@ -74,10 +78,10 @@ class Poisson(SpatialProblem):
 
     # here we write the problem conditions
     conditions = {
-        'bound_cond0' : Condition(domain=CartesianDomain({'x': 0}),
-                                  equation=FixedValue(0)),
-        'bound_cond1' : Condition(domain=CartesianDomain({'x': 1}),
-                                  equation=FixedValue(0)),
+        'bound_cond0' : Condition(domain=CartesianDomain({'x': 0.}),
+                                  equation=FixedValue(0.)),
+        'bound_cond1' : Condition(domain=CartesianDomain({'x': 1.}),
+                                  equation=FixedValue(0.)),
         'phys_cond': Condition(domain=spatial_domain,
                                equation=Equation(poisson_equation)),
     }
@@ -88,7 +92,8 @@ class Poisson(SpatialProblem):
 problem = Poisson()

 # let's discretise the domain
-problem.discretise_domain(128, 'grid')
+problem.discretise_domain(128, 'grid', domains=['phys_cond'])
+problem.discretise_domain(1, 'grid', domains=['bound_cond0','bound_cond1'])


 # A standard PINN approach would be to fit this model using a Feed Forward (fully connected) Neural Network. For a conventional fully-connected neural network is easy to
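Aside: discretisation is now requested per condition. The interior gets a 128-point grid for the physics residual, while each boundary domain is a single point (x = 0 or x = 1) and therefore needs only one sample. A sketch, assuming the `Poisson` problem defined above:

    # interior collocation points for the physics residual
    problem.discretise_domain(128, 'grid', domains=['phys_cond'])
    # each boundary condition lives on a single point, so one sample suffices
    problem.discretise_domain(1, 'grid', domains=['bound_cond0', 'bound_cond1'])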
@@ -96,36 +101,57 @@ problem.discretise_domain(128, 'grid')
 #
 # Below we run a simulation using the `PINN` solver and the self adaptive `SAPINN` solver, using a [`FeedForward`](https://mathlab.github.io/PINA/_modules/pina/model/feed_forward.html#FeedForward) model. We used a `MultiStepLR` scheduler to decrease the learning rate slowly during training (it takes around 2 minutes to run on CPU).

-# In[19]:
+# In[3]:


+from pina.optim import TorchScheduler

 # training with PINN and visualize results
+model = FeedForward(input_dimensions=1, output_dimensions=1, layers=[100, 100, 100])
 pinn = PINN(problem=problem,
-            model=FeedForward(input_dimensions=1, output_dimensions=1, layers=[100, 100, 100]),
-            scheduler=torch.optim.lr_scheduler.MultiStepLR,
-            scheduler_kwargs={'milestones' : [1000, 2000, 3000, 4000], 'gamma':0.9})
-trainer = Trainer(pinn, max_epochs=5000, accelerator='cpu', enable_model_summary=False)
+            model=model,
+            scheduler=TorchScheduler(torch.optim.lr_scheduler.MultiStepLR, # Pass the class directly, not an instance
+                                     milestones=[1000, 2000, 3000, 4000],
+                                     gamma=0.9))

+trainer = Trainer(pinn, max_epochs=5000, accelerator='cpu', enable_model_summary=False, val_size=0., train_size=1., test_size=0.)
 trainer.train()

 # training with PINN and visualize results
 sapinn = SAPINN(problem=problem,
-                model=FeedForward(input_dimensions=1, output_dimensions=1, layers=[100, 100, 100]),
-                scheduler_model=torch.optim.lr_scheduler.MultiStepLR,
-                scheduler_model_kwargs={'milestones' : [1000, 2000, 3000, 4000], 'gamma':0.9})
-trainer_sapinn = Trainer(sapinn, max_epochs=5000, accelerator='cpu', enable_model_summary=False)
+                model=model,
+                scheduler_model=TorchScheduler(torch.optim.lr_scheduler.MultiStepLR,
+                                               milestones=[1000, 2000, 3000, 4000],
+                                               gamma=0.9))
+trainer_sapinn = Trainer(sapinn, max_epochs=5000, accelerator='cpu', enable_model_summary=False, val_size=0., train_size=1., test_size=0.)
 trainer_sapinn.train()

-# plot results
-pl = Plotter()
-pl.plot(pinn, title='PINN Solution')
-pl.plot(sapinn, title='Self Adaptive PINN Solution')
+# In[4]:
+
+
+#define the function to plot the solution obtained using matplotlib
+def plot_solution(pinn_to_use, title):
+    pts = pinn_to_use.problem.spatial_domain.sample(256, 'grid', variables='x')
+    predicted_output = pinn_to_use.forward(pts).extract('u').as_subclass(torch.Tensor).cpu().detach()
+    true_output = pinn_to_use.problem.truth_solution(pts).cpu().detach()
+    pts = pts.cpu()
+    fig, ax = plt.subplots(nrows=1, ncols=1, figsize=(8, 8))
+    ax.plot(pts.extract(['x']), predicted_output, label='Neural Network solution')
+    ax.plot(pts.extract(['x']), true_output, label='True solution')
+    plt.title(title)
+    plt.legend()
+
+#plot the solution of the two PINNs
+plot_solution(pinn, 'PINN solution')
+plot_solution(sapinn, 'Self Adaptive PINN solution')


 # We can clearly see that the solution has not been learned by the two different solvers. Indeed the big problem is not in the optimization strategy (i.e. the solver), but in the model used to solve the problem. A simple `FeedForward` network can hardly handle multiscales if not enough collocation points are used!
 #
 # We can also compute the $l_2$ relative error for the `PINN` and `SAPINN` solutions:

-# In[20]:
+# In[5]:


 # l2 loss from PINA losses
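Aside: the scheduler change above is the new pattern everywhere in this commit. Instead of passing a scheduler class plus a separate `scheduler_kwargs` dict, the class and its keyword arguments are bundled in `pina.optim.TorchScheduler`. A minimal sketch:

    import torch
    from pina.optim import TorchScheduler

    # wrap the torch scheduler class (not an instance) together with its kwargs
    scheduler = TorchScheduler(torch.optim.lr_scheduler.MultiStepLR,
                               milestones=[1000, 2000, 3000, 4000],
                               gamma=0.9)
    # then: PINN(problem=problem, model=model, scheduler=scheduler)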
@@ -153,7 +179,7 @@ print(f'Relative l2 error SAPINN {l2_loss(sapinn(pts), problem.truth_solution
 # In PINA we already have implemented the feature as a `layer` called [`FourierFeatureEmbedding`](https://mathlab.github.io/PINA/_rst/layers/fourier_embedding.html). Below we will build the *Multi-scale Fourier Feature Architecture*. In this architecture multiple Fourier feature embeddings (initialized with different $\sigma$)
 # are applied to input coordinates and then passed through the same fully-connected neural network, before the outputs are finally concatenated with a linear layer.

-# In[21]:
+# In[6]:


 class MultiscaleFourierNet(torch.nn.Module):
@@ -173,36 +199,35 @@ class MultiscaleFourierNet(torch.nn.Module):
         e2 = self.layers(self.embedding2(x))
         return self.final_layer(torch.cat([e1, e2], dim=-1))

-MultiscaleFourierNet()


 # We will train the `MultiscaleFourierNet` with the `PINN` solver (and feel free to try also with our PINN variants (`SAPINN`, `GPINN`, `CompetitivePINN`, ...).

-# In[22]:
+# In[7]:


 multiscale_pinn = PINN(problem=problem,
                        model=MultiscaleFourierNet(),
-                       scheduler=torch.optim.lr_scheduler.MultiStepLR,
-                       scheduler_kwargs={'milestones' : [1000, 2000, 3000, 4000], 'gamma':0.9})
-trainer = Trainer(multiscale_pinn, max_epochs=5000, accelerator='cpu', enable_model_summary=False) # we train on CPU and avoid model summary at beginning of training (optional)
+                       scheduler=TorchScheduler(torch.optim.lr_scheduler.MultiStepLR,
+                                                milestones=[1000, 2000, 3000, 4000],
+                                                gamma=0.9))
+trainer = Trainer(multiscale_pinn, max_epochs=5000, accelerator='cpu', enable_model_summary=False, val_size=0., train_size=1., test_size=0.) # we train on CPU and avoid model summary at beginning of training (optional)
 trainer.train()


 # Let us now plot the solution and compute the relative $l_2$ again!

-# In[24]:
+# In[8]:


-# plot the solution
-pl.plot(multiscale_pinn, title='Solution PINN with MultiscaleFourierNet')
+#plot solution obtained
+plot_solution(multiscale_pinn, 'Multiscale PINN solution')

 # sample new test points
 pts = pts = problem.spatial_domain.sample(100, 'grid')
-print(f'Relative l2 error PINN with MultiscaleFourierNet {l2_loss(multiscale_pinn(pts), problem.truth_solution(pts)).item():.2%}')
+print(f'Relative l2 error PINN with MultiscaleFourierNet: {l2_loss(multiscale_pinn(pts), problem.truth_solution(pts)).item():.2%}')


-# It is pretty clear that the network has learned the correct solution, with also a very law error. Obviously a longer training and a more expressive neural network could improve the results!
+# It is pretty clear that the network has learned the correct solution, with also a very low error. Obviously a longer training and a more expressive neural network could improve the results!
 #
 # ## What's next?
 #
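Aside: the diff only shows fragments of `MultiscaleFourierNet`, so here is a minimal sketch of the architecture the tutorial builds, for orientation. The embedding dimensions and the two `sigma` values are assumptions for illustration, as is the exact `FourierFeatureEmbedding` signature:

    import torch
    from pina.model import FeedForward
    from pina.model.block import FourierFeatureEmbedding

    class MultiscaleFourierNet(torch.nn.Module):
        def __init__(self):
            super().__init__()
            # two embeddings with different sigma capture different frequency scales (values assumed)
            self.embedding1 = FourierFeatureEmbedding(input_dimension=1, output_dimension=100, sigma=1)
            self.embedding2 = FourierFeatureEmbedding(input_dimension=1, output_dimension=100, sigma=10)
            # one shared fully-connected network processes both embeddings
            self.layers = FeedForward(input_dimensions=100, output_dimensions=100, layers=[100])
            # a final linear layer maps the concatenated outputs to the scalar solution
            self.final_layer = torch.nn.Linear(200, 1)

        def forward(self, x):
            e1 = self.layers(self.embedding1(x))
            e2 = self.layers(self.embedding2(x))
            return self.final_layer(torch.cat([e1, e2], dim=-1))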
tutorials/tutorial14/tutorial.ipynb (vendored, 142 changes)
File diff suppressed because one or more lines are too long
tutorials/tutorial14/tutorial.py (vendored, 18 changes)
@@ -26,12 +26,15 @@ if IN_COLAB:
 get_ipython().run_line_magic('matplotlib', 'inline')

 import matplotlib.pyplot as plt
-plt.style.use('tableau-colorblind10')
+
 import torch
 import pina
+import warnings

 from pina.model.layers import PODBlock, RBFBlock
 from pina import LabelTensor

+warnings.filterwarnings('ignore')
+

 # In this tutorial we're gonna use the `LidCavity` class from the [Smithers](https://github.com/mathLab/Smithers) library, which contains a set of parametric solutions of the Lid-driven cavity problem in a square domain. The dataset consists of 300 snapshots of the parameter fields, which in this case are the magnitude of velocity and the pressure, and the corresponding parameter values $u$ and $p$. Each snapshot corresponds to a different value of the tangential velocity $\mu$ of the lid, which has been sampled uniformly between 0.01 m/s and 1 m/s.
 #
@@ -40,7 +43,6 @@ from pina import LabelTensor
 # In[2]:


-get_ipython().system('pip install git+https://github.com/mathLab/Smithers.git #if required --break-system-packages')
 import smithers
 from smithers.dataset import LidCavity
 dataset = LidCavity()
@@ -165,7 +167,7 @@ u_test_rbf = pod_rbfu(mu_test)
 
 # Finally we can calculate the relative error for our model:

-# In[ ]:
+# In[9]:


 relative_u_error_train = torch.norm(u_train_rbf - u_train)/torch.norm(u_train)
@@ -178,7 +180,7 @@ print(f' Test: {relative_u_error_test.item():e}')
 
 # The results are promising! Now let's visualise them, comparing four random predicted snapshots to the true ones:

-# In[ ]:
+# In[10]:


 import numpy as np
@@ -212,7 +214,7 @@ plt.show()
 
 # Overall we have reached a good level of approximation while avoiding time-consuming training procedures. Let's try doing the same to predict the pressure snapshots:

-# In[ ]:
+# In[11]:


 '''create the model'''
@@ -235,7 +237,7 @@ print(f' Test: {relative_p_error_test.item():e}')
 
 # Unfortunately here we obtain a very high relative test error, although this is likely due to the nature of the available data. Looking at the plots we can see that the pressure field is subject to high variations between subsequent snapshots, especially here:

-# In[ ]:
+# In[12]:


 fig, axs = plt.subplots(2, 3, figsize=(14, 6))
@@ -250,7 +252,7 @@ plt.show()
 
 # Or here:

-# In[ ]:
+# In[13]:


 fig, axs = plt.subplots(2, 3, figsize=(14, 6))
@@ -264,7 +266,7 @@ plt.show()
 
 # Scrolling through the velocity snapshots we can observe a more regular behaviour, with no such variations in subsequent snapshots. Moreover, if we decide not to consider the abovementioned "problematic" snapshots, we can already observe a huge improvement:

-# In[ ]:
+# In[14]:


 '''excluding problematic snapshots'''
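Aside: the error computations in this file exercise a POD+RBF reduced-order model built from `PODBlock` and `RBFBlock`, but the diff never shows the model class itself. Here is a minimal sketch consistent with the calls that do appear (`pod_rbfu(mu_test)`, `u_train_rbf`); the `fit`/`reduce`/`expand` method names and the kernel choice are assumptions for illustration:

    import torch
    from pina.model.layers import PODBlock, RBFBlock

    class PODRBF(torch.nn.Module):
        """Non-intrusive ROM: POD compresses the snapshots, an RBF
        interpolates the modal coefficients as a function of the parameter."""
        def __init__(self, pod_rank, rbf_kernel='thin_plate_spline'):
            super().__init__()
            self.pod = PODBlock(pod_rank)
            self.rbf = RBFBlock(kernel=rbf_kernel)

        def fit(self, params, snapshots):
            self.pod.fit(snapshots)                           # build the POD basis
            self.rbf.fit(params, self.pod.reduce(snapshots))  # interpolate coefficients

        def forward(self, params):
            return self.pod.expand(self.rbf(params))          # coefficients -> full field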
tutorials/tutorial2/tutorial.ipynb (vendored, 3 changes)
@@ -16,7 +16,7 @@
 },
 {
  "cell_type": "code",
-  "execution_count": 4,
+  "execution_count": null,
  "id": "ad0b8dd7",
  "metadata": {},
  "outputs": [],
@@ -43,7 +43,6 @@
 "from pina.domain import CartesianDomain\n",
 "from pina.equation import Equation, FixedValue\n",
 "from pina import Condition, LabelTensor\n",
-"from pina.callback import MetricTracker\n",
 "\n",
 "from lightning.pytorch.loggers import TensorBoardLogger\n",
 "\n",
tutorials/tutorial8/tutorial.ipynb (vendored, 94 changes)
File diff suppressed because one or more lines are too long
tutorials/tutorial8/tutorial.py (vendored, 13 changes)
@@ -16,7 +16,7 @@
 # Let's start with the necessary imports.
 # It's important to note the minimum PINA version to run this tutorial is the `0.1`.

-# In[1]:
+# In[ ]:


 ## routine needed to run the notebook on Google Colab
@@ -31,9 +31,9 @@ if IN_COLAB:
 get_ipython().run_line_magic('matplotlib', 'inline')

 import matplotlib.pyplot as plt
-plt.style.use('tableau-colorblind10')
 import torch
 import pina
+import warnings

 from pina.domain import CartesianDomain
 from pina.optim import TorchOptimizer
@@ -43,6 +43,8 @@ from pina import Condition, LabelTensor, Trainer
 from pina.model import FeedForward
 from pina.solver import SupervisedSolver

+warnings.filterwarnings('ignore')
+

 # We exploit the [Smithers](https://github.com/mathLab/Smithers) library to collect the parametric snapshots. In particular, we use the `NavierStokesDataset` class that contains a set of parametric solutions of the Navier-Stokes equations in a 2D L-shape domain. The parameter is the inflow velocity.
 # The dataset is composed by 500 snapshots of the velocity (along $x$, $y$, and the magnitude) and pressure fields, and the corresponding parameter values.
|
 # In[2]:


-get_ipython().system('pip install git+https://github.com/mathLab/Smithers.git')
 import smithers
 from smithers.dataset import NavierStokesDataset
 dataset = NavierStokesDataset()
@@ -83,7 +84,7 @@ u_train, u_test = u[:n_train], u[n_train:]
 p_train, p_test = p[:n_train], p[n_train:]


-# It is now time to define the problem! We inherit from `ParametricProblem` (since the space invariant typically of this methodology), just defining a simple *input-output* condition.
+# It is now time to define the problem! We inherit from `ParametricProblem` (since the space invariance typical of this methodology), just defining a simple *input-target* condition.

 # In[4]:
@@ -93,7 +94,7 @@ class SnapshotProblem(ParametricProblem):
     parameter_domain = CartesianDomain({'mu': [0, 100]})

     conditions = {
-        'io': Condition(input_points=p_train, output_points=u_train)
+        'io': Condition(input=p_train, target=u_train)
     }

 poisson_problem = SnapshotProblem()
@@ -158,7 +159,7 @@ pod_nn.fit_pod(u_train)
 pod_nn_stokes = SupervisedSolver(
     problem=poisson_problem,
     model=pod_nn,
-    optimizer=TorchOptimizer(torch.optim.Adam))
+    optimizer=TorchOptimizer(torch.optim.Adam, lr=0.0001))


 # Now that we have set the `Problem` and the `Model`, we have just to train the model and use it for predicting the test snapshots.
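Aside: optimizers follow the same wrapper pattern as schedulers elsewhere in this commit. Hyperparameters such as the learning rate travel inside `TorchOptimizer` instead of a separate kwargs dict. A minimal sketch:

    import torch
    from pina.optim import TorchOptimizer

    # wrap the optimizer class (not an instance) together with its hyperparameters
    optimizer = TorchOptimizer(torch.optim.Adam, lr=0.0001)
    # then: SupervisedSolver(problem=poisson_problem, model=pod_nn, optimizer=optimizer)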
tutorials/tutorial9/tutorial.ipynb (vendored, 85 changes)
File diff suppressed because one or more lines are too long
tutorials/tutorial9/tutorial.py (vendored, 2 changes)
@@ -28,7 +28,7 @@ if IN_COLAB:
 
 import torch
 import matplotlib.pyplot as plt
-plt.style.use('tableau-colorblind10')
+
 from pina import Condition
 from pina.problem import SpatialProblem
 from pina.operator import laplacian