Update tutorials 1 through 12 to current version 0.2
committed by Nicola Demo
parent 8b797d589a
commit d83ca3af6e
54  tutorials/tutorial1/tutorial.py (vendored)
@@ -53,7 +53,7 @@
 # What if our equation is also time-dependent? In this case, our `class` will inherit from both `SpatialProblem` and `TimeDependentProblem`:
 #

-# In[ ]:
+# In[1]:


 ## routine needed to run the notebook on Google Colab
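The next hunk only shows the signature of `TimeSpaceODE` in its context line. As a rough, hedged sketch (not part of this diff), a problem that is both space- and time-dependent could be declared along these lines with the 0.2-style API, assuming a `temporal_domain` attribute analogous to `spatial_domain`:

from pina.problem import SpatialProblem, TimeDependentProblem
from pina.domain import CartesianDomain

class TimeSpaceODE(SpatialProblem, TimeDependentProblem):
    output_variables = ['u']
    spatial_domain = CartesianDomain({'x': [0, 1]})
    temporal_domain = CartesianDomain({'t': [0, 1]})  # assumed attribute name
    # domains and conditions would follow, as in SimpleODE further down
    conditions = {}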
@@ -89,11 +89,11 @@ class TimeSpaceODE(SpatialProblem, TimeDependentProblem):
 #
 # Once the `Problem` class is initialized, we need to represent the differential equation in **PINA**. In order to do this, we need to load the **PINA** operators from the `pina.operator` module. Again, we'll consider Equation (1) and represent it in **PINA**:

-# In[2]:
+# In[ ]:


 from pina.problem import SpatialProblem
-from pina.operators import grad
+from pina.operator import grad
 from pina import Condition
 from pina.domain import CartesianDomain
 from pina.equation import Equation, FixedValue
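Equation (1) itself is not visible in this diff; the fixed value u(0) = 1 together with the `torch.exp` truth solution further down identifies it as the ODE du/dx = u on [0, 1]. A hedged sketch of the residual that `grad` is used for (the `components`/`d` keyword names are assumptions based on common PINA usage):

def ode_equation(input_, output_):
    # du/dx computed with the PINA grad operator
    u_x = grad(output_, input_, components=['u'], d=['x'])
    # network output u at the sampled points
    u = output_.extract(['u'])
    # residual of du/dx - u = 0
    return u_x - u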
@@ -106,6 +106,11 @@ class SimpleODE(SpatialProblem):
     output_variables = ['u']
     spatial_domain = CartesianDomain({'x': [0, 1]})

+    domains = {
+        'x0': CartesianDomain({'x': 0.}),
+        'D': CartesianDomain({'x': [0, 1]})
+    }
+
     # defining the ode equation
     def ode_equation(input_, output_):
@@ -120,13 +125,10 @@ class SimpleODE(SpatialProblem):

     # conditions to hold
     conditions = {
-        'x0': Condition(location=CartesianDomain({'x': 0.}), equation=FixedValue(1)), # We fix initial condition to value 1
-        'D': Condition(location=CartesianDomain({'x': [0, 1]}), equation=Equation(ode_equation)), # We wrap the python equation using Equation
+        'bound_cond': Condition(domain='x0', equation=FixedValue(1.)),
+        'phys_cond': Condition(domain='D', equation=Equation(ode_equation))
     }

-    # sampled points (see below)
-    input_pts = None
-
     # defining the true solution
     def truth_solution(self, pts):
         return torch.exp(pts.extract(['x']))
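As a hedged clarification, `FixedValue(1.)` on the `'x0'` domain plays the role of the boundary residual u - 1 = 0; spelled out by hand with `Equation`, it would be conceptually similar to:

def initial_condition(input_, output_):
    # residual forcing u = 1 at x = 0, same role as FixedValue(1.)
    return output_.extract(['u']) - 1.0

# 'bound_cond': Condition(domain='x0', equation=Equation(initial_condition))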
@@ -149,14 +151,14 @@ problem = SimpleODE()


 # sampling 20 points in [0, 1] through discretization in all domains
-problem.discretise_domain(n=20, mode='grid', variables=['x'], locations='all')
+problem.discretise_domain(n=20, mode='grid', domains='all')

 # sampling 20 points in (0, 1) through latin hypercube sampling in D, and 1 point in x0
-problem.discretise_domain(n=20, mode='latin', variables=['x'], locations=['D'])
-problem.discretise_domain(n=1, mode='random', variables=['x'], locations=['x0'])
+problem.discretise_domain(n=20, mode='latin', domains=['D'])
+problem.discretise_domain(n=1, mode='random', domains=['x0'])

 # sampling 20 points in (0, 1) randomly
-problem.discretise_domain(n=20, mode='random', variables=['x'])
+problem.discretise_domain(n=20, mode='random')


 # We are going to use Latin hypercube points for sampling. We need to sample in all the domains of the conditions; in our case, we sample in `D` and `x0`.
@@ -165,8 +167,8 @@ problem.discretise_domain(n=20, mode='random', variables=['x'])


 # sampling for training
-problem.discretise_domain(1, 'random', locations=['x0'])
-problem.discretise_domain(20, 'lh', locations=['D'])
+problem.discretise_domain(20, 'random', domains=['x0']) # TODO check
+problem.discretise_domain(20, 'lh', domains=['D'])


 # The points are saved in a Python `dict`, and can be accessed by calling the attribute `discretised_domains` of the problem
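A small, hedged sanity check of the sampling above, treating `discretised_domains` (accessed in the next hunk) as a name-to-tensor mapping:

# number and shape of sampled points per domain
for name, points in problem.discretised_domains.items():
    print(name, points.shape)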
@@ -174,26 +176,26 @@ problem.discretise_domain(20, 'lh', locations=['D'])
 # In[5]:


-print('Input points:', problem.input_pts)
-print('Input points labels:', problem.input_pts['D'].labels)
+print('Input points:', problem.discretised_domains)
+print('Input points labels:', problem.discretised_domains['D'].labels)


 # To visualize the sampled points we can use the `.plot_samples` method of the `Plotter` class

-# In[5]:
+# In[6]:


-from pina import Plotter
+#from pina import Plotter

-pl = Plotter()
-pl.plot_samples(problem=problem)
+#pl = Plotter()
+#pl.plot_samples(problem=problem)


 # ## Perform a small training

 # Once we have defined the problem and generated the data, we can start the modelling. Here we will choose a `FeedForward` neural network available in `pina.model`, and we will train using the `PINN` solver from `pina.solvers`. We highlight that this training is fairly simple; for more advanced settings, consider the tutorials in the ***Physics Informed Neural Networks*** section of ***Tutorials***. For training we use the `Trainer` class from `pina.trainer`. Here we show a very short training and some methods for plotting the results. Notice that by default all relevant metrics (e.g. the MSE during training) are tracked using a `lightning` logger, by default `CSVLogger`. If you want to track the metrics yourself without a logger, use `pina.callbacks.MetricTracker`.

-# In[ ]:
+# In[7]:


 from pina import Trainer
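The model, solver, and trainer construction lies outside the changed hunks. A minimal, hedged sketch of the setup the paragraph above describes; constructor arguments such as `input_dimensions`, `output_dimensions`, and `max_epochs` are assumptions and may differ in the 0.2 API:

from pina import Trainer
from pina.model import FeedForward
from pina.solvers import PINN

# small fully connected network mapping the spatial input to u
model = FeedForward(
    input_dimensions=len(problem.input_variables),
    output_dimensions=len(problem.output_variables),
)

# physics-informed solver wrapping the problem and the model
pinn = PINN(problem=problem, model=model)

# short training run; metrics go to the default CSVLogger
trainer = Trainer(solver=pinn, max_epochs=1000)
trainer.train()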
@@ -222,7 +224,7 @@ trainer.train()

 # After the training we can inspect the trainer's logged metrics (by default **PINA** logs the mean square error residual loss). The logged metrics can be accessed online using one of the `Lightning` loggers. The final loss can be accessed through `trainer.logged_metrics`.

-# In[7]:
+# In[8]:


 # inspecting final loss
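A hedged example of turning the logged metrics into plain Python numbers (the metric key names depend on the solver and logger configuration):

# trainer.logged_metrics maps metric names to scalar tensors
final_metrics = {name: float(value) for name, value in trainer.logged_metrics.items()}
print(final_metrics)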
@@ -231,19 +233,19 @@ trainer.logged_metrics

 # By using the `Plotter` class from **PINA** we can also do some qualitative plots of the solution.

-# In[8]:
+# In[9]:


 # plotting the solution
-pl.plot(solver=pinn)
+#pl.plot(solver=pinn)


 # The predicted solution overlaps the true one, and the two are practically indistinguishable. We can also easily plot the loss:

-# In[9]:
+# In[10]:


-pl.plot_loss(trainer=trainer, label = 'mean_loss', logy=True)
+#pl.plot_loss(trainer=trainer, label = 'mean_loss', logy=True)


 # As we can see, the loss has not reached a minimum, suggesting that we could train for longer.
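Since the `Plotter` calls are commented out in this version, a hedged alternative for the loss plot is to read the default `CSVLogger` output directly; the file path and the loss column name below are assumptions, so inspect the logged file to confirm them:

import pandas as pd
import matplotlib.pyplot as plt

# Lightning's CSVLogger writes metrics.csv under lightning_logs/version_*/
metrics = pd.read_csv('lightning_logs/version_0/metrics.csv')
loss_col = [c for c in metrics.columns if 'loss' in c][0]  # pick a loss column
metrics = metrics.dropna(subset=[loss_col])
plt.semilogy(metrics['step'], metrics[loss_col])
plt.xlabel('step')
plt.ylabel(loss_col)
plt.show()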