fix old codes
@@ -12,7 +12,8 @@ The problem is written as: :raw-latex:`\begin{equation}
 \Delta u = \sin{(\pi x)} \sin{(\pi y)} \text{ in } D, \\
 u = 0 \text{ on } \Gamma_1 \cup \Gamma_2 \cup \Gamma_3 \cup \Gamma_4,
 \end{cases}
-\end{equation}` where :math:`D` is a square domain :math:`[0,1]^2`, and
+\end{equation}`
+where :math:`D` is a square domain :math:`[0,1]^2`, and
 :math:`\Gamma_i`, with :math:`i=1,...,4`, are the boundaries of the
 square.
@@ -56,11 +57,16 @@ be compared with the predicted one.
             return output_['u'] - value
 
         conditions = {
-            'gamma1': Condition(Span({'x': bounds_x, 'y': bounds_y[-1]}), nil_dirichlet),
-            'gamma2': Condition(Span({'x': bounds_x, 'y': bounds_y[0]}), nil_dirichlet),
-            'gamma3': Condition(Span({'x': bounds_x[-1], 'y': bounds_y}), nil_dirichlet),
-            'gamma4': Condition(Span({'x': bounds_x[0], 'y': bounds_y}), nil_dirichlet),
-            'D': Condition(Span({'x': bounds_x, 'y': bounds_y}), laplace_equation),
+            'gamma1': Condition(
+                Span({'x': bounds_x, 'y': bounds_y[-1]}), nil_dirichlet),
+            'gamma2': Condition(
+                Span({'x': bounds_x, 'y': bounds_y[0]}), nil_dirichlet),
+            'gamma3': Condition(
+                Span({'x': bounds_x[-1], 'y': bounds_y}), nil_dirichlet),
+            'gamma4': Condition(
+                Span({'x': bounds_x[0], 'y': bounds_y}), nil_dirichlet),
+            'D': Condition(
+                Span({'x': bounds_x, 'y': bounds_y}), laplace_equation),
         }
 
         def poisson_sol(self, x, y):
             return -(np.sin(x*np.pi)*np.sin(y*np.pi))/(2*np.pi**2)
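
For reference, the closed-form solution returned by ``poisson_sol`` can be verified directly: each sine factor of :math:`u(x, y) = -\sin{(\pi x)}\sin{(\pi y)}/(2\pi^2)` picks up a factor :math:`-\pi^2` under two differentiations, so :math:`\Delta u = -\frac{-2\pi^2}{2\pi^2}\sin{(\pi x)}\sin{(\pi y)} = \sin{(\pi x)}\sin{(\pi y)}`, which matches the forcing term, and :math:`u` vanishes on all four boundaries.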
@@ -91,19 +97,13 @@ training phase of the PINN.
         input_variables=poisson_problem.input_variables)
 
     pinn = PINN(poisson_problem, model, lr=0.003, regularizer=1e-8)
-    pinn.span_pts(20, 'grid', ['D'])
-    pinn.span_pts(20, 'grid', ['gamma1', 'gamma2', 'gamma3', 'gamma4'])
+    pinn.span_pts(20, 'grid', locations=['gamma1', 'gamma2', 'gamma3', 'gamma4'])
+    pinn.span_pts(20, 'grid', locations=['D'])
     pinn.train(5000, 100)
 
 .. parsed-literal::
 
     2.384537034558816e-05
 
 The loss trend is saved in a dedicated txt file located in
 *tutorial1_files*.
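
Assuming the history file stores one ``epoch loss`` pair per line (the format the run scripts in this commit write from ``pinn.history``), the trend can be inspected with something like:

.. code:: python

    # Sketch: plot the saved loss trend. The file name and the
    # two-column "epoch loss" layout are assumptions based on the
    # history files written by the scripts in this commit.
    import numpy as np
    import matplotlib.pyplot as plt

    epochs, losses = np.loadtxt('tutorial1_files/poisson_history.txt', unpack=True)
    plt.semilogy(epochs, losses)
    plt.xlabel('epoch')
    plt.ylabel('loss')
    plt.show()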
@@ -160,8 +160,8 @@ the cell below is also in this case the final loss of PINN.
             super(myFeature, self).__init__()
 
         def forward(self, x):
-            return (torch.sin(x['x']*torch.pi) *
-                    torch.sin(x['y']*torch.pi))
+            return LabelTensor(torch.sin(x.extract(['x'])*torch.pi) *
+                               torch.sin(x.extract(['y'])*torch.pi), 'k')
 
     feat = [myFeature()]
     model_feat = FeedForward(layers=[10, 10],
@@ -170,18 +170,13 @@ the cell below is also in this case the final loss of PINN.
                              extra_features=feat)
 
     pinn_feat = PINN(poisson_problem, model_feat, lr=0.003, regularizer=1e-8)
-    pinn_feat.span_pts(20, 'grid', ['D'])
-    pinn_feat.span_pts(20, 'grid', ['gamma1', 'gamma2', 'gamma3', 'gamma4'])
+    pinn_feat.span_pts(20, 'grid', locations=['gamma1', 'gamma2', 'gamma3', 'gamma4'])
+    pinn_feat.span_pts(20, 'grid', locations=['D'])
     pinn_feat.train(5000, 100)
 
 .. parsed-literal::
 
     7.93498870023341e-07
 
 The losses are saved in a txt file as for the basic Poisson case.
@@ -208,8 +203,8 @@ represented below.
 The problem solution with learnable extra-features
 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 
-Another way to predict the solution is to add a parametric forcing term
-of the Laplace equation as an extra-feature. The parameters added in the
+Another way to predict the solution is to add a parametric extra-feature.
+The parameters added in the
 expression of the extra-feature are learned during the training phase of
 the neural network. For example, considering two parameters, the
 parametric extra-feature is written as:
@@ -218,75 +213,26 @@ parametric extra-feature is written as:
 \mathbf{k}(\mathbf{x}, \mathbf{y}) = \beta \sin{(\alpha \mathbf{x})} \sin{(\alpha \mathbf{y})}
 \end{equation}`
 
 The new Poisson problem is defined in the dedicated class
 *ParametricPoisson*, where the domain is no longer only spatial, but
 includes the parameters’ space. In our case, the parameters’ bounds are
 0 and 30.
 
 .. code:: ipython3
 
     from pina.problem import ParametricProblem
 
     class ParametricPoisson(SpatialProblem, ParametricProblem):
         bounds_x = [0, 1]
         bounds_y = [0, 1]
         bounds_alpha = [0, 30]
         bounds_beta = [0, 30]
         spatial_variables = ['x', 'y']
         parameters = ['alpha', 'beta']
         output_variables = ['u']
         domain = Span({'x': bounds_x, 'y': bounds_y})
 
         def laplace_equation(input_, output_):
             force_term = (torch.sin(input_['x']*torch.pi) *
                           torch.sin(input_['y']*torch.pi))
             return nabla(output_['u'], input_).flatten() - force_term
 
         def nil_dirichlet(input_, output_):
             value = 0.0
             return output_['u'] - value
 
         conditions = {
             'gamma1': Condition(
                 Span({'x': bounds_x, 'y': bounds_y[1], 'alpha': bounds_alpha, 'beta': bounds_beta}),
                 nil_dirichlet),
             'gamma2': Condition(
                 Span({'x': bounds_x, 'y': bounds_y[0], 'alpha': bounds_alpha, 'beta': bounds_beta}),
                 nil_dirichlet),
             'gamma3': Condition(
                 Span({'x': bounds_x[1], 'y': bounds_y, 'alpha': bounds_alpha, 'beta': bounds_beta}),
                 nil_dirichlet),
             'gamma4': Condition(
                 Span({'x': bounds_x[0], 'y': bounds_y, 'alpha': bounds_alpha, 'beta': bounds_beta}),
                 nil_dirichlet),
             'D': Condition(
                 Span({'x': bounds_x, 'y': bounds_y, 'alpha': bounds_alpha, 'beta': bounds_beta}),
                 laplace_equation),
         }
 
         def poisson_sol(self, x, y):
             return -(np.sin(x*np.pi)*np.sin(y*np.pi))/(2*np.pi**2)
 
 Here, as done for the other cases, the new parametric feature is defined
-and the neural network is re-initialized and trained, considering as two
-additional parameters :math:`\alpha` and :math:`\beta`.
+and the neural network is re-initialized and trained.
 
 .. code:: ipython3
 
     param_poisson_problem = ParametricPoisson()
 
-    class myFeature(torch.nn.Module):
+    class LearnableFeature(torch.nn.Module):
         """
         """
         def __init__(self):
-            super(myFeature, self).__init__()
+            super().__init__()
             self.beta = torch.nn.Parameter(torch.Tensor([1.0]))
             self.alpha = torch.nn.Parameter(torch.Tensor([1.0]))
 
         def forward(self, x):
-            return (x['beta']*torch.sin(x['alpha']*x['x']*torch.pi)*
-                    torch.sin(x['alpha']*x['y']*torch.pi))
+            return LabelTensor(
+                self.beta*torch.sin(self.alpha*x.extract(['x'])*torch.pi)*
+                torch.sin(self.alpha*x.extract(['y'])*torch.pi),
+                'k')
 
-    feat = [myFeature()]
+    feat = [LearnableFeature()]
     model_learn = FeedForward(layers=[10, 10],
                               output_variables=param_poisson_problem.output_variables,
                               input_variables=param_poisson_problem.input_variables,
@@ -300,12 +246,6 @@ additional parameters :math:`\alpha` and :math:`\beta`.
 
 .. parsed-literal::
 
     3.265163986679126e-06
 
 The losses are saved as for the other two cases trained above.
 
 .. code:: ipython3
@@ -316,7 +256,7 @@ The losses are saved as for the other two cases trained above.
     pinn_learn.save_state('tutorial1_files/pina.poisson_learn_feat')
 
 Here the plots for the prediction error (below on the right) show that
-the prediction coming from the **parametric PINN** is more accurate than
+the prediction coming from the latter version is more accurate than
 the one of the basic version of PINN.
 
 .. code:: ipython3
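
The plots themselves can be produced with the ``Plotter`` class, whose ``plot`` signature is touched later in this commit; a minimal sketch (the exact tutorial cell is not reproduced in this hunk):

.. code:: python

    # Sketch based on the Plotter.plot signature changed in this commit.
    from pina import Plotter

    plotter = Plotter()
    plotter.plot(pinn, method='contourf', component='u')        # basic PINN
    plotter.plot(pinn_learn, method='contourf', component='u')  # learnable features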
@@ -14,13 +14,12 @@ class Burgers1D(TimeDependentProblem, SpatialProblem):
     domain = Span({'x': [-1, 1], 't': [0, 1]})
 
     def burger_equation(input_, output_):
-        grad_u = grad(output_.extract(['u']), input_)
-        grad_x = grad_u.extract(['x'])
-        grad_t = grad_u.extract(['t'])
-        gradgrad_u_x = grad(grad_u.extract(['x']), input_)
+        du = grad(output_, input_)
+        ddu = grad(du, input_, components=['dudx'])
         return (
-            grad_u.extract(['t']) + output_.extract(['u'])*grad_u.extract(['x']) -
-            (0.01/torch.pi)*gradgrad_u_x.extract(['x'])
+            du.extract(['dudt']) +
+            output_.extract(['u'])*du.extract(['dudx']) -
+            (0.01/torch.pi)*ddu.extract(['ddudxdx'])
         )
 
     def nil_dirichlet(input_, output_):
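
The ``dudx``/``ddudxdx`` strings above come from the label convention of the reworked ``grad`` operator later in this commit: each derivative column is named ``d<component>d<variable>``. A minimal sketch, assuming inputs ``['x', 't']`` and output ``['u']`` (values illustrative):

.. code:: python

    # Sketch of the derivative-label convention used by burger_equation.
    import torch
    from pina import LabelTensor
    from pina.operators import grad

    pts = LabelTensor(torch.rand(10, 2, requires_grad=True), ['x', 't'])
    u = LabelTensor(pts.extract(['x'])**2 * pts.extract(['t']), ['u'])

    du = grad(u, pts)                         # labels: ['dudx', 'dudt']
    ddu = grad(du, pts, components=['dudx'])  # labels: ['ddudxdx', 'ddudxdt']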
@@ -1,52 +1,59 @@
 import numpy as np
 import torch
-from pina.problem import Problem
-from pina.segment import Segment
-from pina.cube import Cube
-from pina.problem2d import Problem2D
-
-xmin, xmax, ymin, ymax = -1, 1, -1, 1
-
-class ParametricEllipticOptimalControl(Problem2D):
-
-    def __init__(self, alpha=1):
-
-        def term1(input_, param_, output_):
-            grad_p = self.grad(output_['p'], input_)
-            gradgrad_p_x1 = self.grad(grad_p['x1'], input_)
-            gradgrad_p_x2 = self.grad(grad_p['x2'], input_)
-            #print('mu', input_['mu'])
-            return output_['y'] - input_['mu'] - (gradgrad_p_x1['x1'] + gradgrad_p_x2['x2'])
-
-        def term2(input_, param_, output_):
-            grad_y = self.grad(output_['y'], input_)
-            gradgrad_y_x1 = self.grad(grad_y['x1'], input_)
-            gradgrad_y_x2 = self.grad(grad_y['x2'], input_)
-            return - (gradgrad_y_x1['x1'] + gradgrad_y_x2['x2']) - output_['u_param']
-
-        def term3(input_, param_, output_):
-            #print('a', input_['alpha'], output_['p'], output_['u_param'])
-            return output_['p'] - output_['u_param']*input_['alpha']
-
-        def nil_dirichlet(input_, param_, output_):
-            y_value = 0.0
-            p_value = 0.0
-            return torch.abs(output_['y'] - y_value) + torch.abs(output_['p'] - p_value)
-
-        self.conditions = {
-            'gamma1': {'location': Segment((xmin, ymin), (xmax, ymin)), 'func': nil_dirichlet},
-            'gamma2': {'location': Segment((xmax, ymin), (xmax, ymax)), 'func': nil_dirichlet},
-            'gamma3': {'location': Segment((xmax, ymax), (xmin, ymax)), 'func': nil_dirichlet},
-            'gamma4': {'location': Segment((xmin, ymax), (xmin, ymin)), 'func': nil_dirichlet},
-            'D1': {'location': Cube([[xmin, xmax], [ymin, ymax]]), 'func': [term1, term2]},
-            #'D2': {'location': Cube([[0, 1], [0, 1]]), 'func': term2},
-            #'D3': {'location': Cube([[0, 1], [0, 1]]), 'func': term3}
-        }
-
-        self.input_variables = ['x1', 'x2']
-        self.output_variables = ['u', 'p', 'y']
-        self.parameters = ['mu', 'alpha']
-        self.spatial_domain = Cube([[xmin, xmax], [xmin, xmax]])
-        self.parameter_domain = np.array([[0.5, 3], [0.0001, 1]])
+from pina import Span, Condition
+from pina.problem import SpatialProblem, ParametricProblem
+from pina.operators import grad, nabla
+
+
+class ParametricEllipticOptimalControl(SpatialProblem, ParametricProblem):
+
+    xmin, xmax, ymin, ymax = -1, 1, -1, 1
+    amin, amax = 0.0001, 1
+    mumin, mumax = 0.5, 3
+    mu_range = [mumin, mumax]
+    a_range = [amin, amax]
+    x_range = [xmin, xmax]
+    y_range = [ymin, ymax]
+
+    spatial_variables = ['x1', 'x2']
+    parameters = ['mu', 'alpha']
+    output_variables = ['u', 'p', 'y']
+    domain = Span({
+        'x1': x_range, 'x2': y_range, 'mu': mu_range, 'alpha': a_range})
+
+    def term1(input_, output_):
+        laplace_p = nabla(output_, input_, components=['p'], d=['x1', 'x2'])
+        return output_.extract(['y']) - input_.extract(['mu']) - laplace_p
+
+    def term2(input_, output_):
+        laplace_y = nabla(output_, input_, components=['y'], d=['x1', 'x2'])
+        return - laplace_y - output_.extract(['u_param'])
+
+    def state_dirichlet(input_, output_):
+        y_exp = 0.0
+        return output_.extract(['y']) - y_exp
+
+    def adj_dirichlet(input_, output_):
+        p_exp = 0.0
+        return output_.extract(['p']) - p_exp
+
+    conditions = {
+        'gamma1': Condition(
+            Span({'x1': x_range, 'x2': 1, 'mu': mu_range, 'alpha': a_range}),
+            [state_dirichlet, adj_dirichlet]),
+        'gamma2': Condition(
+            Span({'x1': x_range, 'x2': -1, 'mu': mu_range, 'alpha': a_range}),
+            [state_dirichlet, adj_dirichlet]),
+        'gamma3': Condition(
+            Span({'x1': 1, 'x2': y_range, 'mu': mu_range, 'alpha': a_range}),
+            [state_dirichlet, adj_dirichlet]),
+        'gamma4': Condition(
+            Span({'x1': -1, 'x2': y_range, 'mu': mu_range, 'alpha': a_range}),
+            [state_dirichlet, adj_dirichlet]),
+        'D': Condition(
+            Span({'x1': x_range, 'x2': y_range,
+                  'mu': mu_range, 'alpha': a_range}),
+            [term1, term2]),
+    }
@@ -14,8 +14,8 @@ class ParametricPoisson(SpatialProblem, ParametricProblem):
 
     def laplace_equation(input_, output_):
         force_term = torch.exp(
-            - 2*(input_.extract(['x']) - input_.extract(['mu1']))**2 - 2*(input_.extract(['y']) -
-            input_.extract(['mu2']))**2)
+            - 2*(input_.extract(['x']) - input_.extract(['mu1']))**2
+            - 2*(input_.extract(['y']) - input_.extract(['mu2']))**2)
         return nabla(output_.extract(['u']), input_) - force_term
 
     def nil_dirichlet(input_, output_):
@@ -23,11 +23,11 @@ class Poisson(SpatialProblem):
         return output_.extract(['u']) - value
 
     conditions = {
-        'gamma1': Condition(Span({'x': [-1, 1], 'y': 1}), nil_dirichlet),
-        'gamma2': Condition(Span({'x': [-1, 1], 'y': -1}), nil_dirichlet),
-        'gamma3': Condition(Span({'x': 1, 'y': [-1, 1]}), nil_dirichlet),
-        'gamma4': Condition(Span({'x': -1, 'y': [-1, 1]}), nil_dirichlet),
-        'D': Condition(Span({'x': [-1, 1], 'y': [-1, 1]}), laplace_equation),
+        'gamma1': Condition(Span({'x': [0, 1], 'y': 1}), nil_dirichlet),
+        'gamma2': Condition(Span({'x': [0, 1], 'y': 0}), nil_dirichlet),
+        'gamma3': Condition(Span({'x': 1, 'y': [0, 1]}), nil_dirichlet),
+        'gamma4': Condition(Span({'x': 0, 'y': [0, 1]}), nil_dirichlet),
+        'D': Condition(Span({'x': [0, 1], 'y': [0, 1]}), laplace_equation),
     }
 
     def poisson_sol(self, x, y):
@@ -2,9 +2,9 @@ import argparse
 import torch
 from torch.nn import Softplus
 
-from pina import PINN, Plotter
+from pina import PINN, Plotter, LabelTensor
 from pina.model import FeedForward
-from problems.burgers import Burgers1D
+from burger2 import Burgers1D
 
 
 class myFeature(torch.nn.Module):
@@ -16,7 +16,7 @@ class myFeature(torch.nn.Module):
         self.idx = idx
 
     def forward(self, x):
-        return torch.sin(torch.pi * x[:, self.idx])
+        return LabelTensor(torch.sin(torch.pi * x.extract(['x'])), ['sin(x)'])
 
 
 if __name__ == "__main__":
@@ -45,12 +45,14 @@ if __name__ == "__main__":
         model,
         lr=0.006,
         error_norm='mse',
-        regularizer=0,
-        lr_accelerate=None)
+        regularizer=0)
 
     if args.s:
-        pinn.span_pts(2000, 'latin', ['D'])
-        pinn.span_pts(150, 'random', ['gamma1', 'gamma2', 't0'])
+        pinn.span_pts(
+            {'n': 200, 'mode': 'random', 'variables': 't'},
+            {'n': 20, 'mode': 'random', 'variables': 'x'},
+            locations=['D'])
+        pinn.span_pts(150, 'random', locations=['gamma1', 'gamma2', 't0'])
         pinn.train(5000, 100)
         pinn.save_state('pina.burger.{}.{}'.format(args.id_run, args.features))
     else:
@@ -1,16 +1,11 @@
+import argparse
 import numpy as np
 import torch
-import argparse
-from pina.pinn import PINN
-from pina.ppinn import ParametricPINN as pPINN
-from pina.label_tensor import LabelTensor
 from torch.nn import ReLU, Tanh, Softplus
-from pina.adaptive_functions.adaptive_softplus import AdaptiveSoftplus
-from problems.parametric_elliptic_optimal_control_alpha_variable import ParametricEllipticOptimalControl
-from pina.multi_deep_feed_forward import MultiDeepFeedForward
-from pina.deep_feed_forward import DeepFeedForward
-
-alpha = 1
+from pina import PINN, LabelTensor
+from parametric_elliptic_optimal_control_alpha_variable2 import ParametricEllipticOptimalControl
+from pina.model import MultiFeedForward, FeedForward
 
 
 class myFeature(torch.nn.Module):
     """
@@ -21,46 +16,21 @@ class myFeature(torch.nn.Module):
         super(myFeature, self).__init__()
 
     def forward(self, x):
-        return (-x[:, 0]**2+1) * (-x[:, 1]**2+1)
+        t = (-x.extract(['x1'])**2+1) * (-x.extract(['x2'])**2+1)
+        return LabelTensor(t, ['k0'])
 
 
-class CustomMultiDFF(MultiDeepFeedForward):
+class CustomMultiDFF(MultiFeedForward):
 
     def __init__(self, dff_dict):
         super().__init__(dff_dict)
 
     def forward(self, x):
         out = self.uu(x)
-        p = LabelTensor((out['u_param'] * x[:, 3]).reshape(-1, 1), ['p'])
-        a = LabelTensor.hstack([out, p])
-        return a
-
-model = CustomMultiDFF(
-    {
-        'uu': {
-            'input_variables': ['x1', 'x2', 'mu', 'alpha'],
-            'output_variables': ['u_param', 'y'],
-            'layers': [40, 40, 20],
-            'func': Softplus,
-            'extra_features': [myFeature()],
-        },
-        # 'u_param': {
-        #     'input_variables': ['u', 'mu'],
-        #     'output_variables': ['u_param'],
-        #     'layers': [],
-        #     'func': None
-        # },
-        # 'p': {
-        #     'input_variables': ['u'],
-        #     'output_variables': ['p'],
-        #     'layers': [10],
-        #     'func': None
-        # },
-    }
-)
+        p = LabelTensor((out.extract(['u_param']) * x.extract(['alpha'])), ['p'])
+        return out.append(p)
 
 
-opc = ParametricEllipticOptimalControl(alpha)
-
 if __name__ == "__main__":
@@ -70,138 +40,39 @@ if __name__ == "__main__":
     group.add_argument("-l", "-load", action="store_true")
     args = parser.parse_args()
 
-    # model = DeepFeedForward(
-    #     layers=[40, 40, 20],
-    #     output_variables=['u_param', 'y', 'p'],
-    #     input_variables=opc.input_variables+['mu', 'alpha'],
-    #     func=Softplus,
-    #     extra_features=[myFeature()]
-    # )
+    opc = ParametricEllipticOptimalControl()
+    model = CustomMultiDFF(
+        {
+            'uu': {
+                'input_variables': ['x1', 'x2', 'mu', 'alpha'],
+                'output_variables': ['u_param', 'y'],
+                'layers': [40, 40, 20],
+                'func': Softplus,
+                'extra_features': [myFeature()],
+            },
+        }
+    )
 
-    pinn = pPINN(
+    pinn = PINN(
         opc,
         model,
         lr=0.002,
         error_norm='mse',
-        regularizer=1e-8,
-        lr_accelerate=None)
+        regularizer=1e-8)
 
     if args.s:
-        pinn.span_pts(30, 'grid', ['D1'])
-        pinn.span_pts(50, 'grid', ['gamma1', 'gamma2', 'gamma3', 'gamma4'])
-        pinn.train(10000, 20)
-        # with open('ocp_wrong_history.txt', 'w') as file_:
-        #     for i, losses in enumerate(pinn.history):
-        #         file_.write('{} {}\n'.format(i, sum(losses).item()))
+        pinn.span_pts(
+            {'variables': ['x1', 'x2'], 'mode': 'random', 'n': 100},
+            {'variables': ['mu', 'alpha'], 'mode': 'grid', 'n': 5},
+            locations=['D'])
+        pinn.span_pts(
+            {'variables': ['x1', 'x2'], 'mode': 'grid', 'n': 20},
+            {'variables': ['mu', 'alpha'], 'mode': 'grid', 'n': 5},
+            locations=['gamma1', 'gamma2', 'gamma3', 'gamma4'])
+
+        pinn.train(10000, 20)
         pinn.save_state('pina.ocp')
 
     else:
-        pinn.load_state('working.pina.ocp')
+        pinn.load_state('pina.ocp')
 
-        import matplotlib
-        matplotlib.use('GTK3Agg')
-        import matplotlib.pyplot as plt
-
-        # res = 64
-        # param = torch.tensor([[3., 1]])
-        # pts_container = []
-        # for mn, mx in [[-1, 1], [-1, 1]]:
-        #     pts_container.append(np.linspace(mn, mx, res))
-        # grids_container = np.meshgrid(*pts_container)
-        # unrolled_pts = torch.tensor([t.flatten() for t in grids_container]).T
-        # unrolled_pts = torch.cat([unrolled_pts, param.double().repeat(unrolled_pts.shape[0], 1).reshape(-1, 2)], axis=1)
-
-        # unrolled_pts = LabelTensor(unrolled_pts, ['x1', 'x2', 'mu', 'alpha'])
-        # Z_pred = pinn.model(unrolled_pts.tensor)
-        # print(Z_pred.tensor.shape)
-
-        # plt.subplot(2, 3, 1)
-        # plt.pcolor(Z_pred['y'].reshape(res, res).detach())
-        # plt.colorbar()
-        # plt.subplot(2, 3, 2)
-        # plt.pcolor(Z_pred['u_param'].reshape(res, res).detach())
-        # plt.colorbar()
-        # plt.subplot(2, 3, 3)
-        # plt.pcolor(Z_pred['p'].reshape(res, res).detach())
-        # plt.colorbar()
-        # with open('ocp_mu3_a1_plot.txt', 'w') as f_:
-        #     f_.write('x y u p ys\n')
-        #     for (x, y), tru, pre, e in zip(unrolled_pts[:, :2],
-        #                                    Z_pred['u_param'].reshape(-1, 1),
-        #                                    Z_pred['p'].reshape(-1, 1),
-        #                                    Z_pred['y'].reshape(-1, 1),
-        #                                    ):
-        #         f_.write('{} {} {} {} {}\n'.format(x.item(), y.item(), tru.item(), pre.item(), e.item()))
-
-        # param = torch.tensor([[3.0, 0.01]])
-        # unrolled_pts = torch.tensor([t.flatten() for t in grids_container]).T
-        # unrolled_pts = torch.cat([unrolled_pts, param.double().repeat(unrolled_pts.shape[0], 1).reshape(-1, 2)], axis=1)
-        # unrolled_pts = LabelTensor(unrolled_pts, ['x1', 'x2', 'mu', 'alpha'])
-        # Z_pred = pinn.model(unrolled_pts.tensor)
-
-        # plt.subplot(2, 3, 4)
-        # plt.pcolor(Z_pred['y'].reshape(res, res).detach())
-        # plt.colorbar()
-        # plt.subplot(2, 3, 5)
-        # plt.pcolor(Z_pred['u_param'].reshape(res, res).detach())
-        # plt.colorbar()
-        # plt.subplot(2, 3, 6)
-        # plt.pcolor(Z_pred['p'].reshape(res, res).detach())
-        # plt.colorbar()
-
-        # plt.show()
-        # with open('ocp_mu3_a0.01_plot.txt', 'w') as f_:
-        #     f_.write('x y u p ys\n')
-        #     for (x, y), tru, pre, e in zip(unrolled_pts[:, :2],
-        #                                    Z_pred['u_param'].reshape(-1, 1),
-        #                                    Z_pred['p'].reshape(-1, 1),
-        #                                    Z_pred['y'].reshape(-1, 1),
-        #                                    ):
-        #         f_.write('{} {} {} {} {}\n'.format(x.item(), y.item(), tru.item(), pre.item(), e.item()))
-
-        y = {}
-        u = {}
-        for alpha in [0.01, 0.1, 1]:
-            y[alpha] = []
-            u[alpha] = []
-            for p in np.linspace(0.5, 3, 32):
-                a = pinn.model(LabelTensor(torch.tensor([[0, 0, p, alpha]]).double(), ['x1', 'x2', 'mu', 'alpha']).tensor)
-                y[alpha].append(a['y'].detach().numpy()[0])
-                u[alpha].append(a['u_param'].detach().numpy()[0])
-
-        plt.plot(np.linspace(0.5, 3, 32), u[1], label='u')
-        plt.plot(np.linspace(0.5, 3, 32), u[0.01], label='u')
-        plt.plot(np.linspace(0.5, 3, 32), u[0.1], label='u')
-        plt.plot([1, 2, 3], [0.28, 0.56, 0.85], 'o', label='Truth values')
-        plt.legend()
-        plt.show()
-        print(y[1])
-        print(y[0.1])
-        print(y[0.01])
-        with open('elliptic_param_y.txt', 'w') as f_:
-            f_.write('mu 1 01 001\n')
-            for mu, y1, y01, y001 in zip(np.linspace(0.5, 3, 32), y[1], y[0.1], y[0.01]):
-                f_.write('{} {} {} {}\n'.format(mu, y1, y01, y001))
-
-        with open('elliptic_param_u.txt', 'w') as f_:
-            f_.write('mu 1 01 001\n')
-            for mu, y1, y01, y001 in zip(np.linspace(0.5, 3, 32), u[1], u[0.1], u[0.01]):
-                f_.write('{} {} {} {}\n'.format(mu, y1, y01, y001))
-
-        plt.plot(np.linspace(0.5, 3, 32), y, label='y')
-        plt.plot([1, 2, 3], [0.062, 0.12, 0.19], 'o', label='Truth values')
-        plt.legend()
-        plt.show()
@@ -1,9 +1,8 @@
 import argparse
 import torch
 from torch.nn import Softplus
 
-from pina import Plotter
-from pina import PINN as pPINN
-from problems.parametric_poisson import ParametricPoisson
+from pina import Plotter, LabelTensor, PINN
+from parametric_poisson2 import ParametricPoisson
 from pina.model import FeedForward
@@ -14,7 +13,13 @@ class myFeature(torch.nn.Module):
         super(myFeature, self).__init__()
 
     def forward(self, x):
-        return torch.exp(- 2*(x.extract(['x']) - x.extract(['mu1']))**2 - 2*(x.extract(['y']) - x.extract(['mu2']))**2)
+        t = (
+            torch.exp(
+                - 2*(x.extract(['x']) - x.extract(['mu1']))**2
+                - 2*(x.extract(['y']) - x.extract(['mu2']))**2
+            )
+        )
+        return LabelTensor(t, ['k0'])
 
 
 if __name__ == "__main__":
@@ -38,21 +43,23 @@ if __name__ == "__main__":
         extra_features=feat
     )
 
-    pinn = pPINN(
+    pinn = PINN(
         poisson_problem,
         model,
-        lr=0.0006,
+        lr=0.006,
         regularizer=1e-6)
 
     if args.s:
-        pinn.span_pts(500, n_params=10, mode_spatial='random', locations=['D'])
-        pinn.span_pts(200, n_params=10, mode_spatial='random', locations=['gamma1', 'gamma2', 'gamma3', 'gamma4'])
-        pinn.plot_pts()
+        pinn.span_pts(
+            {'variables': ['x', 'y'], 'mode': 'random', 'n': 100},
+            {'variables': ['mu1', 'mu2'], 'mode': 'grid', 'n': 5},
+            locations=['D'])
+        pinn.span_pts(
+            {'variables': ['x', 'y'], 'mode': 'grid', 'n': 20},
+            {'variables': ['mu1', 'mu2'], 'mode': 'grid', 'n': 5},
+            locations=['gamma1', 'gamma2', 'gamma3', 'gamma4'])
         pinn.train(10000, 100)
         with open('param_poisson_history_{}_{}.txt'.format(args.id_run, args.features), 'w') as file_:
             for i, losses in enumerate(pinn.history):
                 file_.write('{} {}\n'.format(i, sum(losses)))
         pinn.save_state('pina.poisson_param')
 
     else:
@@ -7,7 +7,7 @@ from torch.nn import ReLU, Tanh, Softplus
 from pina import PINN, LabelTensor, Plotter
 from pina.model import FeedForward
 from pina.adaptive_functions import AdaptiveSin, AdaptiveCos, AdaptiveTanh
-from problems.poisson import Poisson
+from poisson2 import Poisson
 
 
 class myFeature(torch.nn.Module):
@@ -19,7 +19,9 @@ class myFeature(torch.nn.Module):
         super(myFeature, self).__init__()
 
     def forward(self, x):
-        return torch.sin(x[:, 0]*torch.pi) * torch.sin(x[:, 1]*torch.pi)
+        t = (torch.sin(x.extract(['x'])*torch.pi) *
+             torch.sin(x.extract(['y'])*torch.pi))
+        return LabelTensor(t, ['sin(x)sin(y)'])
 
 if __name__ == "__main__":
@@ -51,14 +53,9 @@ if __name__ == "__main__":
     if args.s:
 
-        print(pinn)
-        pinn.span_pts(20, mode_spatial='grid', locations=['gamma1', 'gamma2', 'gamma3', 'gamma4'])
-        pinn.span_pts(20, mode_spatial='grid', locations=['D'])
-        pinn.plot_pts()
+        pinn.span_pts(20, 'grid', locations=['gamma1', 'gamma2', 'gamma3', 'gamma4'])
+        pinn.span_pts(20, 'grid', locations=['D'])
         pinn.train(5000, 100)
         with open('poisson_history_{}_{}.txt'.format(args.id_run, args.features), 'w') as file_:
             for i, losses in enumerate(pinn.history):
                 file_.write('{} {}\n'.format(i, sum(losses)))
         pinn.save_state('pina.poisson')
 
     else:
@@ -39,3 +39,5 @@ class Condition:
         else:
             raise ValueError
 
+        if hasattr(self, 'function') and not isinstance(self.function, list):
+            self.function = [self.function]
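
This normalization is what lets the simplified training loop later in this commit always iterate over ``condition.function``. A minimal sketch (names taken from the problem files above); both forms behave identically:

.. code:: python

    # Both a single residual function and a list are accepted;
    # Condition stores a list either way.
    Condition(Span({'x': [0, 1], 'y': 1}), nil_dirichlet)
    Condition(Span({'x': [0, 1], 'y': 1}), [nil_dirichlet])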
@@ -55,6 +55,8 @@ class LabelTensor(torch.Tensor):
                 [0.9518, 0.1025],
                 [0.8066, 0.9615]])
         '''
+        if x.ndim == 1:
+            x = x.reshape(-1, 1)
 
         if isinstance(labels, str):
             labels = [labels]
@@ -130,3 +132,8 @@ class LabelTensor(torch.Tensor):
         new_labels = self.labels + lt.labels
         new_tensor = torch.cat((self, lt), dim=1)
         return LabelTensor(new_tensor, new_labels)
+
+    def __str__(self):
+        s = f'labels({str(self.labels)})\n'
+        s += super().__str__()
+        return s
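
A short sketch of the behaviour exercised by the tests at the end of this commit (shapes illustrative): columns are addressed by label, and ``append`` concatenates labelled columns.

.. code:: python

    import torch
    from pina import LabelTensor

    t = LabelTensor(torch.rand(4, 3), ['a', 'b', 'c'])
    ab = t.extract(['a']).append(t.extract(['b']))  # labels ['a', 'b']
    print(ab)  # the __str__ added above prints the labels first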
@@ -2,9 +2,8 @@
 import torch
 import torch.nn as nn
 
-from pina.label_tensor import LabelTensor
-import warnings
-import copy
+from pina import LabelTensor
 
 
 class DeepONet(torch.nn.Module):
     """
@@ -75,39 +74,24 @@ class DeepONet(torch.nn.Module):
         self.trunk_net = trunk_net
         self.branch_net = branch_net
 
-        if features:
+        # if features:
+        #     if len(features) != features_net.layers[0].in_features:
+        #         raise ValueError('Incompatible features')
+        #     if trunk_out_dim != features_net.layers[-1].out_features:
+        #         raise ValueError('Incompatible features')
 
-            self.features = features
-            self.features_net = nn.Sequential(
-                nn.Linear(len(features), trunk_out_dim)
-            )
+        # self.features = features
+        # self.features_net = nn.Sequential(
+        #     nn.Linear(len(features), 10), nn.Softplus(),
+        #     # nn.Linear(10, 10), nn.Softplus(),
+        #     nn.Linear(10, trunk_out_dim)
+        # )
 
+        # self.features_net = nn.Sequential(
+        #     nn.Linear(len(features), trunk_out_dim)
+        # )
 
         self.reduction = nn.Linear(trunk_out_dim, self.output_dimension)
 
-        # print(self.branch_net.output_variables)
-        # print(self.trunk_net.output_variables)
-        # if isinstance(self.branch_net.output_variables, int) and isinstance(self.branch_net.output_variables, int):
-        #     if self.branch_net.output_dimension == self.trunk_net.output_dimension:
-        #         self.inner_size = self.branch_net.output_dimension
-        #         print('qui')
-        #     else:
-        #         raise ValueError('Branch and trunk networks have not the same output dimension.')
-        # else:
-        #     warnings.warn("The output dimension of the branch and trunk networks has been imposed by default as 10 for each output variable. To set it change the output_variable of networks to an integer.")
-        #     self.inner_size = self.output_dimension*inner_size
 
     @property
     def input_variables(self):
         """The input variables of the model"""
@@ -121,19 +105,33 @@ class DeepONet(torch.nn.Module):
         :return: the output computed by the model.
         :rtype: LabelTensor
         """
-        input_feature = []
-        for feature in self.features:
-            #print(feature)
-            input_feature.append(feature(x))
-        input_feature = torch.cat(input_feature, dim=1)
+        # print(x.shape)
+        #input_feature = []
+        #for feature in self.features:
+        #    #print(feature)
+        #    input_feature.append(feature(x))
+        #input_feature = torch.cat(input_feature, dim=1)
 
         branch_output = self.branch_net(
             x.extract(self.branch_net.input_variables))
+        # print(branch_output.shape)
         trunk_output = self.trunk_net(
             x.extract(self.trunk_net.input_variables))
-        feat_output = self.features_net(input_feature)
-        output_ = self.reduction(branch_output * trunk_output * feat_output)
-        output_ = self.reduction(trunk_output * feat_output)
+        # print(trunk_output.shape)
+        #feat_output = self.features_net(input_feature)
+        # print(feat_output.shape)
+        # inner_input = torch.cat([
+        #     branch_output * trunk_output,
+        #     branch_output,
+        #     trunk_output,
+        #     feat_output], dim=1)
+        # print(inner_input.shape)
 
+        # output_ = self.reduction(inner_input)
+        # print(output_.shape)
+        print(branch_output.shape)
+        print(trunk_output.shape)
+        output_ = self.reduction(trunk_output * branch_output)
         output_ = LabelTensor(output_, self.output_variables)
         # local_size = int(trunk_output.shape[1]/self.output_dimension)
         # for i, var in enumerate(self.output_variables):
@@ -93,20 +93,10 @@ class FeedForward(torch.nn.Module):
         if self.input_variables:
             x = x.extract(self.input_variables)
 
-        labels = []
-        features = []
         for i, feature in enumerate(self.extra_features):
-            labels.append('k{}'.format(i))
-            features.append(feature(x))
-
-        if labels and features:
-            features = torch.cat(features, dim=1)
-            features_tensor = LabelTensor(features, labels)
-            input_ = x.append(features_tensor)  # TODO error when no LabelTens
-        else:
-            input_ = x
+            x = x.append(feature(x))
 
         if self.output_variables:
-            return LabelTensor(self.model(input_), self.output_variables)
+            return LabelTensor(self.model(x), self.output_variables)
         else:
-            return self.model(input_)
+            return self.model(x)
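
This is why every ``myFeature.forward`` in this commit now wraps its result in a ``LabelTensor``: the feature output is appended to the network input as a labelled column. A minimal sketch of the contract (class and label names illustrative):

.. code:: python

    # Sketch: an extra feature must return a LabelTensor so that
    # FeedForward.forward can append it to the input columns.
    import torch
    from pina import LabelTensor

    class SinFeature(torch.nn.Module):
        def forward(self, x):
            return LabelTensor(torch.sin(torch.pi * x.extract(['x'])), ['k0'])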
@@ -4,62 +4,158 @@ import torch
 from pina.label_tensor import LabelTensor
 
 
-def grad(output_, input_):
+def grad(output_, input_, components=None, d=None):
     """
     TODO
     """
 
+    def grad_scalar_output(output_, input_, d):
+        """
+        """
+
+        if len(output_.labels) != 1:
+            raise RuntimeError
+        if not all([di in input_.labels for di in d]):
+            raise RuntimeError
+
+        output_fieldname = output_.labels[0]
+
+        gradients = torch.autograd.grad(
+            output_,
+            input_,
+            grad_outputs=torch.ones(output_.size()).to(
+                dtype=input_.dtype,
+                device=input_.device),
+            create_graph=True, retain_graph=True, allow_unused=True)[0]
+        gradients.labels = input_.labels
+        gradients = gradients.extract(d)
+        gradients.labels = [f'd{output_fieldname}d{i}' for i in d]
+
+        return gradients
+
     if not isinstance(input_, LabelTensor):
         raise TypeError
 
-    gradients = torch.autograd.grad(
-        output_,
-        input_,
-        grad_outputs=torch.ones(output_.size()).to(
-            dtype=input_.dtype,
-            device=input_.device),
-        create_graph=True, retain_graph=True, allow_unused=True)[0]
-    return LabelTensor(gradients, input_.labels)
+    if d is None:
+        d = input_.labels
+
+    if components is None:
+        components = output_.labels
+
+    if output_.shape[1] == 1:  # scalar output ################################
+
+        if components != output_.labels:
+            raise RuntimeError
+        gradients = grad_scalar_output(output_, input_, d)
+
+    elif output_.shape[1] >= 2:  # vector output ##############################
+
+        for i, c in enumerate(components):
+            c_output = output_.extract([c])
+            if i == 0:
+                gradients = grad_scalar_output(c_output, input_, d)
+            else:
+                gradients = gradients.append(
+                    grad_scalar_output(c_output, input_, d))
+    else:
+        raise NotImplementedError
+
+    return gradients
 
 
-def div(output_, input_):
+def div(output_, input_, components=None, d=None):
     """
     TODO
     """
-    if output_.shape[1] == 1:
-        div = grad(output_, input_).sum(axis=1)
-    else:  # really to improve
-        a = []
-        for o in output_.T:
-            a.append(grad(o, input_).extract(['x', 'y']))
-        div = torch.zeros(output_.shape[0], 1)
-        for i in range(output_.shape[1]):
-            div += a[i][:, i].reshape(-1, 1)
-
-    return div
+    if not isinstance(input_, LabelTensor):
+        raise TypeError
+
+    if d is None:
+        d = input_.labels
+
+    if components is None:
+        components = output_.labels
+
+    if output_.shape[1] < 2 or len(components) < 2:
+        raise ValueError('div supported only for vector field')
+
+    if len(components) != len(d):
+        raise ValueError
+
+    grad_output = grad(output_, input_, components, d)
+    div = torch.empty(input_.shape[0], len(components))
+    labels = [None] * len(components)
+    for i, c in enumerate(components):
+        c_fields = [f'd{c}d{di}' for di in d]
+        div[:, i] = grad_output.extract(c_fields).sum(axis=1)
+        labels[i] = '+'.join(c_fields)
+
+    return LabelTensor(div, labels)
 
 
-def nabla(output_, input_):
+def nabla(output_, input_, components=None, d=None, method='std'):
     """
     TODO
     """
-    return div(grad(output_, input_).extract(['x', 'y']), input_)
+    if d is None:
+        d = input_.labels
+
+    if components is None:
+        components = output_.labels
+
+    if len(components) != len(d) and len(components) != 1:
+        raise ValueError
+
+    if method == 'divgrad':
+        raise NotImplementedError
+        # TODO fix
+        # grad_output = grad(output_, input_, components, d)
+        # result = div(grad_output, input_, d=d)
+    elif method == 'std':
+
+        if len(components) == 1:
+            grad_output = grad(output_, input_, components=components, d=d)
+            result = torch.zeros(output_.shape[0], 1)
+            for i, label in enumerate(grad_output.labels):
+                gg = grad(grad_output, input_, d=d, components=[label])
+                result[:, 0] += gg[:, i]
+            labels = [f'dd{components[0]}']
+
+        else:
+            result = torch.empty(input_.shape[0], len(components))
+            labels = [None] * len(components)
+            for idx, (ci, di) in enumerate(zip(components, d)):
+
+                if not isinstance(ci, list):
+                    ci = [ci]
+                if not isinstance(di, list):
+                    di = [di]
+
+                grad_output = grad(output_, input_, components=ci, d=di)
+                result[:, idx] = grad(grad_output, input_, d=di).flatten()
+                labels[idx] = f'dd{ci}dd{di}'
+
+    return LabelTensor(result, labels)
 
 
-def advection_term(output_, input_):
+def advection(output_, input_):
     """
     TODO
     """
     dimension = len(output_.labels)
     for i, label in enumerate(output_.labels):
         # compute u dot gradient in each direction
-        gradient_loc = grad(output_.extract([label]), input_).extract(input_.labels[:dimension])
+        gradient_loc = grad(output_.extract([label]),
+                            input_).extract(input_.labels[:dimension])
         dim_0 = gradient_loc.shape[0]
         dim_1 = gradient_loc.shape[1]
         u_dot_grad_loc = torch.bmm(output_.view(dim_0, 1, dim_1),
-                                  gradient_loc.view(dim_0, dim_1, 1))
+                                   gradient_loc.view(dim_0, dim_1, 1))
         u_dot_grad_loc = LabelTensor(torch.reshape(u_dot_grad_loc,
-                                     (u_dot_grad_loc.shape[0], u_dot_grad_loc.shape[1])), [input_.labels[i]])
-        if i==0:
+                                                   (u_dot_grad_loc.shape[0],
+                                                    u_dot_grad_loc.shape[1])),
+                                    [input_.labels[i]])
+        if i == 0:
             adv_term = u_dot_grad_loc
         else:
             adv_term = adv_term.append(u_dot_grad_loc)
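
A quick usage sketch of the new operator signatures, as the problem classes in this commit consume them (points and fields illustrative):

.. code:: python

    # Sketch: derivative columns come back as labelled LabelTensors.
    import torch
    from pina import LabelTensor
    from pina.operators import grad, nabla

    pts = LabelTensor(torch.rand(8, 2, requires_grad=True), ['x', 'y'])
    u = LabelTensor(pts.extract(['x'])**2 + pts.extract(['y'])**2, ['u'])

    du = grad(u, pts)                      # labels: ['dudx', 'dudy']
    lap = nabla(u, pts, components=['u'])  # second derivatives, summed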
pina/pinn.py
@@ -123,6 +123,7 @@ class PINN(object):
     def span_pts(self, *args, **kwargs):
         """
         >>> pinn.span_pts(n=10, mode='grid')
+        >>> pinn.span_pts(n=10, mode='grid', locations=['bound1'])
         >>> pinn.span_pts(n=10, mode='grid', variables=['x'])
         """
@@ -133,23 +134,30 @@ class PINN(object):
             n1 = tensor1.shape[0]
             n2 = tensor2.shape[0]
 
-            tensor1 = LabelTensor(tensor1.repeat(n2, 1), labels=tensor1.labels)
+            tensor1 = LabelTensor(
+                tensor1.repeat(n2, 1),
+                labels=tensor1.labels)
             tensor2 = LabelTensor(
-                tensor2.repeat_interleave(n1, dim=0), labels=tensor2.labels)
+                tensor2.repeat_interleave(n1, dim=0),
+                labels=tensor2.labels)
             return tensor1.append(tensor2)
-        else:
-            pass
+        elif len(tensors):
+            return tensors[0]
 
         if isinstance(args[0], int) and isinstance(args[1], str):
-            pass
+            variables = self.problem.input_variables
+            argument = {}
+            argument['n'] = int(args[0])
+            argument['mode'] = args[1]
+            argument['variables'] = self.problem.input_variables
+            arguments = [argument]
         elif all(isinstance(arg, dict) for arg in args):
+            print(args)
             arguments = args
-            pass
         elif all(key in kwargs for key in ['n', 'mode']):
-            variables = self.problem.input_variables
-            pass
+            argument = {}
+            argument['n'] = kwargs['n']
+            argument['mode'] = kwargs['mode']
+            argument['variables'] = self.problem.input_variables
+            arguments = [argument]
         else:
             raise RuntimeError
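
For reference, the three calling conventions this rewritten dispatch accepts, as exercised by the updated run scripts and tutorials in this commit:

.. code:: python

    # positional count and mode
    pinn.span_pts(20, 'grid', locations=['D'])
    # keyword form
    pinn.span_pts(n=10, mode='grid')
    # one dict per variable group, useful for anisotropic sampling
    pinn.span_pts(
        {'n': 200, 'mode': 'random', 'variables': 't'},
        {'n': 20, 'mode': 'random', 'variables': 'x'},
        locations=['D'])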
@@ -174,26 +182,23 @@ class PINN(object):
             self.input_pts[location].requires_grad_(True)
             self.input_pts[location].retain_grad()
 
-    def plot_pts(self, locations='all'):
-        import matplotlib
-        # matplotlib.use('GTK3Agg')
-        if locations == 'all':
-            locations = [condition for condition in self.problem.conditions]
-        for location in locations:
-            x = self.input_pts[location].extract(['x'])
-            y = self.input_pts[location].extract(['y'])
-            plt.plot(x.detach(), y.detach(), '.', label=location)
-            # np.savetxt('burgers_{}_pts.txt'.format(location), self.input_pts[location].tensor.detach(), header='x y', delimiter=' ')
-        plt.legend()
-        plt.show()
-
-
     def train(self, stop=100, frequency_print=2, trial=None):
 
         epoch = 0
 
+        header = []
+        for condition_name in self.problem.conditions:
+            condition = self.problem.conditions[condition_name]
+
+            if hasattr(condition, 'function'):
+                if isinstance(condition.function, list):
+                    for function in condition.function:
+                        header.append(f'{condition_name}{function.__name__}')
+
+                continue
+
+            header.append(f'{condition_name}')
+
         while True:
 
             losses = []
@@ -204,23 +209,20 @@ class PINN(object):
                 if hasattr(condition, 'function'):
                     pts = self.input_pts[condition_name]
                     predicted = self.model(pts)
-                    if isinstance(condition.function, list):
-                        for function in condition.function:
-                            residuals = function(pts, predicted)
-                            local_loss = condition.data_weight*self._compute_norm(residuals)
-                            losses.append(local_loss)
-                    else:
-                        residuals = condition.function(pts, predicted)
-                        local_loss = condition.data_weight*self._compute_norm(residuals)
+                    for function in condition.function:
+                        residuals = function(pts, predicted)
+                        local_loss = (
+                            condition.data_weight*self._compute_norm(
+                                residuals))
                         losses.append(local_loss)
                 elif hasattr(condition, 'output_points'):
                     pts = condition.input_points
-                    # print(pts)
                     predicted = self.model(pts)
-                    # print(predicted)
                     residuals = predicted - condition.output_points
-                    local_loss = condition.data_weight*self._compute_norm(residuals)
+                    local_loss = (
+                        condition.data_weight*self._compute_norm(residuals))
                     losses.append(local_loss)
 
             self.optimizer.zero_grad()
 
             sum(losses).backward()
@@ -239,12 +241,21 @@ class PINN(object):
 
             if isinstance(stop, int):
                 if epoch == stop:
+                    print('[epoch {:05d}] {:.6e} '.format(self.trained_epoch, sum(losses).item()), end='')
+                    for loss in losses:
+                        print('{:.6e} '.format(loss), end='')
+                    print()
                     break
             elif isinstance(stop, float):
                 if sum(losses) < stop:
                     break
 
-            if epoch % frequency_print == 0:
+            if epoch % frequency_print == 0 or epoch == 1:
+                print(' {:5s} {:12s} '.format('', 'sum'), end='')
+                for name in header:
+                    print('{:12.12s} '.format(name), end='')
+                print()
+
                 print('[epoch {:05d}] {:.6e} '.format(self.trained_epoch, sum(losses).item()), end='')
                 for loss in losses:
                     print('{:.6e} '.format(loss), end='')
@@ -79,7 +79,7 @@ class Plotter:
 
-    def plot(self, obj, method='contourf', component='u', parametric=False, params_value=1, filename=None):
+    def plot(self, obj, method='contourf', component='u', parametric=False, params_value=1.5, filename=None):
         """
         """
         res = 256
@@ -22,7 +22,7 @@ class Span(Location):
 
     def sample(self, n, mode='random', variables='all'):
 
-        if variables=='all':
+        if variables == 'all':
             spatial_range_ = list(self.range_.keys())
             spatial_fixed_ = list(self.fixed_.keys())
             bounds = np.array(list(self.range_.values()))
@@ -41,6 +41,7 @@ class Span(Location):
             fixed.append(int(self.fixed_[variable]))
         fixed = torch.Tensor(fixed)
         bounds = np.array(bounds)
+
         if mode == 'random':
             pts = np.random.uniform(size=(n, bounds.shape[0]))
         elif mode == 'chebyshev':
@@ -59,6 +60,7 @@ class Span(Location):
             from scipy.stats import qmc
             sampler = qmc.LatinHypercube(d=bounds.shape[0])
             pts = sampler.random(n)
+
         # Scale pts
         pts *= bounds[:, 1] - bounds[:, 0]
         pts += bounds[:, 0]
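
A short usage sketch of the sampler (the modes shown are among those handled above):

.. code:: python

    from pina import Span

    span = Span({'x': [0, 1], 'y': [0, 1]})
    pts_random = span.sample(100, mode='random')
    pts_latin = span.sample(100, mode='latin')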
@@ -12,7 +12,7 @@ class myFeature(torch.nn.Module):
         super(myFeature, self).__init__()
 
     def forward(self, x):
-        return torch.sin(torch.pi * x.extract('a'))
+        return LabelTensor(torch.sin(torch.pi * x.extract('a')), 'sin(a)')
 
 
 data = torch.rand((20, 3))
@@ -72,9 +72,8 @@ def test_merge():
 
 def test_merge():
     tensor = LabelTensor(data, labels)
     tensor_a = tensor.extract('a')
     tensor_b = tensor.extract('b')
     tensor_c = tensor.extract('c')
 
-    tensor_bb = tensor_b.append(tensor_b)
-    assert torch.allclose(tensor_b, tensor.extract(['b', 'c']))
+    tensor_bc = tensor_b.append(tensor_c)
+    assert torch.allclose(tensor_bc, tensor.extract(['b', 'c']))