Refactoring code
21  LICENSE.rst  Normal file
@@ -0,0 +1,21 @@
The MIT License (MIT)

Copyright (c) 2021-2021 PINA contributors

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
49  examples/problems/burgers.py  Normal file
@@ -0,0 +1,49 @@
import numpy as np
import scipy.io
import torch

from pina.segment import Segment
from pina.cube import Cube
from pina.problem import TimeDependentProblem, Problem1D
from pina.operators import grad


def tmp_grad(output_, input_):
    return torch.autograd.grad(
        output_,
        input_.tensor,
        grad_outputs=torch.ones(output_.size()).to(
            dtype=input_.tensor.dtype,
            device=input_.tensor.device),
        create_graph=True, retain_graph=True, allow_unused=True)[0]


class Burgers1D(TimeDependentProblem, Problem1D):

    input_variables = ['x', 't']
    output_variables = ['u']
    spatial_domain = Cube([[-1, 1]])
    temporal_domain = Cube([[0, 1]])

    def burger_equation(input_, output_):
        grad_u = grad(output_['u'], input_)
        grad_x, grad_t = tmp_grad(output_['u'], input_).T
        gradgrad_u_x = grad(grad_u['x'], input_)
        grad_xx = tmp_grad(grad_x, input_)[:, 0]
        return grad_u['t'] + output_['u']*grad_u['x'] - (0.01/torch.pi)*gradgrad_u_x['x']


    def nil_dirichlet(input_, output_):
        u_expected = 0.0
        return output_['u'] - u_expected

    def initial_condition(input_, output_):
        u_expected = -torch.sin(torch.pi*input_['x'])
        return output_['u'] - u_expected



    conditions = {
        'gamma1': {'location': Segment((-1, 0), (-1, 1)), 'func': nil_dirichlet},
        'gamma2': {'location': Segment(( 1, 0), ( 1, 1)), 'func': nil_dirichlet},
        'initia': {'location': Segment((-1, 0), ( 1, 0)), 'func': initial_condition},
        'D': {'location': Cube([[-1, 1],[0,1]]), 'func': burger_equation}
    }
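
For context, a minimal usage sketch (not part of the commit) showing how a problem class like the one above is meant to be driven; every call is taken from the example scripts later in this diff, except the PINN constructor arguments, which are an assumption since its signature is not shown here.

from torch.nn import Softplus

from pina import PINN, Plotter
from pina.deep_feed_forward import DeepFeedForward
from problems.burgers import Burgers1D

problem = Burgers1D()
model = DeepFeedForward(
    layers=[30, 20, 10, 5],
    func=Softplus,
    input_variables=problem.input_variables,
    output_variables=problem.output_variables)

pinn = PINN(problem, model)   # assumed constructor; the PINN signature is not part of this diff
pinn.span_pts(2000, 'latin', ['D'])                           # collocation points inside the domain
pinn.span_pts(150, 'random', ['gamma1', 'gamma2', 'initia'])  # boundary and initial conditions
pinn.train(5000, 100)         # second argument assumed to be a logging interval

plotter = Plotter()
plotter.plot(pinn)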

35  examples/problems/poisson2D.py  Normal file
@@ -0,0 +1,35 @@
import numpy as np
import torch
from pina.segment import Segment
from pina.cube import Cube
from pina.problem import Problem2D
from pina.operators import grad, div, nabla


class Poisson2D(Problem2D):

    input_variables = ['x', 'y']
    output_variables = ['u']
    spatial_domain = Cube([[0, 1], [0, 1]])

    def laplace_equation(input_, output_):
        force_term = (torch.sin(input_['x']*torch.pi)
                      * torch.sin(input_['y']*torch.pi))
        return nabla(output_['u'], input_).flatten() - force_term

    def nil_dirichlet(input_, output_):
        value = 0.0
        return output_['u'] - value

    conditions = {
        'gamma1': {'location': Segment((0, 0), (1, 0)), 'func': nil_dirichlet},
        'gamma2': {'location': Segment((1, 0), (1, 1)), 'func': nil_dirichlet},
        'gamma3': {'location': Segment((1, 1), (0, 1)), 'func': nil_dirichlet},
        'gamma4': {'location': Segment((0, 1), (0, 0)), 'func': nil_dirichlet},
        'D': {'location': Cube([[0, 1], [0, 1]]), 'func': laplace_equation}
    }

    def poisson_sol(self, x, y):
        return -(np.sin(x*np.pi)*np.sin(y*np.pi))/(2*np.pi**2)

    truth_solution = poisson_sol
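
As a quick sanity check (not part of the commit), the truth_solution above really does satisfy the residual enforced by laplace_equation; a few lines of plain autograd confirm that the laplacian of the exact solution minus the force term vanishes up to floating-point rounding:

import math

import torch

x = torch.rand(1000, requires_grad=True)
y = torch.rand(1000, requires_grad=True)
pi = math.pi

u = -(torch.sin(pi*x)*torch.sin(pi*y))/(2*pi**2)               # the truth_solution above
u_x = torch.autograd.grad(u.sum(), x, create_graph=True)[0]
u_xx = torch.autograd.grad(u_x.sum(), x, create_graph=True)[0]
u_y = torch.autograd.grad(u.sum(), y, create_graph=True)[0]
u_yy = torch.autograd.grad(u_y.sum(), y, create_graph=True)[0]

residual = u_xx + u_yy - torch.sin(pi*x)*torch.sin(pi*y)       # nabla(u) - force_term
print(residual.abs().max())                                    # effectively zero (float rounding)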

@@ -9,34 +9,20 @@ from torch.nn import ReLU, Tanh, Softplus
from problems.burgers import Burgers1D
from pina.deep_feed_forward import DeepFeedForward

from pina.adaptive_functions import AdaptiveSin, AdaptiveCos, AdaptiveTanh
from pina import Plotter


class myFeature(torch.nn.Module):
    """
-   Feature: sin(x)
+   Feature: sin(pi*x)
    """

    def __init__(self, idx):

        super(myFeature, self).__init__()
        self.idx = idx

    def forward(self, x):

        return torch.sin(torch.pi * x[:, self.idx])

class myExp(torch.nn.Module):
    def __init__(self, idx):

        super().__init__()
        self.idx = idx

    def forward(self, x):

        return torch.exp(x[:, self.idx])


if __name__ == "__main__":

    parser = argparse.ArgumentParser(description="Run PINA")
@@ -51,13 +37,13 @@ if __name__ == "__main__":

    burgers_problem = Burgers1D()
    model = DeepFeedForward(
-       layers=[20, 10, 5],
-       #layers=[8, 4, 2],
+       layers=[30, 20, 10, 5],
+       #layers=[8, 8, 8],
        #layers=[16, 8, 4, 4],
        #layers=[20, 4, 4, 4],
        output_variables=burgers_problem.output_variables,
        input_variables=burgers_problem.input_variables,
-       func=Tanh,
+       func=Softplus,
        extra_features=feat
    )
@@ -70,46 +56,11 @@
        lr_accelerate=None)

    if args.s:

        pinn.span_pts(8000, 'latin', ['D'])
        pinn.span_pts(50, 'random', ['gamma1', 'gamma2', 'initia'])
        #pinn.plot_pts()
        pinn.train(10000, 1000)
        #with open('burgers_history_{}_{}.txt'.format(args.id_run, args.features), 'w') as file_:
        # for i, losses in enumerate(pinn.history):
        # file_.write('{} {}\n'.format(i, sum(losses).item()))
        pinn.span_pts(2000, 'latin', ['D'])
        pinn.span_pts(150, 'random', ['gamma1', 'gamma2', 'initia'])
        pinn.train(5000, 100)
        pinn.save_state('pina.burger.{}.{}'.format(args.id_run, args.features))

    else:
        pinn.load_state('pina.burger.{}.{}'.format(args.id_run, args.features))
        #pinn.plot(256,filename='pina.burger.{}.{}.jpg'.format(args.id_run, args.features))


    print(pinn.history)
    with open('burgers_history_{}_{}.txt'.format(args.id_run, args.features), 'w') as file_:
        for i, losses in enumerate(pinn.history):
            print(losses)
            file_.write('{} {}\n'.format(i, sum(losses)))
    import scipy
    data = scipy.io.loadmat('Data/burgers_shock.mat')
    data_solution = {'grid': np.meshgrid(data['x'], data['t']), 'grid_solution': data['usol'].T}
    import matplotlib
    matplotlib.use('Qt5Agg')
    import matplotlib.pyplot as plt

    t =75
    for t in [25, 50, 75]:
        input = torch.cat([
            torch.linspace(-1, 1, 256).reshape(-1, 1),
            torch.ones(size=(256, 1)) * t /100],
            axis=1).double()
        output = pinn.model(input)
        fout = 'pina.burgers.{}.{}.t{}.dat'.format(args.id_run, args.features, t)
        with open(fout, 'w') as f_:
            f_.write('x utruth upinn\n')
            for x, utruth, upinn in zip(data['x'], data['usol'][:, t], output.tensor.detach()):
                f_.write('{} {} {}\n'.format(x[0], utruth, upinn.item()))
        plt.plot(data['usol'][:, t], label='truth')
        plt.plot(output.tensor.detach(), 'x', label='pinn')
        plt.legend()
        plt.show()
    plotter = Plotter()
    plotter.plot(pinn)
@@ -2,15 +2,16 @@ import sys
import numpy as np
import torch
import argparse
-from pina.pinn import PINN
+from pina import PINN
from pina.ppinn import ParametricPINN as pPINN
from pina.label_tensor import LabelTensor
from torch.nn import ReLU, Tanh, Softplus
-from problems.poisson2D import Poisson2DProblem as Poisson2D
+from problems.poisson2D import Poisson2D
from pina.deep_feed_forward import DeepFeedForward

from pina.adaptive_functions import AdaptiveSin, AdaptiveCos, AdaptiveTanh

+from pina import Plotter

class myFeature(torch.nn.Module):
    """
@@ -54,17 +55,18 @@ if __name__ == "__main__":

    if args.s:

-        pinn.span_pts(10, 'grid', ['D'])
-        pinn.span_pts(10, 'grid', ['gamma1', 'gamma2', 'gamma3', 'gamma4'])
+        pinn.span_pts(20, 'grid', ['D'])
+        pinn.span_pts(20, 'grid', ['gamma1', 'gamma2', 'gamma3', 'gamma4'])
        #pinn.plot_pts()
-        pinn.train(10000, 100)
+        pinn.train(1000, 100)
        with open('poisson_history_{}_{}.txt'.format(args.id_run, args.features), 'w') as file_:
            for i, losses in enumerate(pinn.history):
-                file_.write('{} {}\n'.format(i, sum(losses).item()))
+                file_.write('{} {}\n'.format(i, sum(losses)))
        pinn.save_state('pina.poisson')

    else:
        pinn.load_state('pina.poisson')
-        pinn.plot(40)
+    plotter = Plotter()
+    plotter.plot(pinn)
@@ -1,3 +1,5 @@
from .pinn import PINN
from .deep_feed_forward import DeepFeedForward
-from .problem1d import Problem1D

+from .label_tensor import LabelTensor
+from .plotter import Plotter
19  pina/cube.py
@@ -1,6 +1,7 @@
import numpy as np
from .chebyshev import chebyshev_roots


class Cube():
    def __init__(self, bound):
        self.bound = np.asarray(bound)
@@ -10,11 +11,15 @@ class Cube():
        if mode == 'random':
            pts = np.random.uniform(size=(n, self.bound.shape[0]))
        elif mode == 'chebyshev':
-            pts = np.array([chebyshev_roots(n) *.5 + .5 for _ in range(self.bound.shape[0])])
+            pts = np.array([
+                chebyshev_roots(n) * .5 + .5
+                for _ in range(self.bound.shape[0])])
            grids = np.meshgrid(*pts)
            pts = np.hstack([grid.reshape(-1, 1) for grid in grids])
        elif mode == 'grid':
-            pts = np.array([np.linspace(0, 1, n) for _ in range(self.bound.shape[0])])
+            pts = np.array([
+                np.linspace(0, 1, n)
+                for _ in range(self.bound.shape[0])])
            grids = np.meshgrid(*pts)
            pts = np.hstack([grid.reshape(-1, 1) for grid in grids])
        elif mode == 'lh' or mode == 'latin':
@@ -27,3 +32,13 @@ class Cube():
        pts += self.bound[:, 0]

        return pts

+    def meshgrid(self, n):
+        pts = np.array([
+            np.linspace(0, 1, n)
+            for _ in range(self.bound.shape[0])])
+
+        pts *= self.bound[:, 1] - self.bound[:, 0]
+        pts += self.bound[:, 0]
+
+        return np.meshgrid(*pts)
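
For reference, a short sketch (not part of the commit) of the sampling modes handled above; the discretize(n, mode) signature is inferred from the spatial_domain.discretize(res, 'grid') calls in pina/plotter.py, and chebyshev_roots is assumed to return n roots that the code rescales to [0, 1]:

import numpy as np

from pina.cube import Cube

square = Cube([[0, 1], [-1, 1]])                # one [min, max] pair per coordinate
grid_pts = square.discretize(10, 'grid')        # (10*10, 2) tensor-product grid, scaled to the bounds
rand_pts = square.discretize(100, 'random')     # 100 uniform samples inside the box
cheb_pts = square.discretize(10, 'chebyshev')   # grid built from rescaled Chebyshev roots
# 'lh' / 'latin' (latin hypercube) is also accepted; its branch lies outside the hunk shown above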

@@ -1,5 +1,3 @@

-from .problem import Problem
import torch
import torch.nn as nn
import numpy as np
@@ -12,18 +10,18 @@ from pina.label_tensor import LabelTensor

class DeepFeedForward(torch.nn.Module):

-    def __init__(self,
-                 inner_size=20,
-                 n_layers=2,
-                 func=nn.Tanh,
-                 input_variables=None,
-                 output_variables=None,
-                 layers=None,
+    def __init__(self,
+                 inner_size=20,
+                 n_layers=2,
+                 func=nn.Tanh,
+                 input_variables=None,
+                 output_variables=None,
+                 layers=None,
+                 extra_features=None):
        '''
        '''
        super(DeepFeedForward, self).__init__()

        if extra_features is None:
            extra_features = []
        self.extra_features = nn.Sequential(*extra_features)
@@ -48,7 +46,7 @@ class DeepFeedForward(torch.nn.Module):
        self.layers = []
        for i in range(len(tmp_layers)-1):
            self.layers.append(nn.Linear(tmp_layers[i], tmp_layers[i+1]))



        if isinstance(func, list):

20  pina/meta.py  Normal file
@@ -0,0 +1,20 @@
__all__ = [
    '__project__',
    '__title__',
    '__author__',
    '__copyright__',
    '__license__',
    '__version__',
    '__mail__',
    '__maintainer__',
    '__status__']

__project__ = 'PINA'
__title__ = "pina"
__author__ = "Nicola Demo, Maria Strazzullo"
__copyright__ = "Copyright 2021-2021, PINA contributors"
__license__ = "MIT"
__version__ = "0.0.0"
__mail__ = 'demo.nicola@gmail.com, ' # TODO
__maintainer__ = __author__
__status__ = "Alpha"
45  pina/operators.py  Normal file
@@ -0,0 +1,45 @@
"""Module for operators vectorize implementation"""
import torch

from pina.label_tensor import LabelTensor


def grad(output_, input_):
    """
    TODO
    """
    if not isinstance(input_, LabelTensor):
        raise TypeError

    gradients = torch.autograd.grad(
        output_,
        input_.tensor,
        grad_outputs=torch.ones(output_.size()).to(
            dtype=input_.tensor.dtype,
            device=input_.tensor.device),
        create_graph=True, retain_graph=True, allow_unused=True)[0]
    return LabelTensor(gradients, input_.labels)


def div(output_, input_):
    """
    TODO
    """
    if output_.tensor.shape[1] == 1:
        div = grad(output_.tensor, input_).sum(axis=1)
    else: # really to improve
        a = []
        for o in output_.tensor.T:
            a.append(grad(o, input_).tensor)
        div = torch.zeros(output_.tensor.shape[0], 1)
        for i in range(output_.tensor.shape[1]):
            div += a[i][:, i].reshape(-1, 1)

    return div


def nabla(output_, input_):
    """
    TODO
    """
    return div(grad(output_, input_), input_)
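
A small usage sketch (not part of the commit) of the operators defined above. The LabelTensor(tensor, labels) constructor and its .tensor attribute are assumed from their uses in grad() and in pina/plotter.py; wrapping a tensor that already has requires_grad=True is likewise an assumption about how the points are prepared.

import torch

from pina.label_tensor import LabelTensor
from pina.operators import grad, nabla

pts = LabelTensor(torch.rand(100, 2, requires_grad=True), ['x', 'y'])

u = (pts.tensor[:, 0]**2 + pts.tensor[:, 1]**2).reshape(-1, 1)  # u = x^2 + y^2
grad_u = grad(u, pts)   # LabelTensor of first derivatives, labelled 'x' and 'y'
lap_u = nabla(u, pts)   # div(grad(u)); for this u it is constant and equal to 4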

22  pina/pinn.py
@@ -1,12 +1,7 @@
from mpmath import chebyt, chop, taylor

-from .problem import Problem
+from .problem import AbstractProblem
import torch
import torch.nn as nn
import matplotlib.pyplot as plt
import numpy as np
from .cube import Cube
from .segment import Segment
from .deep_feed_forward import DeepFeedForward
from pina.label_tensor import LabelTensor
torch.pi = torch.acos(torch.zeros(1)).item() * 2 # which is 3.1415927410125732

@@ -86,7 +81,7 @@ class PINN(object):

    @problem.setter
    def problem(self, problem):
-        if not isinstance(problem, Problem):
+        if not isinstance(problem, AbstractProblem):
            raise TypeError
        self._problem = problem

@@ -157,7 +152,6 @@ class PINN(object):
        self.trained_epoch = checkpoint['epoch']
        self.history = checkpoint['history']

-        print(self.history)
        return self


@@ -188,7 +182,6 @@ class PINN(object):
    def plot_pts(self, locations='all'):
        import matplotlib
-        matplotlib.use('GTK3Agg')
        import matplotlib.pyplot as plt
        if locations == 'all':
            locations = [condition for condition in self.problem.conditions]

@@ -197,7 +190,6 @@ class PINN(object):
            #plt.plot(x.detach(), y.detach(), 'o', label=location)
            np.savetxt('burgers_{}_pts.txt'.format(location), self.input_pts[location].tensor.detach(), header='x y', delimiter=' ')

-        gggg

        plt.legend()
        plt.show()
@@ -234,7 +226,7 @@ class PINN(object):

            if trial:
                import optuna
-                trial.report(loss[0].item()+loss[1].item(), epoch)
+                trial.report(sum(losses), epoch)
                if trial.should_prune():
                    raise optuna.exceptions.TrialPruned()

@@ -282,6 +274,9 @@ class PINN(object):
            print("Something went wrong...")
            print("Not able to compute the error. Please pass a data solution or a true solution")



    def plot(self, res, filename=None, variable=None):
        '''
        '''
@@ -289,6 +284,9 @@
        matplotlib.use('Agg')
        import matplotlib.pyplot as plt

        self._plot_2D(res, filename, variable)
        print('TTTTTTTTTTTTTTTTTt')
        print(self.problem.bounds)
        pts_container = []
        #for mn, mx in [[-1, 1], [-1, 1]]:
        for mn, mx in [[0, 1], [0, 1]]:
93  pina/plotter.py  Normal file
@@ -0,0 +1,93 @@
""" Module for plotting. """
import matplotlib
matplotlib.use('Qt5Agg')
import matplotlib.pyplot as plt
import numpy as np
import torch

from pina import LabelTensor
from pina import PINN
from .problem import Problem2D, Problem1D, TimeDependentProblem
#from pina.tdproblem1d import TimeDepProblem1D


class Plotter:

    def _plot_2D(self, obj, method='contourf'):
        """
        """
        if not isinstance(obj, PINN):
            raise RuntimeError

        res = 256
        pts = obj.problem.spatial_domain.discretize(res, 'grid')
        grids_container = [
            pts[:, 0].reshape(res, res),
            pts[:, 1].reshape(res, res),
        ]
        pts = LabelTensor(torch.tensor(pts), obj.problem.input_variables)
        predicted_output = obj.model(pts.tensor)

        if hasattr(obj.problem, 'truth_solution'):
            truth_output = obj.problem.truth_solution(*pts.tensor.T).float()
            fig, axes = plt.subplots(nrows=1, ncols=3, figsize=(16, 6))

            cb = getattr(axes[0], method)(*grids_container, predicted_output.tensor.reshape(res, res).detach())
            fig.colorbar(cb, ax=axes[0])
            cb = getattr(axes[1], method)(*grids_container, truth_output.reshape(res, res).detach())
            fig.colorbar(cb, ax=axes[1])
            cb = getattr(axes[2], method)(*grids_container, (truth_output-predicted_output.tensor.float().flatten()).detach().reshape(res, res))
            fig.colorbar(cb, ax=axes[2])
        else:
            fig, axes = plt.subplots(nrows=1, ncols=1, figsize=(8, 6))
            cb = getattr(axes, method)(*grids_container, predicted_output.tensor.reshape(res, res).detach())
            fig.colorbar(cb, ax=axes)


    def _plot_1D_TimeDep(self, obj, method='contourf'):
        """
        """
        if not isinstance(obj, PINN):
            raise RuntimeError

        res = 256
        grids_container = np.meshgrid(
            obj.problem.spatial_domain.discretize(res, 'grid'),
            obj.problem.temporal_domain.discretize(res, 'grid'),
        )
        pts = np.hstack([
            grids_container[0].reshape(-1, 1),
            grids_container[1].reshape(-1, 1),
        ])
        pts = LabelTensor(torch.tensor(pts), obj.problem.input_variables)
        predicted_output = obj.model(pts.tensor)

        if hasattr(obj.problem, 'truth_solution'):
            truth_output = obj.problem.truth_solution(*pts.tensor.T).float()
            fig, axes = plt.subplots(nrows=1, ncols=3, figsize=(16, 6))

            cb = getattr(axes[0], method)(*grids_container, predicted_output.tensor.reshape(res, res).detach())
            fig.colorbar(cb, ax=axes[0])
            cb = getattr(axes[1], method)(*grids_container, truth_output.reshape(res, res).detach())
            fig.colorbar(cb, ax=axes[1])
            cb = getattr(axes[2], method)(*grids_container, (truth_output-predicted_output.tensor.float().flatten()).detach().reshape(res, res))
            fig.colorbar(cb, ax=axes[2])
        else:
            fig, axes = plt.subplots(nrows=1, ncols=1, figsize=(8, 6))
            cb = getattr(axes, method)(*grids_container, predicted_output.tensor.reshape(res, res).detach())
            fig.colorbar(cb, ax=axes)



    def plot(self, obj, filename=None):
        """
        """
        if isinstance(obj.problem, (TimeDependentProblem, Problem1D)): # time-dep 1D
            self._plot_1D_TimeDep(obj, method='pcolor')
        elif isinstance(obj.problem, Problem2D): # 2D
            self._plot_2D(obj, method='pcolor')

        if filename:
            plt.savefig(filename)
        else:
            plt.show()

@@ -1,6 +1,6 @@
from mpmath import chebyt, chop, taylor

-from .problem import Problem
+from .problem import AbstractProblem
import torch
import torch.nn as nn
import numpy as np
@@ -1,49 +0,0 @@
import torch

class Problem(object):

    def __init__(self, *args, **kwargs):
        raise NotImplemented

    @property
    def variables(self):
        return self._variables

    @variables.setter
    def variables(self, variables):
        if variables is None:
            variables = ['var']
        self._variables = variables

    @property
    def boundary_conditions(self):
        return self._bc

    @boundary_conditions.setter
    def boundary_conditions(self, bc):
        if isinstance(bc, (list, tuple)):
            bc = {'var': bc}
        self._bc = bc

    @property
    def spatial_dimensions(self):
        return self._spatial_dimensions

    @staticmethod
    def grad(output_, input_):
        gradients = torch.autograd.grad(
            output_,
            input_.tensor,
            grad_outputs=torch.ones(output_.size()).to(
                dtype=input_.tensor.dtype,
                device=input_.tensor.device),
            create_graph=True, retain_graph=True, allow_unused=True)[0]
        from pina.label_tensor import LabelTensor
        return LabelTensor(gradients, input_.labels)



    def __str__(self):
        s = ''
        #s = 'Variables: {}\n'.format(self.variables)
        return s
4  pina/problem/__init__.py  Normal file
@@ -0,0 +1,4 @@
from .abstract_problem import AbstractProblem
from .problem2d import Problem2D
from .problem1d import Problem1D
from .timedep_problem import TimeDependentProblem

19  pina/problem/abstract_problem.py  Normal file
@@ -0,0 +1,19 @@
from abc import ABCMeta, abstractmethod


class AbstractProblem(metaclass=ABCMeta):

    @property
    @abstractmethod
    def input_variables(self):
        pass

    @property
    @abstractmethod
    def output_variables(self):
        pass

    @property
    @abstractmethod
    def conditions(self):
        pass

6  pina/problem/problem1d.py  Normal file
@@ -0,0 +1,6 @@
from .abstract_problem import AbstractProblem


class Problem1D(AbstractProblem):

    spatial_dimensions = 1

6  pina/problem/problem2d.py  Normal file
@@ -0,0 +1,6 @@
from .abstract_problem import AbstractProblem


class Problem2D(AbstractProblem):

    spatial_dimensions = 2

6  pina/problem/timedep_problem.py  Normal file
@@ -0,0 +1,6 @@
from .abstract_problem import AbstractProblem


class TimeDependentProblem(AbstractProblem):

    pass
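
For illustration, a minimal concrete problem (not part of the commit) in the class-attribute style used by Burgers1D and Poisson2D earlier in this diff; overriding the abstract properties of AbstractProblem with plain class attributes is enough for ABCMeta to allow instantiation.

from pina.cube import Cube
from pina.problem import Problem1D


class Dummy1D(Problem1D):

    input_variables = ['x']
    output_variables = ['u']
    spatial_domain = Cube([[0, 1]])

    def residual(input_, output_):
        # enforce u(x) = 0 on the whole segment
        return output_['u']

    conditions = {
        'D': {'location': Cube([[0, 1]]), 'func': residual}
    }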

@@ -1,11 +0,0 @@

from .problem import Problem
import numpy as np

class Problem1D(Problem):

    def __init__(self, variables=None, bc=None):
        self._spatial_dimensions = 1
        self.variables = variables
        print(bc)
        self.bc = bc
@@ -1,16 +0,0 @@
from .problem import Problem


class Problem2D(Problem):

    spatial_dimensions = 2

    @property
    def boundary_condition(self):
        return self._boundary_condition

    @boundary_condition.setter
    def boundary_condition(self, bc):
        self._boundary_condition = bc

@@ -1,27 +0,0 @@
import numpy as np
from .problem1d import Problem1D
from .segment import Segment

class TimeDepProblem1D(Problem1D):

    def __init__(self, variables=None, bc=None, initial=None, tend=None, domain_bound=None):
        self.variables = variables
        self._spatial_dimensions = 2
        self.tend = tend
        self.tstart = 0
        if domain_bound is None:
            bound_pts = [bc[0] for bc in self.boundary_conditions]
            domain_bound = np.array([
                [min(bound_pts), max(bound_pts)],
                [self.tstart, self.tend ]
            ])

        self.domain_bound = np.array([[-1, 1],[0, 1]])#domain_bound
        print(domain_bound)
        self.boundary_conditions = (
            (Segment((bc[0][0], self.tstart), (bc[1][0], self.tstart)), initial),
            (Segment((bc[0][0], self.tstart), (bc[0][0], self.tend)), bc[0][1]),
            (Segment((bc[1][0], self.tstart), (bc[1][0], self.tend)), bc[1][1])
        )
@@ -1,78 +0,0 @@
import numpy as np
import scipy.io
import torch

from pina.problem import Problem
from pina.segment import Segment
from pina.cube import Cube
from pina.tdproblem1d import TimeDepProblem1D

def tmp_grad(output_, input_):
    return torch.autograd.grad(
        output_,
        input_.tensor,
        grad_outputs=torch.ones(output_.size()).to(
            dtype=input_.tensor.dtype,
            device=input_.tensor.device),
        create_graph=True, retain_graph=True, allow_unused=True)[0]

class Burgers1D(TimeDepProblem1D):

    def __init__(self):


        def burger_equation(input_, output_):

            grad_u = self.grad(output_['u'], input_)
            grad_x, grad_t = tmp_grad(output_['u'], input_).T
            gradgrad_u_x = self.grad(grad_u['x'], input_)
            grad_xx = tmp_grad(grad_x, input_)[:, 0]
            #print(grad_t, grad_u['t'])

            #rrrr
            return grad_u['t'] + output_['u']*grad_u['x'] - (0.01/torch.pi)*gradgrad_u_x['x']


        def nil_dirichlet(input_, output_):
            u_expected = 0.0
            return output_['u'] - u_expected

        def initial_condition(input_, output_):
            u_expected = -torch.sin(torch.pi*input_['x'])
            return output_['u'] - u_expected



        self.conditions = {
            'gamma1': {'location': Segment((-1, 0), (-1, 1)), 'func': nil_dirichlet},
            'gamma2': {'location': Segment(( 1, 0), ( 1, 1)), 'func': nil_dirichlet},
            'initia': {'location': Segment((-1, 0), ( 1, 0)), 'func': initial_condition},
            'D': {'location': Cube([[-1, 1],[0,1]]), 'func': burger_equation}
        }

        self.input_variables = ['x', 't']
        self.output_variables = ['u']
        self.spatial_domain = Cube([[0, 1]])
        self.temporal_domain = Cube([[0, 1]])

bc = (
    (-1, lambda x: torch.zeros(x.shape[0], 1)),
    ( 1, lambda x: torch.zeros(x.shape[0], 1))
)

initial = lambda x: -np.sin(np.pi*x[:,0]).reshape(-1, 1)

def equation(x, fx):
    grad_x, grad_t = Problem.grad(fx, x).T
    grad_xx = Problem.grad(grad_x, x)[:, 0]
    a = grad_t + fx.flatten()*grad_x - (0.01/torch.pi)*grad_xx
    return a


burgers = TimeDepProblem1D(bc=bc, initial=initial, tend=1, domain_bound=[-1, 1])
burgers.equation = equation

# read data for errors and plots
data = scipy.io.loadmat('Data/burgers_shock.mat')
data_solution = {'grid': np.meshgrid(data['x'], data['t']), 'grid_solution': data['usol'].T}
burgers.data_solution = data_solution
@@ -1,55 +0,0 @@
import numpy as np
import torch
from pina.problem import Problem
from pina.segment import Segment
from pina.parametricproblem2d import ParametricProblem2D

bc = {
    'y': (
        (Segment((0, 0), (4, 0)), lambda x: torch.ones(x.shape[0], 1)),
        (Segment((4, 0), (4, 1)), lambda x: torch.ones(x.shape[0], 1)),
        (Segment((4, 1), (0, 1)), lambda x: torch.ones(x.shape[0], 1)),
        (Segment((0, 1), (0, 0)), lambda x: torch.ones(x.shape[0], 1)),
    ),
    'p': (
        (Segment((0, 0), (4, 0)), lambda x: torch.zeros(x.shape[0], 1)),
        (Segment((4, 0), (4, 1)), lambda x: torch.zeros(x.shape[0], 1)),
        (Segment((4, 1), (0, 1)), lambda x: torch.zeros(x.shape[0], 1)),
        (Segment((0, 1), (0, 0)), lambda x: torch.zeros(x.shape[0], 1)),
    )
}

# optimal control parameters and data
alpha = 1e-5
# yd = 10*x[:, 0]*(1-x[:, 0])*x[:, 1]*(1-x[:, 1])
# three variables
# state y = f[0]
# control u = f[1]
# adjoint p = f[2]

# the three variables

def adjoint_eq(x, f):
    grad_x, grad_y = Problem.grad(f['p'], x)[:, :2].T
    grad_xx = Problem.grad(grad_x, x)[:, 0]
    grad_yy = Problem.grad(grad_y, x)[:, 1]
    return - grad_xx - grad_yy - f['y'] + 1*(x[:, 0] <= 1) + x[:, 2]*(x[:, 0] > 1)

def control_eq(x, f):
    return alpha*f['u'] - f['p']

def state_eq(x, f):
    grad_x, grad_y = Problem.grad(f['y'], x)[:, :2].T
    grad_xx = Problem.grad(grad_x, x)[:, 0]
    grad_yy = Problem.grad(grad_y, x)[:, 1]
    return - grad_xx - grad_yy - f['u']

def equation(x, f):
    return state_eq(x, f) + control_eq(x, f) + adjoint_eq(x, f)

laplace = ParametricProblem2D(
    variables=['y', 'u', 'p'],
    bc=bc,
    domain_bound=np.array([[0, 4],[0, 1]]),
    params_bound=np.array([[0.5, 2.5]]))
laplace.equation = equation
@@ -1,29 +0,0 @@
import numpy as np
import torch
from pina.problem import Problem
from pina.segment import Segment
from pina.parametricproblem2d import ParametricProblem2D

bc = (
    (Segment((-1, -1), ( 1, -1)), lambda x: torch.zeros(x.shape[0], 1)),
    (Segment(( 1, -1), ( 1, 1)), lambda x: torch.zeros(x.shape[0], 1)),
    (Segment(( 1, 1), (-1, 1)), lambda x: torch.zeros(x.shape[0], 1)),
    (Segment((-1, 1), (-1, -1)), lambda x: torch.zeros(x.shape[0], 1)),
)

params_domain = np.array([
    [-1.0, 1.0],
    [-1.0, 1.0]])

def equation(x, fx):
    grad_x, grad_y = Problem.grad(fx, x)[:, :2].T
    grad_xx = Problem.grad(grad_x, x)[:, 0]
    grad_yy = Problem.grad(grad_y, x)[:, 1]
    a = grad_xx + grad_yy - torch.exp(- 2*(x[:, 0] - x[:, 2])**2 - 2*(x[:, 1] - x[:, 3])**2)
    return a


laplace = ParametricProblem2D(bc=bc, domain_bound=params_domain, params_bound=params_domain)

laplace.equation = equation
@@ -1,41 +0,0 @@
import numpy as np
import torch
from pina.segment import Segment
from pina.cube import Cube
from pina.problem2d import Problem2D
from pina.problem import Problem


class Poisson2DProblem(Problem2D):

    def __init__(self):

        def laplace_equation(input_, output_):
            grad_u = self.grad(output_['u'], input_)
            gradgrad_u_x = self.grad(grad_u['x'], input_)
            gradgrad_u_y = self.grad(grad_u['y'], input_)
            force_term = (torch.sin(input_['x']*torch.pi)
                          * torch.sin(input_['y']*torch.pi))
            return gradgrad_u_x['x'] + gradgrad_u_y['y'] - force_term

        def nil_dirichlet(input_, output_):
            value = 0.0
            return output_['u'] - value

        self.conditions = {
            'gamma1': {'location': Segment((0, 0), (1, 0)), 'func': nil_dirichlet},
            'gamma2': {'location': Segment((1, 0), (1, 1)), 'func': nil_dirichlet},
            'gamma3': {'location': Segment((1, 1), (0, 1)), 'func': nil_dirichlet},
            'gamma4': {'location': Segment((0, 1), (0, 0)), 'func': nil_dirichlet},
            'D': {'location': Cube([[0, 1], [0, 1]]), 'func': laplace_equation}
        }

        def poisson_sol(x, y):
            return -(np.sin(x*np.pi)*np.sin(y*np.pi))/(2*np.pi**2)

        self.input_variables = ['x', 'y']
        self.output_variables = ['u']
        self.truth_solution = poisson_sol
        self.spatial_domain = Cube([[0, 1], [0, 1]])

        #self.check() # Check the problem is correctly set
@@ -1,43 +0,0 @@
import numpy as np
import torch
from pina.segment import Segment
from pina.cube import Cube
from pina.problem2d import Problem2D
from pina.problem import Problem


class Poisson2DProblem(Problem2D):

    def __init__(self):

        def laplace_equation(input_, output_):
            grad_u = self.grad(output_['u'], input_)
            gradgrad_u_x = self.grad(grad_u['x'], input_)
            gradgrad_u_y = self.grad(grad_u['y'], input_)
            #force_term = (torch.sin(input_['x']*torch.pi)
            #              * torch.sin(input_['y']*torch.pi))
            force_term = -2*(input_['y']*(1-input_['y']) +
                             input_['x']*(1-input_['x']))
            return gradgrad_u_x['x'] + gradgrad_u_y['y'] - force_term

        def nil_dirichlet(input_, output_):
            value = 0.0
            return output_['u'] - value

        self.conditions = {
            'gamma1': {'location': Segment((0, 0), (1, 0)), 'func': nil_dirichlet},
            'gamma2': {'location': Segment((1, 0), (1, 1)), 'func': nil_dirichlet},
            'gamma3': {'location': Segment((1, 1), (0, 1)), 'func': nil_dirichlet},
            'gamma4': {'location': Segment((0, 1), (0, 0)), 'func': nil_dirichlet},
            'D': {'location': Cube([[0, 1], [0, 1]]), 'func': laplace_equation}
        }

        def poisson_sol(x, y):
            return x*(1-x)*y*(1-y)

        self.input_variables = ['x', 'y']
        self.output_variables = ['u']
        self.truth_solution = poisson_sol
        self.spatial_domain = Cube([[0, 1], [0, 1]])

        #self.check() # Check the problem is correctly set
54  setup.py  Normal file
@@ -0,0 +1,54 @@
from setuptools import setup, find_packages

meta = {}
with open("pina/meta.py") as fp:
    exec(fp.read(), meta)

# Package meta-data.
NAME = meta['__title__']
DESCRIPTION = 'Physic Informed Neural networks for Advance modeling.'
URL = 'https://github.com/mathLab/PINA'
MAIL = meta['__mail__']
AUTHOR = meta['__author__']
VERSION = meta['__version__']
KEYWORDS = 'physics-informed neural-network'

REQUIRED = [
    'future', 'numpy', 'matplotlib', 'torch'
]

EXTRAS = {
    'docs': ['sphinx', 'sphinx_rtd_theme'],
    'test': ['pytest', 'pytest-cov'],
}

LDESCRIPTION = (
    ""
)

setup(
    name=NAME,
    version=VERSION,
    description=DESCRIPTION,
    long_description=LDESCRIPTION,
    author=AUTHOR,
    author_email=MAIL,
    classifiers=[
        'Development Status :: 3 - Alpha',
        'License :: OSI Approved :: MIT License',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.5',
        'Programming Language :: Python :: 3.6',
        'Programming Language :: Python :: 3.7',
        'Intended Audience :: Science/Research',
        'Topic :: Scientific/Engineering :: Mathematics'
    ],
    keywords=KEYWORDS,
    url=URL,
    license='MIT',
    packages=find_packages(),
    install_requires=REQUIRED,
    extras_require=EXTRAS,
    include_package_data=True,
    zip_safe=False,
)