tmp commit - toward 0.0.1
139
.gitignore
vendored
Normal file
@@ -0,0 +1,139 @@
# Byte-compiled / optimized / DLL files
__pycache__/
*.py[cod]
*$py.class

# C extensions
*.so

# Distribution / packaging
.Python
build/
develop-eggs/
dist/
downloads/
eggs/
.eggs/
lib/
lib64/
parts/
sdist/
var/
wheels/
share/python-wheels/
*.egg-info/
.installed.cfg
*.egg
MANIFEST

# PyInstaller
# Usually these files are written by a python script from a template
# before PyInstaller builds the exe, so as to inject date/other infos into it.
*.manifest
*.spec

# Installer logs
pip-log.txt
pip-delete-this-directory.txt

# Unit test / coverage reports
htmlcov/
.tox/
.nox/
.coverage
.coverage.*
.cache
nosetests.xml
coverage.xml
*.cover
*.py,cover
.hypothesis/
.pytest_cache/
cover/

# Translations
*.mo
*.pot

# Django stuff:
*.log
local_settings.py
db.sqlite3
db.sqlite3-journal

# Flask stuff:
instance/
.webassets-cache

# Scrapy stuff:
.scrapy

# Sphinx documentation
docs/_build/

# PyBuilder
.pybuilder/
target/

# Jupyter Notebook
.ipynb_checkpoints

# IPython
profile_default/
ipython_config.py

# pyenv
# For a library or package, you might want to ignore these files since the code is
# intended to run in multiple environments; otherwise, check them in:
# .python-version

# pipenv
# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
# However, in case of collaboration, if having platform-specific dependencies or dependencies
# having no cross-platform support, pipenv may install dependencies that don't work, or not
# install all needed dependencies.
#Pipfile.lock

# PEP 582; used by e.g. github.com/David-OConnor/pyflow
__pypackages__/

# Celery stuff
celerybeat-schedule
celerybeat.pid

# SageMath parsed files
*.sage.py

# Environments
.env
.venv
env/
venv/
ENV/
env.bak/
venv.bak/

# Spyder project settings
.spyderproject
.spyproject

# Rope project settings
.ropeproject

# mkdocs documentation
/site

# mypy
.mypy_cache/
.dmypy.json
dmypy.json

# Pyre type checker
.pyre/

# pytype static type analyzer
.pytype/

# Cython debug symbols
cython_debug/

README.md
@@ -1 +1,2 @@
void
# PINA
# Physics-Informed Neural networks for Advanced modeling

115
examples/run_burgers.py
Normal file
@@ -0,0 +1,115 @@
import sys
import numpy as np
import torch
import argparse
from pina.pinn import PINN
from pina.ppinn import ParametricPINN as pPINN
from pina.label_tensor import LabelTensor
from torch.nn import ReLU, Tanh, Softplus
from problems.burgers import Burgers1D
from pina.deep_feed_forward import DeepFeedForward

from pina.adaptive_functions import AdaptiveSin, AdaptiveCos, AdaptiveTanh


class myFeature(torch.nn.Module):
    """
    Feature: sin(pi * x)
    """

    def __init__(self, idx):
        super(myFeature, self).__init__()
        self.idx = idx

    def forward(self, x):
        return torch.sin(torch.pi * x[:, self.idx])


class myExp(torch.nn.Module):
    def __init__(self, idx):
        super().__init__()
        self.idx = idx

    def forward(self, x):
        return torch.exp(x[:, self.idx])


if __name__ == "__main__":

    parser = argparse.ArgumentParser(description="Run PINA")
    group = parser.add_mutually_exclusive_group(required=True)
    group.add_argument("-s", "-save", action="store_true")
    group.add_argument("-l", "-load", action="store_true")
    parser.add_argument("id_run", help="number of run", type=int)
    parser.add_argument("features", help="extra features", type=int)
    args = parser.parse_args()

    feat = [myFeature(0)] if args.features else []

    burgers_problem = Burgers1D()
    model = DeepFeedForward(
        layers=[20, 10, 5],
        #layers=[8, 4, 2],
        #layers=[16, 8, 4, 4],
        #layers=[20, 4, 4, 4],
        output_variables=burgers_problem.output_variables,
        input_variables=burgers_problem.input_variables,
        func=Tanh,
        extra_features=feat
    )

    pinn = PINN(
        burgers_problem,
        model,
        lr=0.006,
        error_norm='mse',
        regularizer=0,
        lr_accelerate=None)

    if args.s:

        pinn.span_pts(8000, 'latin', ['D'])
        pinn.span_pts(50, 'random', ['gamma1', 'gamma2', 'initia'])
        #pinn.plot_pts()
        pinn.train(10000, 1000)
        #with open('burgers_history_{}_{}.txt'.format(args.id_run, args.features), 'w') as file_:
        #    for i, losses in enumerate(pinn.history):
        #        file_.write('{} {}\n'.format(i, sum(losses).item()))
        pinn.save_state('pina.burger.{}.{}'.format(args.id_run, args.features))

    else:
        pinn.load_state('pina.burger.{}.{}'.format(args.id_run, args.features))
        #pinn.plot(256, filename='pina.burger.{}.{}.jpg'.format(args.id_run, args.features))

        print(pinn.history)
        with open('burgers_history_{}_{}.txt'.format(args.id_run, args.features), 'w') as file_:
            for i, losses in enumerate(pinn.history):
                print(losses)
                file_.write('{} {}\n'.format(i, sum(losses)))
        import scipy.io
        data = scipy.io.loadmat('Data/burgers_shock.mat')
        data_solution = {'grid': np.meshgrid(data['x'], data['t']), 'grid_solution': data['usol'].T}
        import matplotlib
        matplotlib.use('Qt5Agg')
        import matplotlib.pyplot as plt

        for t in [25, 50, 75]:
            input = torch.cat([
                torch.linspace(-1, 1, 256).reshape(-1, 1),
                torch.ones(size=(256, 1)) * t / 100],
                axis=1).double()
            output = pinn.model(input)
            fout = 'pina.burgers.{}.{}.t{}.dat'.format(args.id_run, args.features, t)
            with open(fout, 'w') as f_:
                f_.write('x utruth upinn\n')
                for x, utruth, upinn in zip(data['x'], data['usol'][:, t], output.tensor.detach()):
                    f_.write('{} {} {}\n'.format(x[0], utruth, upinn.item()))
            plt.plot(data['usol'][:, t], label='truth')
            plt.plot(output.tensor.detach(), 'x', label='pinn')
            plt.legend()
            plt.show()

207
examples/run_parametric_elliptic_optimal_control_alpha.py
Normal file
@@ -0,0 +1,207 @@
import numpy as np
import torch
import argparse
from pina.pinn import PINN
from pina.ppinn import ParametricPINN as pPINN
from pina.label_tensor import LabelTensor
from torch.nn import ReLU, Tanh, Softplus
from pina.adaptive_functions.adaptive_softplus import AdaptiveSoftplus
from problems.parametric_elliptic_optimal_control_alpha_variable import ParametricEllipticOptimalControl
from pina.multi_deep_feed_forward import MultiDeepFeedForward
from pina.deep_feed_forward import DeepFeedForward

alpha = 1


class myFeature(torch.nn.Module):
    """
    Feature: (1 - x1**2) * (1 - x2**2)
    """

    def __init__(self):
        super(myFeature, self).__init__()

    def forward(self, x):
        return (-x[:, 0]**2 + 1) * (-x[:, 1]**2 + 1)


class CustomMultiDFF(MultiDeepFeedForward):

    def __init__(self, dff_dict):
        super().__init__(dff_dict)

    def forward(self, x):
        out = self.uu(x)
        p = LabelTensor((out['u_param'] * x[:, 3]).reshape(-1, 1), ['p'])
        a = LabelTensor.hstack([out, p])
        return a


model = CustomMultiDFF(
    {
        'uu': {
            'input_variables': ['x1', 'x2', 'mu', 'alpha'],
            'output_variables': ['u_param', 'y'],
            'layers': [40, 40, 20],
            'func': Softplus,
            'extra_features': [myFeature()],
        },
        # 'u_param': {
        #     'input_variables': ['u', 'mu'],
        #     'output_variables': ['u_param'],
        #     'layers': [],
        #     'func': None
        # },
        # 'p': {
        #     'input_variables': ['u'],
        #     'output_variables': ['p'],
        #     'layers': [10],
        #     'func': None
        # },
    }
)


opc = ParametricEllipticOptimalControl(alpha)


if __name__ == "__main__":

    parser = argparse.ArgumentParser(description="Run PINA")
    group = parser.add_mutually_exclusive_group(required=True)
    group.add_argument("-s", "-save", action="store_true")
    group.add_argument("-l", "-load", action="store_true")
    args = parser.parse_args()

    # model = DeepFeedForward(
    #     layers=[40, 40, 20],
    #     output_variables=['u_param', 'y', 'p'],
    #     input_variables=opc.input_variables+['mu', 'alpha'],
    #     func=Softplus,
    #     extra_features=[myFeature()]
    # )

    pinn = pPINN(
        opc,
        model,
        lr=0.002,
        error_norm='mse',
        regularizer=1e-8,
        lr_accelerate=None)

    if args.s:

        pinn.span_pts(30, 'grid', ['D1'])
        pinn.span_pts(50, 'grid', ['gamma1', 'gamma2', 'gamma3', 'gamma4'])
        pinn.train(10000, 20)
        # with open('ocp_wrong_history.txt', 'w') as file_:
        #     for i, losses in enumerate(pinn.history):
        #         file_.write('{} {}\n'.format(i, sum(losses).item()))

        pinn.save_state('pina.ocp')

    else:
        #pinn.load_state('working.pina.ocp')
        pinn.load_state('pina.ocp')

        import matplotlib
        matplotlib.use('GTK3Agg')
        import matplotlib.pyplot as plt

        # res = 64
        # param = torch.tensor([[3., 1]])
        # pts_container = []
        # for mn, mx in [[-1, 1], [-1, 1]]:
        #     pts_container.append(np.linspace(mn, mx, res))
        # grids_container = np.meshgrid(*pts_container)
        # unrolled_pts = torch.tensor([t.flatten() for t in grids_container]).T
        # unrolled_pts = torch.cat([unrolled_pts, param.double().repeat(unrolled_pts.shape[0], 1).reshape(-1, 2)], axis=1)

        # unrolled_pts = LabelTensor(unrolled_pts, ['x1', 'x2', 'mu', 'alpha'])
        # Z_pred = pinn.model(unrolled_pts.tensor)
        # print(Z_pred.tensor.shape)

        # plt.subplot(2, 3, 1)
        # plt.pcolor(Z_pred['y'].reshape(res, res).detach())
        # plt.colorbar()
        # plt.subplot(2, 3, 2)
        # plt.pcolor(Z_pred['u_param'].reshape(res, res).detach())
        # plt.colorbar()
        # plt.subplot(2, 3, 3)
        # plt.pcolor(Z_pred['p'].reshape(res, res).detach())
        # plt.colorbar()
        # with open('ocp_mu3_a1_plot.txt', 'w') as f_:
        #     f_.write('x y u p ys\n')
        #     for (x, y), tru, pre, e in zip(unrolled_pts[:, :2],
        #                                    Z_pred['u_param'].reshape(-1, 1),
        #                                    Z_pred['p'].reshape(-1, 1),
        #                                    Z_pred['y'].reshape(-1, 1),
        #                                    ):
        #         f_.write('{} {} {} {} {}\n'.format(x.item(), y.item(), tru.item(), pre.item(), e.item()))

        # param = torch.tensor([[3.0, 0.01]])
        # unrolled_pts = torch.tensor([t.flatten() for t in grids_container]).T
        # unrolled_pts = torch.cat([unrolled_pts, param.double().repeat(unrolled_pts.shape[0], 1).reshape(-1, 2)], axis=1)
        # unrolled_pts = LabelTensor(unrolled_pts, ['x1', 'x2', 'mu', 'alpha'])
        # Z_pred = pinn.model(unrolled_pts.tensor)

        # plt.subplot(2, 3, 4)
        # plt.pcolor(Z_pred['y'].reshape(res, res).detach())
        # plt.colorbar()
        # plt.subplot(2, 3, 5)
        # plt.pcolor(Z_pred['u_param'].reshape(res, res).detach())
        # plt.colorbar()
        # plt.subplot(2, 3, 6)
        # plt.pcolor(Z_pred['p'].reshape(res, res).detach())
        # plt.colorbar()

        # plt.show()
        # with open('ocp_mu3_a0.01_plot.txt', 'w') as f_:
        #     f_.write('x y u p ys\n')
        #     for (x, y), tru, pre, e in zip(unrolled_pts[:, :2],
        #                                    Z_pred['u_param'].reshape(-1, 1),
        #                                    Z_pred['p'].reshape(-1, 1),
        #                                    Z_pred['y'].reshape(-1, 1),
        #                                    ):
        #         f_.write('{} {} {} {} {}\n'.format(x.item(), y.item(), tru.item(), pre.item(), e.item()))

        y = {}
        u = {}
        for alpha in [0.01, 0.1, 1]:
            y[alpha] = []
            u[alpha] = []
            for p in np.linspace(0.5, 3, 32):
                a = pinn.model(LabelTensor(torch.tensor([[0, 0, p, alpha]]).double(), ['x1', 'x2', 'mu', 'alpha']).tensor)
                y[alpha].append(a['y'].detach().numpy()[0])
                u[alpha].append(a['u_param'].detach().numpy()[0])

        plt.plot(np.linspace(0.5, 3, 32), u[1], label='u (alpha=1)')
        plt.plot(np.linspace(0.5, 3, 32), u[0.01], label='u (alpha=0.01)')
        plt.plot(np.linspace(0.5, 3, 32), u[0.1], label='u (alpha=0.1)')
        plt.plot([1, 2, 3], [0.28, 0.56, 0.85], 'o', label='Truth values')
        plt.legend()
        plt.show()
        print(y[1])
        print(y[0.1])
        print(y[0.01])
        with open('elliptic_param_y.txt', 'w') as f_:
            f_.write('mu 1 01 001\n')
            for mu, y1, y01, y001 in zip(np.linspace(0.5, 3, 32), y[1], y[0.1], y[0.01]):
                f_.write('{} {} {} {}\n'.format(mu, y1, y01, y001))

        with open('elliptic_param_u.txt', 'w') as f_:
            f_.write('mu 1 01 001\n')
            for mu, y1, y01, y001 in zip(np.linspace(0.5, 3, 32), u[1], u[0.1], u[0.01]):
                f_.write('{} {} {} {}\n'.format(mu, y1, y01, y001))

        # y is a dict of curves; plot the alpha=1 curve, which matches the
        # truth values below
        plt.plot(np.linspace(0.5, 3, 32), y[1], label='y (alpha=1)')
        plt.plot([1, 2, 3], [0.062, 0.12, 0.19], 'o', label='Truth values')
        plt.legend()
        plt.show()

157
examples/run_parametric_poisson.py
Normal file
@@ -0,0 +1,157 @@
import sys
import numpy as np
import torch
import argparse
from pina.pinn import PINN
from pina.ppinn import ParametricPINN as pPINN
from pina.label_tensor import LabelTensor
from torch.nn import ReLU, Tanh, Softplus
from problems.parametric_poisson import ParametricPoisson2DProblem as Poisson2D
from pina.deep_feed_forward import DeepFeedForward

from pina.adaptive_functions import AdaptiveSin, AdaptiveCos, AdaptiveTanh


class myFeature(torch.nn.Module):
    """
    Feature: exp(-2*(x1 - mu1)**2 - 2*(x2 - mu2)**2)
    """

    def __init__(self):
        super(myFeature, self).__init__()

    def forward(self, x):
        return torch.exp(- 2*(x[:, 0] - x[:, 2])**2 - 2*(x[:, 1] - x[:, 3])**2)


if __name__ == "__main__":

    parser = argparse.ArgumentParser(description="Run PINA")
    group = parser.add_mutually_exclusive_group(required=True)
    group.add_argument("-s", "-save", action="store_true")
    group.add_argument("-l", "-load", action="store_true")
    parser.add_argument("id_run", help="number of run", type=int)
    parser.add_argument("features", help="extra features", type=int)
    args = parser.parse_args()

    feat = [myFeature()] if args.features else []

    poisson_problem = Poisson2D()
    model = DeepFeedForward(
        layers=[200, 40, 10],
        output_variables=poisson_problem.output_variables,
        input_variables=poisson_problem.input_variables+['mu1', 'mu2'],
        func=Softplus,
        extra_features=feat
    )

    pinn = pPINN(
        poisson_problem,
        model,
        lr=0.0006,
        regularizer=1e-6,
        lr_accelerate=None)

    if args.s:

        pinn.span_pts(30, 'chebyshev', ['D'])
        pinn.span_pts(50, 'grid', ['gamma1', 'gamma2', 'gamma3', 'gamma4'])
        #pinn.plot_pts()
        pinn.train(10000, 10)
        pinn.save_state('pina.poisson_param')

    else:
        pinn.load_state('pina.poisson_param')
        #pinn.plot(40, torch.tensor([-0.8, -0.8]))
        #pinn.plot(40, torch.tensor([ 0.8,  0.8]))

        from smithers.io import VTUHandler
        from scipy.interpolate import griddata
        import matplotlib
        matplotlib.use('GTK3Agg')
        import matplotlib.pyplot as plt

        res = 64
        fname_minus = 'Poisson_param_08minus000000.vtu'
        param = torch.tensor([-0.8, -0.8])
        pts_container = []
        for mn, mx in [[-1, 1], [-1, 1]]:
            pts_container.append(np.linspace(mn, mx, res))
        grids_container = np.meshgrid(*pts_container)
        unrolled_pts = torch.tensor([t.flatten() for t in grids_container]).T
        unrolled_pts = torch.cat([unrolled_pts, param.double().repeat(unrolled_pts.shape[0]).reshape(-1, 2)], axis=1)

        #unrolled_pts.to(dtype=self.dtype)
        unrolled_pts = LabelTensor(unrolled_pts, ['x1', 'x2', 'mu1', 'mu2'])

        Z_pred = pinn.model(unrolled_pts.tensor)
        data = VTUHandler.read(fname_minus)

        print(data['points'][:, :2].shape)
        print(data['point_data']['f_16'].shape)
        print(grids_container[0].shape)
        print(grids_container[1].shape)
        Z_truth = griddata(data['points'][:, :2], data['point_data']['f_16'], (grids_container[0], grids_container[1]))

        err = np.abs(Z_truth + Z_pred.tensor.reshape(res, res).detach().numpy())

        plt.subplot(1, 3, 1)
        plt.pcolor(-Z_pred.tensor.reshape(res, res).detach())
        plt.colorbar()
        plt.subplot(1, 3, 2)
        plt.pcolor(Z_truth)
        plt.colorbar()
        plt.subplot(1, 3, 3)
        plt.pcolor(err, vmin=0, vmax=0.009)
        plt.colorbar()
        plt.show()

        print(unrolled_pts.tensor.shape)
        with open('parpoisson_minus_plot.txt', 'w') as f_:
            f_.write('x y truth pred e\n')
            for (x, y), tru, pre, e in zip(unrolled_pts[:, :2], Z_truth.reshape(-1, 1), -Z_pred.tensor.reshape(-1, 1), err.reshape(-1, 1)):
                f_.write('{} {} {} {} {}\n'.format(x.item(), y.item(), tru.item(), pre.item(), e.item()))

        fname_plus = 'Poisson_param_08plus000000.vtu'
        param = torch.tensor([0.8, 0.8])
        pts_container = []
        for mn, mx in [[-1, 1], [-1, 1]]:
            pts_container.append(np.linspace(mn, mx, res))
        grids_container = np.meshgrid(*pts_container)
        unrolled_pts = torch.tensor([t.flatten() for t in grids_container]).T
        unrolled_pts = torch.cat([unrolled_pts, param.double().repeat(unrolled_pts.shape[0]).reshape(-1, 2)], axis=1)

        #unrolled_pts.to(dtype=self.dtype)
        unrolled_pts = LabelTensor(unrolled_pts, ['x1', 'x2', 'mu1', 'mu2'])

        Z_pred = pinn.model(unrolled_pts.tensor)
        data = VTUHandler.read(fname_plus)

        print(data['points'][:, :2].shape)
        print(data['point_data']['f_16'].shape)
        print(grids_container[0].shape)
        print(grids_container[1].shape)
        Z_truth = griddata(data['points'][:, :2], data['point_data']['f_16'], (grids_container[0], grids_container[1]))

        err = np.abs(Z_truth + Z_pred.tensor.reshape(res, res).detach().numpy())

        plt.subplot(1, 3, 1)
        plt.pcolor(-Z_pred.tensor.reshape(res, res).detach())
        plt.colorbar()
        plt.subplot(1, 3, 2)
        plt.pcolor(Z_truth)
        plt.colorbar()
        plt.subplot(1, 3, 3)
        plt.pcolor(err, vmin=0, vmax=0.001)
        plt.colorbar()
        plt.show()
        with open('parpoisson_plus_plot.txt', 'w') as f_:
            f_.write('x y truth pred e\n')
            for (x, y), tru, pre, e in zip(unrolled_pts[:, :2], Z_truth.reshape(-1, 1), -Z_pred.tensor.reshape(-1, 1), err.reshape(-1, 1)):
                f_.write('{} {} {} {} {}\n'.format(x.item(), y.item(), tru.item(), pre.item(), e.item()))

70
examples/run_poisson.py
Normal file
@@ -0,0 +1,70 @@
import sys
import numpy as np
import torch
import argparse
from pina.pinn import PINN
from pina.ppinn import ParametricPINN as pPINN
from pina.label_tensor import LabelTensor
from torch.nn import ReLU, Tanh, Softplus
from problems.poisson2D import Poisson2DProblem as Poisson2D
from pina.deep_feed_forward import DeepFeedForward

from pina.adaptive_functions import AdaptiveSin, AdaptiveCos, AdaptiveTanh


class myFeature(torch.nn.Module):
    """
    Feature: sin(pi * x1) * sin(pi * x2)
    """

    def __init__(self):
        super(myFeature, self).__init__()

    def forward(self, x):
        return torch.sin(x[:, 0]*torch.pi) * torch.sin(x[:, 1]*torch.pi)


if __name__ == "__main__":

    parser = argparse.ArgumentParser(description="Run PINA")
    group = parser.add_mutually_exclusive_group(required=True)
    group.add_argument("-s", "-save", action="store_true")
    group.add_argument("-l", "-load", action="store_true")
    parser.add_argument("id_run", help="number of run", type=int)
    parser.add_argument("features", help="extra features", type=int)
    args = parser.parse_args()

    feat = [myFeature()] if args.features else []

    poisson_problem = Poisson2D()
    model = DeepFeedForward(
        layers=[10, 10],
        output_variables=poisson_problem.output_variables,
        input_variables=poisson_problem.input_variables,
        func=Softplus,
        extra_features=feat
    )

    pinn = PINN(
        poisson_problem,
        model,
        lr=0.003,
        error_norm='mse',
        regularizer=1e-8,
        lr_accelerate=None)

    if args.s:

        pinn.span_pts(10, 'grid', ['D'])
        pinn.span_pts(10, 'grid', ['gamma1', 'gamma2', 'gamma3', 'gamma4'])
        #pinn.plot_pts()
        pinn.train(10000, 100)
        # history entries are lists of floats, so no .item() is needed
        with open('poisson_history_{}_{}.txt'.format(args.id_run, args.features), 'w') as file_:
            for i, losses in enumerate(pinn.history):
                file_.write('{} {}\n'.format(i, sum(losses)))
        pinn.save_state('pina.poisson')

    else:
        pinn.load_state('pina.poisson')
        pinn.plot(40)

3
pina/__init__.py
Normal file
@@ -0,0 +1,3 @@
from .pinn import PINN
from .deep_feed_forward import DeepFeedForward
from .problem1d import Problem1D
6
pina/adaptive_functions/__init__.py
Normal file
@@ -0,0 +1,6 @@

from .adaptive_tanh import AdaptiveTanh
from .adaptive_sin import AdaptiveSin
from .adaptive_cos import AdaptiveCos
from .adaptive_linear import AdaptiveLinear
from .adaptive_square import AdaptiveSquare
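The adaptive activations exported above are drop-in replacements for the fixed torch.nn activations used in the example scripts. A minimal usage sketch, assuming the package is importable and using the DeepFeedForward signature added later in this commit:

import torch
from pina.deep_feed_forward import DeepFeedForward
from pina.adaptive_functions import AdaptiveTanh

# `func` is instantiated once per hidden layer, so each layer gets its own
# trainable alpha/scale/translate parameters
model = DeepFeedForward(layers=[20, 10, 5],
                        input_variables=['x', 't'],
                        output_variables=['u'],
                        func=AdaptiveTanh)
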
50
pina/adaptive_functions/adaptive_cos.py
Normal file
@@ -0,0 +1,50 @@
import torch
from torch.nn.parameter import Parameter


class AdaptiveCos(torch.nn.Module):
    '''
    Implementation of an adaptive cosine activation, with trainable scale,
    frequency and translation parameters.
    Shape:
        - Input: (N, *) where * means, any number of additional
          dimensions
        - Output: (N, *), same shape as the input
    Parameters:
        - alpha - trainable frequency parameter
    References:
        - See the related paper (soft exponential activation, on which this
          module is templated):
          https://arxiv.org/pdf/1602.01321.pdf
    Examples:
        >>> a1 = AdaptiveCos()
        >>> x = torch.randn(256)
        >>> x = a1(x)
    '''
    def __init__(self, alpha=None):
        '''
        Initialization.
        INPUT:
            - alpha: trainable frequency parameter;
              alpha is initialized to 1.0 by default
        '''
        super(AdaptiveCos, self).__init__()

        # initialize alpha (torch Parameters are trainable by default)
        if alpha is None:
            self.alpha = Parameter(torch.tensor(1.0))
        else:
            self.alpha = Parameter(torch.tensor(alpha))

        self.scale = Parameter(torch.tensor(1.0))
        self.translate = Parameter(torch.tensor(0.0))

    def forward(self, x):
        '''
        Forward pass of the function.
        Applies the function to the input elementwise.
        '''
        return self.scale * (torch.cos(self.alpha * x + self.translate))
45
pina/adaptive_functions/adaptive_exp.py
Normal file
@@ -0,0 +1,45 @@
import torch
from torch.nn.parameter import Parameter


class AdaptiveExp(torch.nn.Module):
    '''
    Implementation of an adaptive exponential activation, with trainable
    scale, rate and translation parameters.
    Shape:
        - Input: (N, *) where * means, any number of additional
          dimensions
        - Output: (N, *), same shape as the input
    Parameters:
        - alpha - trainable rate parameter
    References:
        - See the related paper (soft exponential activation, on which this
          module is templated):
          https://arxiv.org/pdf/1602.01321.pdf
    Examples:
        >>> a1 = AdaptiveExp()
        >>> x = torch.randn(256)
        >>> x = a1(x)
    '''
    def __init__(self):
        '''
        Initialization.
        The scale, alpha and translate parameters are randomly initialized
        around 1, 1 and 0 respectively.
        '''
        super(AdaptiveExp, self).__init__()

        # torch Parameters are trainable by default
        self.scale = Parameter(torch.normal(torch.tensor(1.0), torch.tensor(0.1)))
        self.alpha = Parameter(torch.normal(torch.tensor(1.0), torch.tensor(0.1)))
        self.translate = Parameter(torch.normal(torch.tensor(0.0), torch.tensor(0.1)))

    def forward(self, x):
        '''
        Forward pass of the function.
        Applies the function to the input elementwise.
        '''
        # assumed intent: an exponential with trainable rate, scale and
        # shift (the original body returned scale * (x + translate) and
        # never used alpha)
        return self.scale * torch.exp(self.alpha * (x + self.translate))
42
pina/adaptive_functions/adaptive_linear.py
Normal file
@@ -0,0 +1,42 @@
import torch
from torch.nn.parameter import Parameter


class AdaptiveLinear(torch.nn.Module):
    '''
    Implementation of an adaptive linear activation, with trainable scale
    and translation parameters.
    Shape:
        - Input: (N, *) where * means, any number of additional
          dimensions
        - Output: (N, *), same shape as the input
    Parameters:
        - scale, translate - trainable parameters
    References:
        - See the related paper (soft exponential activation, on which this
          module is templated):
          https://arxiv.org/pdf/1602.01321.pdf
    Examples:
        >>> a1 = AdaptiveLinear()
        >>> x = torch.randn(256)
        >>> x = a1(x)
    '''
    def __init__(self):
        '''
        Initialization.
        The scale and translate parameters are initialized to 1.0 and 0.0.
        '''
        super(AdaptiveLinear, self).__init__()

        # torch Parameters are trainable by default
        self.scale = Parameter(torch.tensor(1.0))
        self.translate = Parameter(torch.tensor(0.0))

    def forward(self, x):
        '''
        Forward pass of the function.
        Applies the function to the input elementwise.
        '''
        return self.scale * (x + self.translate)
43
pina/adaptive_functions/adaptive_relu.py
Normal file
@@ -0,0 +1,43 @@
import torch
from torch.nn.parameter import Parameter


class AdaptiveReLU(torch.nn.Module):
    '''
    Implementation of an adaptive ReLU activation, with trainable scale
    and translation parameters.
    Shape:
        - Input: (N, *) where * means, any number of additional
          dimensions
        - Output: (N, *), same shape as the input
    Parameters:
        - scale, translate - trainable parameters
    References:
        - See the related paper (soft exponential activation, on which this
          module is templated):
          https://arxiv.org/pdf/1602.01321.pdf
    Examples:
        >>> a1 = AdaptiveReLU()
        >>> x = torch.randn(256)
        >>> x = a1(x)
    '''
    def __init__(self):
        '''
        Initialization.
        The scale and translate parameters are randomly initialized in [0, 1).
        '''
        super(AdaptiveReLU, self).__init__()

        # torch Parameters are trainable by default
        self.scale = Parameter(torch.rand(1))
        self.translate = Parameter(torch.rand(1))

    def forward(self, x):
        '''
        Forward pass of the function.
        Applies the function to the input elementwise.
        '''
        return torch.relu(x + self.translate) * self.scale
46
pina/adaptive_functions/adaptive_sin.py
Normal file
@@ -0,0 +1,46 @@
import torch
from torch.nn.parameter import Parameter


class AdaptiveSin(torch.nn.Module):
    '''
    Implementation of an adaptive sine activation, with trainable scale,
    frequency and translation parameters.
    Shape:
        - Input: (N, *) where * means, any number of additional
          dimensions
        - Output: (N, *), same shape as the input
    Parameters:
        - alpha - trainable frequency parameter
    References:
        - See the related paper (soft exponential activation, on which this
          module is templated):
          https://arxiv.org/pdf/1602.01321.pdf
    Examples:
        >>> a1 = AdaptiveSin()
        >>> x = torch.randn(256)
        >>> x = a1(x)
    '''
    def __init__(self, alpha=None):
        '''
        Initialization.
        INPUT:
            - alpha: trainable frequency parameter;
              alpha is randomly initialized around 1.0 by default
        '''
        super(AdaptiveSin, self).__init__()

        # initialize alpha (torch Parameters are trainable by default)
        if alpha is None:
            self.alpha = Parameter(torch.normal(torch.tensor(1.0), torch.tensor(0.1)))
        else:
            self.alpha = Parameter(torch.tensor(alpha))

        self.scale = Parameter(torch.normal(torch.tensor(1.0), torch.tensor(0.1)))
        self.translate = Parameter(torch.normal(torch.tensor(0.0), torch.tensor(0.1)))

    def forward(self, x):
        '''
        Forward pass of the function.
        Applies the function to the input elementwise.
        '''
        return self.scale * (torch.sin(self.alpha * x + self.translate))
42
pina/adaptive_functions/adaptive_softplus.py
Normal file
@@ -0,0 +1,42 @@
import torch
from torch.nn.parameter import Parameter


class AdaptiveSoftplus(torch.nn.Module):
    '''
    Implementation of an adaptive softplus activation, with a trainable
    scale parameter.
    Shape:
        - Input: (N, *) where * means, any number of additional
          dimensions
        - Output: (N, *), same shape as the input
    Parameters:
        - scale - trainable parameter
    References:
        - See the related paper (soft exponential activation, on which this
          module is templated):
          https://arxiv.org/pdf/1602.01321.pdf
    Examples:
        >>> a1 = AdaptiveSoftplus()
        >>> x = torch.randn(256)
        >>> x = a1(x)
    '''
    def __init__(self):
        '''
        Initialization.
        The scale parameter is randomly initialized in [0, 1).
        '''
        super().__init__()

        self.soft = torch.nn.Softplus()

        # torch Parameters are trainable by default
        self.scale = Parameter(torch.rand(1))

    def forward(self, x):
        '''
        Forward pass of the function.
        Applies the function to the input elementwise.
        '''
        return self.soft(x) * self.scale
42
pina/adaptive_functions/adaptive_square.py
Normal file
@@ -0,0 +1,42 @@
import torch
from torch.nn.parameter import Parameter


class AdaptiveSquare(torch.nn.Module):
    '''
    Implementation of an adaptive square activation, with trainable scale
    and translation parameters.
    Shape:
        - Input: (N, *) where * means, any number of additional
          dimensions
        - Output: (N, *), same shape as the input
    Parameters:
        - scale, translate - trainable parameters
    References:
        - See the related paper (soft exponential activation, on which this
          module is templated):
          https://arxiv.org/pdf/1602.01321.pdf
    Examples:
        >>> a1 = AdaptiveSquare()
        >>> x = torch.randn(256)
        >>> x = a1(x)
    '''
    def __init__(self, alpha=None):
        '''
        Initialization.
        The scale and translate parameters are initialized to 1.0 and 0.0;
        alpha is accepted for API uniformity with the other adaptive
        functions but is unused.
        '''
        super(AdaptiveSquare, self).__init__()

        # torch Parameters are trainable by default
        self.scale = Parameter(torch.tensor(1.0))
        self.translate = Parameter(torch.tensor(0.0))

    def forward(self, x):
        '''
        Forward pass of the function.
        Applies the function to the input elementwise.
        '''
        return self.scale * (x + self.translate)**2
52
pina/adaptive_functions/adaptive_tanh.py
Normal file
@@ -0,0 +1,52 @@
import torch
from torch.nn.parameter import Parameter


class AdaptiveTanh(torch.nn.Module):
    '''
    Implementation of an adaptive hyperbolic tangent activation, with
    trainable scale, slope and translation parameters.
    Shape:
        - Input: (N, *) where * means, any number of additional
          dimensions
        - Output: (N, *), same shape as the input
    Parameters:
        - alpha - trainable slope parameter
    References:
        - See the related paper (soft exponential activation, on which this
          module is templated):
          https://arxiv.org/pdf/1602.01321.pdf
    Examples:
        >>> a1 = AdaptiveTanh()
        >>> x = torch.randn(256)
        >>> x = a1(x)
    '''
    def __init__(self, alpha=None):
        '''
        Initialization.
        INPUT:
            - alpha: trainable slope parameter;
              alpha is initialized to 1.0 by default
        '''
        super(AdaptiveTanh, self).__init__()

        # initialize alpha (torch Parameters are trainable by default)
        if alpha is None:
            self.alpha = Parameter(torch.tensor(1.0))
        else:
            self.alpha = Parameter(torch.tensor(alpha))

        self.scale = Parameter(torch.tensor(1.0))
        self.translate = Parameter(torch.tensor(0.0))

    def forward(self, x):
        '''
        Forward pass of the function.
        Applies the function to the input elementwise.
        '''
        x = x + self.translate  # avoid modifying the input in-place
        # equivalent to self.scale * torch.tanh(self.alpha * x)
        return self.scale * (torch.exp(self.alpha * x) - torch.exp(-self.alpha * x)) / (torch.exp(self.alpha * x) + torch.exp(-self.alpha * x))
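Since (exp(a*x) - exp(-a*x)) / (exp(a*x) + exp(-a*x)) is exactly tanh(a*x), the forward pass above can be sanity-checked against torch.tanh; a minimal sketch, assuming the package is importable:

import torch
from pina.adaptive_functions import AdaptiveTanh

act = AdaptiveTanh(alpha=2.0)
x = torch.linspace(-3, 3, 101)
# recompute the activation with torch.tanh using the module's own parameters
expected = act.scale * torch.tanh(act.alpha * (x + act.translate))
print(torch.allclose(act(x), expected))  # True, up to floating-point error
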
6
pina/chebyshev.py
Normal file
@@ -0,0 +1,6 @@
from mpmath import chebyt, chop, taylor
import numpy as np


def chebyshev_roots(n):
    """ Return the roots of the degree-*n* Chebyshev polynomial (all in [-1, 1]) """
    return np.roots(chop(taylor(lambda x: chebyt(n, x), 0, n))[::-1])

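The roots of the degree-n Chebyshev polynomial have the closed form cos((2k - 1) * pi / (2n)), which gives a quick check of chebyshev_roots; a minimal sketch, assuming mpmath and numpy are installed:

import numpy as np
from pina.chebyshev import chebyshev_roots

n = 5
# np.roots may return a complex-typed array; the roots themselves are real
roots = np.sort(np.real(chebyshev_roots(n)))
expected = np.sort(np.cos((2 * np.arange(1, n + 1) - 1) * np.pi / (2 * n)))
print(np.allclose(roots, expected))  # True, up to round-off
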
29
pina/cube.py
Normal file
@@ -0,0 +1,29 @@
import numpy as np
from .chebyshev import chebyshev_roots


class Cube():

    def __init__(self, bound):
        self.bound = np.asarray(bound)

    def discretize(self, n, mode='random'):

        if mode == 'random':
            pts = np.random.uniform(size=(n, self.bound.shape[0]))
        elif mode == 'chebyshev':
            pts = np.array([chebyshev_roots(n) * .5 + .5 for _ in range(self.bound.shape[0])])
            grids = np.meshgrid(*pts)
            pts = np.hstack([grid.reshape(-1, 1) for grid in grids])
        elif mode == 'grid':
            pts = np.array([np.linspace(0, 1, n) for _ in range(self.bound.shape[0])])
            grids = np.meshgrid(*pts)
            pts = np.hstack([grid.reshape(-1, 1) for grid in grids])
        elif mode == 'lh' or mode == 'latin':
            from scipy.stats import qmc
            sampler = qmc.LatinHypercube(d=self.bound.shape[0])
            pts = sampler.random(n)
        else:
            raise ValueError('unknown mode: {}'.format(mode))

        # Scale pts from the unit cube to the prescribed bounds
        pts *= self.bound[:, 1] - self.bound[:, 0]
        pts += self.bound[:, 0]

        return pts
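All modes sample the unit cube first and are then rescaled to `bound`; a minimal usage sketch, with bounds given as one [min, max] pair per dimension as the scaling code above assumes:

import numpy as np
from pina.cube import Cube

unit_square = Cube([[0., 1.], [0., 1.]])
grid_pts = unit_square.discretize(4, mode='grid')       # 4 points per axis
print(grid_pts.shape)                                   # (16, 2)
random_pts = unit_square.discretize(16, mode='random')  # 16 uniform samples
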
83
pina/deep_feed_forward.py
Normal file
@@ -0,0 +1,83 @@

from .problem import Problem
import torch
import torch.nn as nn
import numpy as np
from .cube import Cube
torch.pi = torch.acos(torch.zeros(1)).item() * 2  # which is 3.1415927410125732
from torch.nn import Tanh, ReLU
#import torch.nn.utils.prune as prune
from pina.adaptive_functions import AdaptiveLinear
from pina.label_tensor import LabelTensor


class DeepFeedForward(torch.nn.Module):

    def __init__(self,
                 inner_size=20,
                 n_layers=2,
                 func=nn.Tanh,
                 input_variables=None,
                 output_variables=None,
                 layers=None,
                 extra_features=None):
        '''
        Fully-connected network; the outputs of the extra features are
        appended to the input columns before the first layer.
        '''
        super(DeepFeedForward, self).__init__()

        if extra_features is None:
            extra_features = []
        self.extra_features = nn.Sequential(*extra_features)

        if input_variables is None:
            input_variables = ['x']
        if output_variables is None:
            output_variables = ['y']

        self.input_variables = input_variables
        self.input_dimension = len(input_variables)

        self.output_variables = output_variables
        self.output_dimension = len(output_variables)

        n_features = len(extra_features)

        if layers is None:
            layers = [inner_size] * n_layers

        tmp_layers = layers.copy()
        tmp_layers.insert(0, self.input_dimension + n_features)
        tmp_layers.append(self.output_dimension)

        self.layers = []
        for i in range(len(tmp_layers) - 1):
            self.layers.append(nn.Linear(tmp_layers[i], tmp_layers[i+1]))

        if isinstance(func, list):
            self.functions = func
        else:
            self.functions = [func for _ in range(len(self.layers) - 1)]

        unique_list = []
        for layer, func in zip(self.layers[:-1], self.functions):
            unique_list.append(layer)
            if func is not None:
                unique_list.append(func())
        unique_list.append(self.layers[-1])

        self.model = nn.Sequential(*unique_list)

    def forward(self, x):

        nf = len(self.extra_features)
        if nf == 0:
            return LabelTensor(self.model(x), self.output_variables)

        # append the extra features as additional input columns
        #input_ = torch.zeros(x.shape[0], nf+self.input_dimension, dtype=x.dtype, device=x.device)
        input_ = torch.zeros(x.shape[0], nf + x.shape[1], dtype=x.dtype, device=x.device)
        input_[:, :x.shape[1]] = x
        for i, feature in enumerate(self.extra_features, start=x.shape[1]):
            input_[:, i] = feature(x)
        return LabelTensor(self.model(input_), self.output_variables)

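A minimal sketch of the extra-feature mechanism in forward() above: each feature module is evaluated on the raw input and appended as an additional input column. The SinFeature class here is hypothetical, for illustration only:

import torch
from pina.deep_feed_forward import DeepFeedForward

class SinFeature(torch.nn.Module):  # hypothetical example feature
    def forward(self, x):
        return torch.sin(torch.pi * x[:, 0])

net = DeepFeedForward(layers=[10, 10],
                      input_variables=['x', 't'],
                      output_variables=['u'],
                      extra_features=[SinFeature()])
out = net(torch.rand(5, 2))    # first layer sees 2 + 1 input columns
print(out.tensor.shape)        # torch.Size([5, 1])
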
49
pina/label_tensor.py
Normal file
@@ -0,0 +1,49 @@
import torch


class LabelTensor():

    def __init__(self, x, labels):

        if len(labels) != x.shape[1]:
            raise ValueError(
                'number of labels ({}) does not match the number of '
                'columns ({})'.format(len(labels), x.shape[1]))
        self.__labels = labels
        self.tensor = x

    def __getitem__(self, key):
        if key in self.labels:
            return self.tensor[:, self.labels.index(key)]
        else:
            return self.tensor.__getitem__(key)

    def __repr__(self):
        return repr(self.tensor)

    def __str__(self):
        return '{} {}'.format(self.tensor, self.labels)

    @property
    def labels(self):
        return self.__labels

    @staticmethod
    def hstack(labeltensor_list):
        concatenated_tensor = torch.cat([lt.tensor for lt in labeltensor_list], axis=1)
        concatenated_label = sum([lt.labels for lt in labeltensor_list], [])
        return LabelTensor(concatenated_tensor, concatenated_label)


if __name__ == "__main__":
    import numpy as np
    a = np.random.uniform(size=(20, 3))
    p = torch.from_numpy(a)
    t = LabelTensor(p, labels=['u', 'p', 't'])
    print(t)
    print(t['u'])
    t.tensor *= 2  # scale the underlying tensor in place
    print(t['u'])
    print(t[:, 0])

27
pina/multi_deep_feed_forward.py
Normal file
@@ -0,0 +1,27 @@

from .problem import Problem
import torch
import torch.nn as nn
import numpy as np
from .cube import Cube
torch.pi = torch.acos(torch.zeros(1)).item() * 2  # which is 3.1415927410125732
from torch.nn import Tanh, ReLU
import torch.nn.utils.prune as prune
from pina.adaptive_functions import AdaptiveLinear
from pina.deep_feed_forward import DeepFeedForward


class MultiDeepFeedForward(torch.nn.Module):

    def __init__(self, dff_dict):
        '''
        Container that builds one DeepFeedForward sub-network (registered
        as an attribute) for each entry of `dff_dict`.
        '''
        super().__init__()

        if not isinstance(dff_dict, dict):
            raise TypeError('dff_dict must be a dict of DeepFeedForward constructor arguments')

        for name, constructor_args in dff_dict.items():
            setattr(self, name, DeepFeedForward(**constructor_args))

9
pina/parametricproblem2d.py
Normal file
@@ -0,0 +1,9 @@
from .problem2d import Problem2D
import numpy as np


class ParametricProblem2D(Problem2D):

    def __init__(self, variables=None, bc=None, params_bound=None, domain_bound=None):

        Problem2D.__init__(self, variables=variables, bc=bc, domain_bound=domain_bound)
        self.params_domain = params_bound
488
pina/pinn.py
Normal file
@@ -0,0 +1,488 @@
from mpmath import chebyt, chop, taylor

from .problem import Problem
import torch
import torch.nn as nn
import numpy as np
from .cube import Cube
from .segment import Segment
from .deep_feed_forward import DeepFeedForward
from pina.label_tensor import LabelTensor
torch.pi = torch.acos(torch.zeros(1)).item() * 2  # which is 3.1415927410125732


class PINN(object):

    def __init__(self,
                 problem,
                 model,
                 optimizer=torch.optim.Adam,
                 lr=0.001,
                 regularizer=0.00001,
                 data_weight=1.,
                 dtype=torch.float64,
                 device='cpu',
                 lr_accelerate=None,
                 error_norm='mse'):
        '''
        :param Problem problem: the formulation of the problem.
        :param dict architecture: a dictionary containing the information to
            build the model. Valid options are:

            - inner_size [int] the number of neurons in the hidden layers;
              by default is 20.
            - n_layers [int] the number of hidden layers; by default is 4.
            - func [nn.Module or str] the activation function; passing a
              `str` it is possible to choose an adaptive function (e.g.
              'adapt_tanh'); by default is the non-adaptive hyperbolic
              tangent.

        :param float lr: the learning rate; default is 0.001.
        :param float regularizer: the coefficient for the L2 regularizer term.
        :param type dtype: the data type to use for the model. Valid options
            are `torch.float32` and `torch.float64` (`torch.float16` only on
            GPU); default is `torch.float64`.
        :param float lr_accelerate: the coefficient that controls the learning
            rate increase, such that, for all the epochs in which the loss is
            decreasing, the learning rate is updated using
            $learning_rate = learning_rate * lr_accelerate$.
            When the loss stops decreasing, the learning rate is reset to the
            initial value. [TODO test parameters]
        '''

        self.problem = problem

        # self._architecture = architecture if architecture else dict()
        # self._architecture['input_dimension'] = self.problem.domain_bound.shape[0]
        # self._architecture['output_dimension'] = len(self.problem.variables)
        # if hasattr(self.problem, 'params_domain'):
        #     self._architecture['input_dimension'] += self.problem.params_domain.shape[0]

        self.accelerate = lr_accelerate

        self.error_norm = error_norm

        if device == 'cuda' and not torch.cuda.is_available():
            raise RuntimeError('CUDA device requested but not available')
        self.device = torch.device(device)

        self.dtype = dtype
        self.history = []

        self.model = model
        self.model.to(dtype=self.dtype, device=self.device)

        self.input_pts = {}
        self.truth_values = {}

        self.trained_epoch = 0
        self.optimizer = optimizer(
            self.model.parameters(), lr=lr, weight_decay=regularizer)

        self.data_weight = data_weight

    @property
    def problem(self):
        return self._problem

    @problem.setter
    def problem(self, problem):
        if not isinstance(problem, Problem):
            raise TypeError('problem must be a Problem instance')
        self._problem = problem

    def get_data_residuals(self):

        data_residuals = []

        for output in self.data_pts:
            data_values_pred = self.model(self.data_pts[output])
            data_residuals.append(data_values_pred - self.data_values[output])

        return torch.cat(data_residuals)

    def get_phys_residuals(self):
        """
        Return the residuals of the problem equations evaluated at the
        physical points.
        """

        residuals = []
        for equation in self.problem.equation:
            residuals.append(equation(self.phys_pts, self.model(self.phys_pts)))
        return residuals

    def _compute_norm(self, vec):
        """
        Compute the norm of the `vec` one-dimensional tensor based on the
        `self.error_norm` attribute.

        .. todo: complete

        :param vec torch.tensor: the tensor
        """
        if isinstance(self.error_norm, int):
            return torch.sum(torch.abs(vec**self.error_norm))**(1./self.error_norm)
        elif self.error_norm == 'mse':
            return torch.mean(vec**2)
        elif self.error_norm == 'me':
            return torch.mean(torch.abs(vec))
        else:
            raise RuntimeError('unknown error norm: {}'.format(self.error_norm))

    def save_state(self, filename):

        checkpoint = {
            'epoch': self.trained_epoch,
            'model_state': self.model.state_dict(),
            'optimizer_state': self.optimizer.state_dict(),
            'optimizer_class': self.optimizer.__class__,
            'history': self.history,
        }

        # TODO save also architecture param?
        # if isinstance(self.model, DeepFeedForward):
        #     checkpoint['model_class'] = self.model.__class__
        #     checkpoint['model_structure'] = {
        #     }
        torch.save(checkpoint, filename)

    def load_state(self, filename):

        checkpoint = torch.load(filename)
        self.model.load_state_dict(checkpoint['model_state'])

        self.optimizer = checkpoint['optimizer_class'](self.model.parameters())
        self.optimizer.load_state_dict(checkpoint['optimizer_state'])

        self.trained_epoch = checkpoint['epoch']
        self.history = checkpoint['history']

        return self

    def span_pts(self, n, mode='grid', locations='all'):
        '''
        Discretize the domains of the given conditions and store the
        resulting points as inputs for the training.
        '''

        if locations == 'all':
            locations = [condition for condition in self.problem.conditions]

        for location in locations:
            manifold, func = self.problem.conditions[location].values()
            if torch.is_tensor(manifold):
                pts = manifold
            else:
                pts = manifold.discretize(n, mode)
                pts = torch.from_numpy(pts)

            self.input_pts[location] = LabelTensor(pts, self.problem.input_variables)
            self.input_pts[location].tensor.to(dtype=self.dtype, device=self.device)
            self.input_pts[location].tensor.requires_grad_(True)
            self.input_pts[location].tensor.retain_grad()

    def plot_pts(self, locations='all'):
        import matplotlib
        matplotlib.use('GTK3Agg')
        import matplotlib.pyplot as plt
        if locations == 'all':
            locations = [condition for condition in self.problem.conditions]

        for location in locations:
            x, y = self.input_pts[location].tensor.T
            plt.plot(x.detach(), y.detach(), 'o', label=location)
            np.savetxt('burgers_{}_pts.txt'.format(location), self.input_pts[location].tensor.detach(), header='x y', delimiter=' ')

        plt.legend()
        plt.show()

    def train(self, stop=100, frequency_print=2, trial=None):

        epoch = 0

        while True:

            losses = []
            for condition_name in self.problem.conditions:
                pts = self.input_pts[condition_name]
                predicted = self.model(pts.tensor)
                if isinstance(self.problem.conditions[condition_name]['func'], list):
                    for func in self.problem.conditions[condition_name]['func']:
                        residuals = func(pts, predicted)
                        losses.append(self._compute_norm(residuals))
                else:
                    residuals = self.problem.conditions[condition_name]['func'](pts, predicted)
                    losses.append(self._compute_norm(residuals))
                #print(condition_name, losses[-1])

            self.optimizer.zero_grad()
            sum(losses).backward()
            self.optimizer.step()

            self.trained_epoch += 1
            if epoch % 50 == 0:
                self.history.append([loss.detach().item() for loss in losses])
            epoch += 1

            if trial:
                import optuna
                # report the aggregated loss to optuna for pruning
                trial.report(sum(losses).item(), epoch)
                if trial.should_prune():
                    raise optuna.exceptions.TrialPruned()

            if isinstance(stop, int):
                if epoch == stop:
                    break
            elif isinstance(stop, float):
                if sum(losses) < stop:
                    break

            if epoch % frequency_print == 0:
                print('[epoch {:05d}] {:.6e} '.format(self.trained_epoch, sum(losses).item()), end='')
                for loss in losses:
                    print('{:.6e} '.format(loss), end='')
                print()

        return sum(losses).item()

    def error(self, dtype='l2', res=100):

        import numpy as np
        if hasattr(self.problem, 'truth_solution') and self.problem.truth_solution is not None:
            pts_container = []
            for mn, mx in self.problem.domain_bound:
                pts_container.append(np.linspace(mn, mx, res))
            grids_container = np.meshgrid(*pts_container)
            Z_true = self.problem.truth_solution(*grids_container)

        elif hasattr(self.problem, 'data_solution') and self.problem.data_solution is not None:
            grids_container = self.problem.data_solution['grid']
            Z_true = self.problem.data_solution['grid_solution']
        try:
            unrolled_pts = torch.tensor([t.flatten() for t in grids_container]).T.to(dtype=self.dtype, device=self.device)
            Z_pred = self.model(unrolled_pts)
            Z_pred = Z_pred.detach().numpy().reshape(grids_container[0].shape)

            if dtype == 'l2':
                return np.linalg.norm(Z_pred - Z_true)/np.linalg.norm(Z_true)
            else:
                # TODO H1
                pass
        except Exception:
            print("")
            print("Something went wrong...")
            print("Not able to compute the error. Please pass a data solution or a true solution")

    def plot(self, res, filename=None, variable=None):
        '''
        Plot the PINN solution against the reference solution.
        '''
        import matplotlib
        matplotlib.use('Agg')
        import matplotlib.pyplot as plt

        pts_container = []
        #for mn, mx in [[-1, 1], [-1, 1]]:
        #for mn, mx in [[-1, 1], [0, 1]]:
        for mn, mx in [[0, 1], [0, 1]]:
            pts_container.append(np.linspace(mn, mx, res))
        grids_container = np.meshgrid(*pts_container)
        unrolled_pts = torch.tensor([t.flatten() for t in grids_container]).T
        unrolled_pts = unrolled_pts.to(dtype=self.dtype)
        Z_pred = self.model(unrolled_pts)

        #######################################################
        # poisson
        # Z_truth = self.problem.truth_solution(unrolled_pts[:, 0], unrolled_pts[:, 1])
        # Z_pred = Z_pred.tensor.detach().reshape(grids_container[0].shape)
        # Z_truth = Z_truth.detach().reshape(grids_container[0].shape)
        # err = np.abs(Z_pred-Z_truth)

        # with open('poisson2_nofeat_plot.txt', 'w') as f_:
        #     f_.write('x y truth pred e\n')
        #     for (x, y), tru, pre, e in zip(unrolled_pts, Z_truth.reshape(-1, 1), Z_pred.reshape(-1, 1), err.reshape(-1, 1)):
        #         f_.write('{} {} {} {} {}\n'.format(x.item(), y.item(), tru.item(), pre.item(), e.item()))
        # n = Z_pred.shape[1]
        # plt.figure(figsize=(16, 6))
        # plt.subplot(1, 3, 1)
        # plt.contourf(*grids_container, Z_truth)
        # plt.colorbar()
        # plt.subplot(1, 3, 2)
        # plt.contourf(*grids_container, Z_pred)
        # plt.colorbar()
        # plt.subplot(1, 3, 3)
        # plt.contourf(*grids_container, err)
        # plt.colorbar()
        # plt.show()

        #######################################################
        # burgers
        import scipy.io
        data = scipy.io.loadmat('Data/burgers_shock.mat')
        data_solution = {'grid': np.meshgrid(data['x'], data['t']), 'grid_solution': data['usol'].T}

        grids_container = data_solution['grid']
        print(data_solution['grid_solution'].shape)
        unrolled_pts = torch.tensor([t.flatten() for t in grids_container]).T
        unrolled_pts = unrolled_pts.to(dtype=self.dtype)
        Z_pred = self.model(unrolled_pts)
        Z_truth = data_solution['grid_solution']

        Z_pred = Z_pred.tensor.detach().reshape(grids_container[0].shape)
        print(Z_pred, Z_truth)
        err = np.abs(Z_pred.numpy()-Z_truth)

        with open('burgers_nofeat_plot.txt', 'w') as f_:
            f_.write('x y truth pred e\n')
            for (x, y), tru, pre, e in zip(unrolled_pts, Z_truth.reshape(-1, 1), Z_pred.reshape(-1, 1), err.reshape(-1, 1)):
                f_.write('{} {} {} {} {}\n'.format(x.item(), y.item(), tru.item(), pre.item(), e.item()))
        n = Z_pred.shape[1]
        plt.figure(figsize=(16, 6))
        plt.subplot(1, 3, 1)
        plt.contourf(*grids_container, Z_truth, vmin=-1, vmax=1)
        plt.colorbar()
        plt.subplot(1, 3, 2)
        plt.contourf(*grids_container, Z_pred, vmin=-1, vmax=1)
        plt.colorbar()
        plt.subplot(1, 3, 3)
        plt.contourf(*grids_container, err)
        plt.colorbar()
        plt.show()

        # for i, output in enumerate(Z_pred.tensor.T, start=1):
        #     output = output.detach().numpy().reshape(grids_container[0].shape)
        #     plt.subplot(1, n, i)
        #     plt.contourf(*grids_container, output)
        #     plt.colorbar()

        if filename is None:
            plt.show()
        else:
            plt.savefig(filename)

    def plot_params(self, res, param, filename=None, variable=None):
        '''
        Plot the parametric PINN solution for a given parameter value.
        '''
        import matplotlib
        matplotlib.use('GTK3Agg')
        import matplotlib.pyplot as plt

        if hasattr(self.problem, 'truth_solution') and self.problem.truth_solution is not None:
            n_plot = 2
        elif hasattr(self.problem, 'data_solution') and self.problem.data_solution is not None:
            n_plot = 2
        else:
            n_plot = 1

        fig, axs = plt.subplots(nrows=1, ncols=n_plot, figsize=(n_plot*6, 4))
        if not isinstance(axs, np.ndarray):
            axs = [axs]

        if hasattr(self.problem, 'data_solution') and self.problem.data_solution is not None:
            grids_container = self.problem.data_solution['grid']
            Z_true = self.problem.data_solution['grid_solution']
        elif hasattr(self.problem, 'truth_solution') and self.problem.truth_solution is not None:

            pts_container = []
            for mn, mx in self.problem.domain_bound:
                pts_container.append(np.linspace(mn, mx, res))

            grids_container = np.meshgrid(*pts_container)
            Z_true = self.problem.truth_solution(*grids_container)

        pts_container = []
        for mn, mx in self.problem.domain_bound:
            pts_container.append(np.linspace(mn, mx, res))
        grids_container = np.meshgrid(*pts_container)
        unrolled_pts = torch.tensor([t.flatten() for t in grids_container]).T.to(dtype=self.dtype)
        #print(unrolled_pts)
        #print(param)
        param_unrolled_pts = torch.cat((unrolled_pts, param.repeat(unrolled_pts.shape[0], 1)), 1)
        if variable is None:
            variable = self.problem.variables[0]
            Z_pred = self.evaluate(param_unrolled_pts)[variable]
            variable = "Solution"
        else:
            Z_pred = self.evaluate(param_unrolled_pts)[variable]

        Z_pred = Z_pred.detach().numpy().reshape(grids_container[0].shape)
        set_pred = axs[0].contourf(*grids_container, Z_pred)
        axs[0].set_title('PINN [trained epoch = {}]'.format(self.trained_epoch) + " " + variable)  # TODO add info about parameter in the title
        fig.colorbar(set_pred, ax=axs[0])

        if n_plot == 2:

            set_true = axs[1].contourf(*grids_container, Z_true)

            axs[1].set_title('Truth solution')
            fig.colorbar(set_true, ax=axs[1])

        if filename is None:
            plt.show()
        else:
            fig.savefig(filename + " " + variable)

def plot_error(self, res, filename=None):
|
||||
import matplotlib
|
||||
matplotlib.use('GTK3Agg')
|
||||
import matplotlib.pyplot as plt
|
||||
|
||||
|
||||
fig, axs = plt.subplots(nrows=1, ncols=1, figsize=(6,4))
|
||||
if not isinstance(axs, np.ndarray): axs = [axs]
|
||||
|
||||
if hasattr(self.problem, 'data_solution') and self.problem.data_solution is not None:
|
||||
grids_container = self.problem.data_solution['grid']
|
||||
Z_true = self.problem.data_solution['grid_solution']
|
||||
elif hasattr(self.problem, 'truth_solution') and self.problem.truth_solution is not None:
|
||||
pts_container = []
|
||||
for mn, mx in self.problem.domain_bound:
|
||||
pts_container.append(np.linspace(mn, mx, res))
|
||||
|
||||
grids_container = np.meshgrid(*pts_container)
|
||||
Z_true = self.problem.truth_solution(*grids_container)
|
||||
try:
|
||||
unrolled_pts = torch.tensor([t.flatten() for t in grids_container]).T.to(dtype=self.type)
|
||||
|
||||
Z_pred = self.model(unrolled_pts)
|
||||
Z_pred = Z_pred.detach().numpy().reshape(grids_container[0].shape)
|
||||
set_pred = axs[0].contourf(*grids_container, abs(Z_pred - Z_true))
|
||||
axs[0].set_title('PINN [trained epoch = {}]'.format(self.trained_epoch) + "Pointwise Error")
|
||||
fig.colorbar(set_pred, ax=axs[0])
|
||||
|
||||
if filename is None:
|
||||
plt.show()
|
||||
else:
|
||||
fig.savefig(filename)
|
||||
except:
|
||||
print("")
|
||||
print("Something went wrong...")
|
||||
print("Not able to plot the error. Please pass a data solution or a true solution")
|
||||
|
||||
'''
|
||||
print(self.pred_loss.item(),loss.item(), self.old_loss.item())
|
||||
if self.accelerate is not None:
|
||||
if self.pred_loss > loss and loss >= self.old_loss:
|
||||
self.current_lr = self.original_lr
|
||||
#print('restart')
|
||||
elif (loss-self.pred_loss).item() < 0.1:
|
||||
self.current_lr += .5*self.current_lr
|
||||
#print('powa')
|
||||
else:
|
||||
self.current_lr -= .5*self.current_lr
|
||||
#print(self.current_lr)
|
||||
#self.current_lr = min(loss.item()*3, 0.02)
|
||||
|
||||
for g in self.optimizer.param_groups:
|
||||
g['lr'] = self.current_lr
|
||||
'''
|
||||
465
pina/ppinn.py
Normal file
@@ -0,0 +1,465 @@
from mpmath import chebyt, chop, taylor

from .problem import Problem
import torch
import torch.nn as nn
import numpy as np
from .cube import Cube
from .deep_feed_forward import DeepFeedForward
from pina.label_tensor import LabelTensor
from pina.pinn import PINN

torch.pi = torch.acos(torch.zeros(1)).item() * 2  # which is 3.1415927410125732


class ParametricPINN(PINN):

    def __init__(self,
                 problem,
                 model,
                 optimizer=torch.optim.Adam,
                 lr=0.001,
                 regularizer=0.00001,
                 data_weight=1.,
                 dtype=torch.float64,
                 device='cpu',
                 lr_accelerate=None,
                 error_norm='mse'):
        '''
        :param Problem problem: the formulation of the problem.
        :param dict architecture: a dictionary containing the information to
            build the model. Valid options are:

            - inner_size [int]: the number of neurons in the hidden layers;
              by default 20.
            - n_layers [int]: the number of hidden layers; by default 4.
            - func [nn.Module or str]: the activation function; passing a
              `str` it is possible to choose an adaptive function (among
              'adapt_tanh'); by default the non-adaptive hyperbolic tangent.

        :param float lr: the learning rate; default is 0.001.
        :param float regularizer: the coefficient for the L2 regularizer term.
        :param type dtype: the data type to use for the model. Valid options
            are `torch.float32` and `torch.float64` (`torch.float16` only on
            GPU); default is `torch.float64`.
        :param float lr_accelerate: the coefficient that controls the learning
            rate increase, such that, for all the epochs in which the loss is
            decreasing, the learning rate is updated using
            $learning_rate = learning_rate * lr_accelerate$.
            When the loss stops decreasing, the learning rate is reset to the
            initial value. [TODO test parameters]
        '''

        self.problem = problem

        # self._architecture = architecture if architecture else dict()
        # self._architecture['input_dimension'] = self.problem.domain_bound.shape[0]
        # self._architecture['output_dimension'] = len(self.problem.variables)
        # if hasattr(self.problem, 'params_domain'):
        #     self._architecture['input_dimension'] += self.problem.params_domain.shape[0]

        self.accelerate = lr_accelerate
        self.error_norm = error_norm

        if device == 'cuda' and not torch.cuda.is_available():
            raise RuntimeError('CUDA device requested but not available')
        self.device = torch.device(device)

        self.dtype = dtype
        self.history = []

        self.model = model
        self.model.to(dtype=self.dtype, device=self.device)

        self.input_pts = {}
        self.truth_values = {}

        self.trained_epoch = 0
        self.optimizer = optimizer(
            self.model.parameters(), lr=lr, weight_decay=regularizer)

        self.data_weight = data_weight
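
        # Construction sketch (hedged; `poisson` and `net` are placeholder
        # names): `problem` is any Problem subclass instance and `model` a
        # torch.nn.Module with matching input/output sizes, e.g.
        #
        #   pinn = ParametricPINN(problem=poisson, model=net, lr=0.001,
        #                         error_norm='mse')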

    @property
    def problem(self):
        return self._problem

    @problem.setter
    def problem(self, problem):
        if not isinstance(problem, Problem):
            raise TypeError('problem must be a Problem instance')
        self._problem = problem

    def get_data_residuals(self):

        data_residuals = []

        for output in self.data_pts:
            data_values_pred = self.model(self.data_pts[output])
            data_residuals.append(data_values_pred - self.data_values[output])

        return torch.cat(data_residuals)

    def get_phys_residuals(self):
        """
        Return the residuals of the problem equations evaluated at the
        physical points.
        """

        residuals = []
        for equation in self.problem.equation:
            residuals.append(equation(self.phys_pts, self.model(self.phys_pts)))
        return residuals

    def _compute_norm(self, vec):
        """
        Compute the norm of the `vec` one-dimensional tensor based on the
        `self.error_norm` attribute.

        .. todo: complete

        :param vec torch.tensor: the tensor
        """
        if isinstance(self.error_norm, int):
            return torch.sum(torch.abs(vec**self.error_norm))**(1./self.error_norm)
        elif self.error_norm == 'mse':
            return torch.mean(vec**2)
        elif self.error_norm == 'me':
            return torch.mean(torch.abs(vec))
        else:
            raise RuntimeError('Unknown error norm: {}'.format(self.error_norm))
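
        # Worked example: for vec = torch.tensor([1., 2.]), error_norm='mse'
        # gives mean(vec**2) = (1 + 4) / 2 = 2.5, while error_norm=2 gives the
        # l2 norm (1 + 4)**0.5, roughly 2.2361.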

    def save_state(self, filename):

        checkpoint = {
            'epoch': self.trained_epoch,
            'model_state': self.model.state_dict(),
            'optimizer_state': self.optimizer.state_dict(),
            'optimizer_class': self.optimizer.__class__,
        }

        # TODO save also architecture param?
        #if isinstance(self.model, DeepFeedForward):
        #    checkpoint['model_class'] = self.model.__class__
        #    checkpoint['model_structure'] = {
        #    }
        torch.save(checkpoint, filename)

    def load_state(self, filename):

        checkpoint = torch.load(filename)
        self.model.load_state_dict(checkpoint['model_state'])

        self.optimizer = checkpoint['optimizer_class'](self.model.parameters())
        self.optimizer.load_state_dict(checkpoint['optimizer_state'])

        self.trained_epoch = checkpoint['epoch']
        return self
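
        # Round-trip sketch ('checkpoint.pt' is a placeholder filename): both
        # model and optimizer state_dicts are stored, so training can resume:
        #
        #   pinn.save_state('checkpoint.pt')
        #   pinn.load_state('checkpoint.pt')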

    def span_pts(self, n, mode='grid', locations='all'):
        '''
        Discretize the location of each problem condition and store the
        resulting points in `self.input_pts`.
        '''

        if locations == 'all':
            locations = [condition for condition in self.problem.conditions]

        for location in locations:
            manifold, func = self.problem.conditions[location].values()
            if torch.is_tensor(manifold):
                pts = manifold
            else:
                pts = manifold.discretize(n, mode)
                pts = torch.from_numpy(pts)  # only numpy output needs conversion

            self.input_pts[location] = LabelTensor(pts, self.problem.input_variables)
            self.input_pts[location].tensor = self.input_pts[location].tensor.to(
                dtype=self.dtype, device=self.device)  # .to() is not in-place
            self.input_pts[location].tensor.requires_grad_(True)
            self.input_pts[location].tensor.retain_grad()
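
        # Example (hedged): for a condition whose location is a Segment,
        # span_pts(10, mode='grid') stores 10 equispaced points on it;
        # Segment.discretize also supports 'random' and 'chebyshev' sampling.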

    def train(self, stop=100, frequency_print=2, trial=None):

        epoch = 0

        ## TODO for elliptic
        # parameters = torch.cat(torch.linspace(
        #     self.problem.parameter_domain[0, 0],
        #     self.problem.parameter_domain[0, 1],
        #     5)

        ## for param laplacian
        #parameters = torch.rand(50, 2)*2-1
        parameters = torch.from_numpy(Cube([[-1, 1], [-1, 1]]).discretize(5, 'grid'))
        # alpha_p = torch.logspace(start=-2, end=0, steps=10)
        # mu_p = torch.linspace(0.5, 3, 5)
        # g1_, g2_ = torch.meshgrid(alpha_p, mu_p)
        # parameters = torch.cat([g2_.reshape(-1, 1), g1_.reshape(-1, 1)], axis=1)
        print(parameters)

        while True:

            losses = []

            for condition_name in self.problem.conditions:

                pts = self.input_pts[condition_name]
                pts = torch.cat([
                    pts.tensor.repeat_interleave(parameters.shape[0], dim=0),
                    torch.tile(parameters, (pts.tensor.shape[0], 1))
                ], axis=1)
                pts = LabelTensor(pts, self.problem.input_variables + self.problem.parameters)
                predicted = self.model(pts.tensor)
                #predicted = self.model(pts)

                if isinstance(self.problem.conditions[condition_name]['func'], list):
                    for func in self.problem.conditions[condition_name]['func']:
                        residuals = func(pts, None, predicted)
                        losses.append(self._compute_norm(residuals))
                else:
                    residuals = self.problem.conditions[condition_name]['func'](pts, None, predicted)
                    losses.append(self._compute_norm(residuals))

            self.optimizer.zero_grad()
            sum(losses).backward()
            self.optimizer.step()

            #for p in parameters:
            #    pts = self.input_pts[condition_name]
            #    #pts = torch.cat([pts.tensor, p.double().repeat(pts.tensor.shape[0]).reshape(-1, 2)], axis=1)
            #    #pts = torch.cat([pts.tensor, p.double().repeat(pts.tensor.shape[0]).reshape(-1, 1)], axis=1)
            #    #print(self.problem.input_variables)
            #    #print(self.problem.parameters)
            #    #print(pts.shape)
            #    print(pts.tensor.repeat_interleave(parameters.shape[0]))
            #    a = torch.cat([
            #        pts.tensor.repeat_interleave(parameters.shape[0], dim=0),
            #        torch.tile(parameters, (pts.tensor.shape[0], 1))
            #    ], axis=1)
            #    for i in a:
            #        print(i.detach())
            #    pts = LabelTensor(pts, self.problem.input_variables + self.problem.parameters)
            #    print(pts.labels)
            #    predicted = self.model(pts.tensor)
            #    #predicted = self.model(pts)
            #    if isinstance(self.problem.conditions[condition_name]['func'], list):
            #        for func in self.problem.conditions[condition_name]['func']:
            #            residuals = func(pts, LabelTensor(p.reshape(1, -1), ['mu', 'alpha']), predicted)
            #            tmp_losses.append(self._compute_norm(residuals))
            #    else:
            #        residuals = self.problem.conditions[condition_name]['func'](pts, p, predicted)
            #        tmp_losses.append(self._compute_norm(residuals))
            #losses.append(sum(tmp_losses))

            self.trained_epoch += 1
            #if epoch % 10 == 0:
            #    self.history.append(losses)

            epoch += 1

            if trial:
                import optuna
                trial.report(losses[0].item() + losses[1].item(), epoch)
                if trial.should_prune():
                    raise optuna.exceptions.TrialPruned()

            if isinstance(stop, int):
                if epoch == stop:
                    break
            elif isinstance(stop, float):
                if losses[0].item() + losses[1].item() < stop:
                    break

            if epoch % frequency_print == 0:
                print('[epoch {:05d}] {:.6e} '.format(self.trained_epoch, sum(losses).item()), end='')
                for loss in losses:
                    print('{:.6e} '.format(loss.item()), end='')
                print()

        return sum(losses).item()
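
    # Note on the point assembly in train(): each spatial point is paired with
    # every parameter sample (a Cartesian product). E.g. for pts = [[a], [b]]
    # and parameters = [[p], [q]], repeat_interleave gives [[a], [a], [b], [b]],
    # torch.tile gives [[p], [q], [p], [q]], and the concatenation along axis=1
    # is [[a, p], [a, q], [b, p], [b, q]].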

    def error(self, dtype='l2', res=100):

        import numpy as np
        if hasattr(self.problem, 'truth_solution') and self.problem.truth_solution is not None:
            pts_container = []
            for mn, mx in self.problem.domain_bound:
                pts_container.append(np.linspace(mn, mx, res))
            grids_container = np.meshgrid(*pts_container)
            Z_true = self.problem.truth_solution(*grids_container)

        elif hasattr(self.problem, 'data_solution') and self.problem.data_solution is not None:
            grids_container = self.problem.data_solution['grid']
            Z_true = self.problem.data_solution['grid_solution']
        try:
            unrolled_pts = torch.tensor([t.flatten() for t in grids_container]).T.to(dtype=self.dtype, device=self.device)
            Z_pred = self.model(unrolled_pts)
            Z_pred = Z_pred.detach().numpy().reshape(grids_container[0].shape)

            if dtype == 'l2':
                # relative error in the Frobenius norm
                return np.linalg.norm(Z_pred - Z_true)/np.linalg.norm(Z_true)
            else:
                # TODO H1
                pass
        except Exception:
            print("")
            print("Something went wrong...")
            print("Not able to compute the error. Please pass a data solution or a true solution")

    def plot(self, res, param, filename=None, variable=None):
        '''
        Plot the model prediction for the given parameter value `param`.
        '''
        import matplotlib
        matplotlib.use('GTK3Agg')
        import matplotlib.pyplot as plt

        pts_container = []
        for mn, mx in [[-1, 1], [-1, 1]]:
            pts_container.append(np.linspace(mn, mx, res))
        grids_container = np.meshgrid(*pts_container)
        unrolled_pts = torch.tensor([t.flatten() for t in grids_container]).T
        unrolled_pts = torch.cat([unrolled_pts, param.double().repeat(unrolled_pts.shape[0]).reshape(-1, 2)], axis=1)

        unrolled_pts = unrolled_pts.to(dtype=self.dtype)  # .to() is not in-place: reassign
        unrolled_pts = LabelTensor(unrolled_pts, ['x1', 'x2', 'mu1', 'mu2'])

        Z_pred = self.model(unrolled_pts.tensor)
        n = Z_pred.tensor.shape[1]
        plt.figure(figsize=(6*n, 6))

        for i, output in enumerate(Z_pred.tensor.T, start=1):
            output = output.detach().numpy().reshape(grids_container[0].shape)
            plt.subplot(1, n, i)
            plt.contourf(*grids_container, output)
            plt.colorbar()

        if filename is None:
            plt.show()
        else:
            plt.savefig(filename)

    def plot_params(self, res, param, filename=None, variable=None):
        '''
        Plot the prediction for a given parameter value next to the truth/data
        solution, when one is available.
        '''
        import matplotlib
        matplotlib.use('GTK3Agg')
        import matplotlib.pyplot as plt

        if hasattr(self.problem, 'truth_solution') and self.problem.truth_solution is not None:
            n_plot = 2
        elif hasattr(self.problem, 'data_solution') and self.problem.data_solution is not None:
            n_plot = 2
        else:
            n_plot = 1

        fig, axs = plt.subplots(nrows=1, ncols=n_plot, figsize=(n_plot*6, 4))
        if not isinstance(axs, np.ndarray):
            axs = [axs]

        if hasattr(self.problem, 'data_solution') and self.problem.data_solution is not None:
            grids_container = self.problem.data_solution['grid']
            Z_true = self.problem.data_solution['grid_solution']
        elif hasattr(self.problem, 'truth_solution') and self.problem.truth_solution is not None:
            pts_container = []
            for mn, mx in self.problem.domain_bound:
                pts_container.append(np.linspace(mn, mx, res))
            grids_container = np.meshgrid(*pts_container)
            Z_true = self.problem.truth_solution(*grids_container)

        pts_container = []
        for mn, mx in self.problem.domain_bound:
            pts_container.append(np.linspace(mn, mx, res))
        grids_container = np.meshgrid(*pts_container)
        unrolled_pts = torch.tensor([t.flatten() for t in grids_container]).T.to(dtype=self.dtype)
        #print(unrolled_pts)
        #print(param)
        param_unrolled_pts = torch.cat((unrolled_pts, param.repeat(unrolled_pts.shape[0], 1)), 1)
        if variable is None:
            variable = self.problem.variables[0]
            Z_pred = self.evaluate(param_unrolled_pts)[variable]
            variable = "Solution"
        else:
            Z_pred = self.evaluate(param_unrolled_pts)[variable]

        Z_pred = Z_pred.detach().numpy().reshape(grids_container[0].shape)
        set_pred = axs[0].contourf(*grids_container, Z_pred)
        axs[0].set_title('PINN [trained epoch = {}]'.format(self.trained_epoch) + " " + variable)  # TODO add info about parameter in the title
        fig.colorbar(set_pred, ax=axs[0])

        if n_plot == 2:
            set_true = axs[1].contourf(*grids_container, Z_true)
            axs[1].set_title('Truth solution')
            fig.colorbar(set_true, ax=axs[1])

        if filename is None:
            plt.show()
        else:
            fig.savefig(filename + " " + variable)

    def plot_error(self, res, filename=None):
        import matplotlib
        matplotlib.use('GTK3Agg')
        import matplotlib.pyplot as plt

        fig, axs = plt.subplots(nrows=1, ncols=1, figsize=(6, 4))
        if not isinstance(axs, np.ndarray):
            axs = [axs]

        if hasattr(self.problem, 'data_solution') and self.problem.data_solution is not None:
            grids_container = self.problem.data_solution['grid']
            Z_true = self.problem.data_solution['grid_solution']
        elif hasattr(self.problem, 'truth_solution') and self.problem.truth_solution is not None:
            pts_container = []
            for mn, mx in self.problem.domain_bound:
                pts_container.append(np.linspace(mn, mx, res))
            grids_container = np.meshgrid(*pts_container)
            Z_true = self.problem.truth_solution(*grids_container)
        try:
            unrolled_pts = torch.tensor([t.flatten() for t in grids_container]).T.to(dtype=self.dtype)
            Z_pred = self.model(unrolled_pts)
            Z_pred = Z_pred.detach().numpy().reshape(grids_container[0].shape)
            set_pred = axs[0].contourf(*grids_container, abs(Z_pred - Z_true))
            axs[0].set_title('PINN [trained epoch = {}]'.format(self.trained_epoch) + " Pointwise Error")
            fig.colorbar(set_pred, ax=axs[0])

            if filename is None:
                plt.show()
            else:
                fig.savefig(filename)
        except Exception:
            print("")
            print("Something went wrong...")
            print("Not able to plot the error. Please pass a data solution or a true solution")


'''
print(self.pred_loss.item(), loss.item(), self.old_loss.item())
if self.accelerate is not None:
    if self.pred_loss > loss and loss >= self.old_loss:
        self.current_lr = self.original_lr
        #print('restart')
    elif (loss-self.pred_loss).item() < 0.1:
        self.current_lr += .5*self.current_lr
        #print('powa')
    else:
        self.current_lr -= .5*self.current_lr
    #print(self.current_lr)
    #self.current_lr = min(loss.item()*3, 0.02)

    for g in self.optimizer.param_groups:
        g['lr'] = self.current_lr
'''
49
pina/problem.py
Normal file
@@ -0,0 +1,49 @@
import torch


class Problem(object):

    def __init__(self, *args, **kwargs):
        raise NotImplementedError

    @property
    def variables(self):
        return self._variables

    @variables.setter
    def variables(self, variables):
        if variables is None:
            variables = ['var']
        self._variables = variables

    @property
    def boundary_conditions(self):
        return self._bc

    @boundary_conditions.setter
    def boundary_conditions(self, bc):
        if isinstance(bc, (list, tuple)):
            bc = {'var': bc}
        self._bc = bc

    @property
    def spatial_dimensions(self):
        return self._spatial_dimensions

    @staticmethod
    def grad(output_, input_):
        gradients = torch.autograd.grad(
            output_,
            input_.tensor,
            grad_outputs=torch.ones(output_.size()).to(
                dtype=input_.tensor.dtype,
                device=input_.tensor.device),
            create_graph=True, retain_graph=True, allow_unused=True)[0]
        from pina.label_tensor import LabelTensor
        return LabelTensor(gradients, input_.labels)
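
    # Example: if `u = model(pts)` with `pts` a LabelTensor labelled
    # ['x', 'y'], Problem.grad(u, pts) returns a LabelTensor whose 'x' and
    # 'y' entries are the autograd derivatives du/dx and du/dy.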

    def __str__(self):
        s = ''
        #s = 'Variables: {}\n'.format(self.variables)
        return s
11
pina/problem1d.py
Normal file
@@ -0,0 +1,11 @@

from .problem import Problem
import numpy as np


class Problem1D(Problem):

    def __init__(self, variables=None, bc=None):
        self._spatial_dimensions = 1
        self.variables = variables
        print(bc)
        self.bc = bc
16
pina/problem2d.py
Normal file
@@ -0,0 +1,16 @@
from .problem import Problem


class Problem2D(Problem):

    spatial_dimensions = 2

    @property
    def boundary_condition(self):
        return self._boundary_condition

    @boundary_condition.setter
    def boundary_condition(self, bc):
        self._boundary_condition = bc
27
pina/segment.py
Normal file
@@ -0,0 +1,27 @@
import torch
import numpy as np
from .chebyshev import chebyshev_roots


class Segment():
    def __init__(self, p1, p2):
        self.p1 = p1
        self.p2 = p2

    def discretize(self, n, mode='random'):
        pts = []

        if mode == 'random':
            iterator = np.random.uniform(0, 1, n)
        elif mode == 'grid':
            iterator = np.linspace(0, 1, n)
        elif mode == 'chebyshev':
            iterator = chebyshev_roots(n) * .5 + .5
        else:
            raise ValueError("mode must be 'random', 'grid' or 'chebyshev'")

        for k in iterator:
            x = self.p1[0] + k*(self.p2[0]-self.p1[0])
            y = self.p1[1] + k*(self.p2[1]-self.p1[1])
            pts.append((x, y))

        pts = np.array(pts)
        return pts
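
    # Example: Segment((0, 0), (1, 0)).discretize(3, mode='grid') returns
    # array([[0. , 0. ], [0.5, 0. ], [1. , 0. ]]): the points are interpolated
    # linearly between p1 and p2.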
27
pina/tdproblem1d.py
Normal file
@@ -0,0 +1,27 @@
import numpy as np
from .problem1d import Problem1D
from .segment import Segment


class TimeDepProblem1D(Problem1D):

    def __init__(self, variables=None, bc=None, initial=None, tend=None, domain_bound=None):
        self.variables = variables
        self._spatial_dimensions = 2
        self.tend = tend
        self.tstart = 0
        if domain_bound is None:
            bound_pts = [bc[0] for bc in self.boundary_conditions]
            domain_bound = np.array([
                [min(bound_pts), max(bound_pts)],
                [self.tstart, self.tend]
            ])

        self.domain_bound = np.array([[-1, 1], [0, 1]])  # domain_bound
        print(domain_bound)
        self.boundary_conditions = (
            (Segment((bc[0][0], self.tstart), (bc[1][0], self.tstart)), initial),
            (Segment((bc[0][0], self.tstart), (bc[0][0], self.tend)), bc[0][1]),
            (Segment((bc[1][0], self.tstart), (bc[1][0], self.tend)), bc[1][1])
        )
78
problems/burgers.py
Normal file
@@ -0,0 +1,78 @@
import numpy as np
import scipy.io
import torch

from pina.problem import Problem
from pina.segment import Segment
from pina.cube import Cube
from pina.tdproblem1d import TimeDepProblem1D


def tmp_grad(output_, input_):
    return torch.autograd.grad(
        output_,
        input_.tensor,
        grad_outputs=torch.ones(output_.size()).to(
            dtype=input_.tensor.dtype,
            device=input_.tensor.device),
        create_graph=True, retain_graph=True, allow_unused=True)[0]


class Burgers1D(TimeDepProblem1D):

    def __init__(self):

        def burger_equation(input_, output_):
            grad_u = self.grad(output_['u'], input_)
            grad_x, grad_t = tmp_grad(output_['u'], input_).T
            gradgrad_u_x = self.grad(grad_u['x'], input_)
            grad_xx = tmp_grad(grad_x, input_)[:, 0]
            #print(grad_t, grad_u['t'])
            return grad_u['t'] + output_['u']*grad_u['x'] - (0.01/torch.pi)*gradgrad_u_x['x']
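
            # The residual above encodes the viscous Burgers equation
            #     u_t + u * u_x - (0.01/pi) * u_xx = 0,
            # with grad_u['t'], grad_u['x'] the first derivatives and
            # gradgrad_u_x['x'] the second derivative in x.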

        def nil_dirichlet(input_, output_):
            u_expected = 0.0
            return output_['u'] - u_expected

        def initial_condition(input_, output_):
            u_expected = -torch.sin(torch.pi*input_['x'])
            return output_['u'] - u_expected

        self.conditions = {
            'gamma1': {'location': Segment((-1, 0), (-1, 1)), 'func': nil_dirichlet},
            'gamma2': {'location': Segment(( 1, 0), ( 1, 1)), 'func': nil_dirichlet},
            'initia': {'location': Segment((-1, 0), ( 1, 0)), 'func': initial_condition},
            'D': {'location': Cube([[-1, 1], [0, 1]]), 'func': burger_equation}
        }

        self.input_variables = ['x', 't']
        self.output_variables = ['u']
        self.spatial_domain = Cube([[0, 1]])
        self.temporal_domain = Cube([[0, 1]])


bc = (
    (-1, lambda x: torch.zeros(x.shape[0], 1)),
    ( 1, lambda x: torch.zeros(x.shape[0], 1))
)

initial = lambda x: -np.sin(np.pi*x[:, 0]).reshape(-1, 1)


def equation(x, fx):
    grad_x, grad_t = Problem.grad(fx, x).T
    grad_xx = Problem.grad(grad_x, x)[:, 0]
    a = grad_t + fx.flatten()*grad_x - (0.01/torch.pi)*grad_xx
    return a


burgers = TimeDepProblem1D(bc=bc, initial=initial, tend=1, domain_bound=[-1, 1])
burgers.equation = equation

# read data for errors and plots
data = scipy.io.loadmat('Data/burgers_shock.mat')
data_solution = {'grid': np.meshgrid(data['x'], data['t']), 'grid_solution': data['usol'].T}
burgers.data_solution = data_solution
49
problems/elliptic_optimal_control.py
Normal file
@@ -0,0 +1,49 @@
import numpy as np
import torch
from pina.problem import Problem
from pina.segment import Segment
from pina.cube import Cube
from pina.problem2d import Problem2D

xmin, xmax, ymin, ymax = -1, 1, -1, 1


class EllipticOptimalControl(Problem2D):

    def __init__(self, alpha=1):

        def term1(input_, output_):
            grad_p = self.grad(output_['p'], input_)
            gradgrad_p_x1 = self.grad(grad_p['x1'], input_)
            gradgrad_p_x2 = self.grad(grad_p['x2'], input_)
            yd = 2.0
            return output_['y'] - yd - (gradgrad_p_x1['x1'] + gradgrad_p_x2['x2'])

        def term2(input_, output_):
            grad_y = self.grad(output_['y'], input_)
            gradgrad_y_x1 = self.grad(grad_y['x1'], input_)
            gradgrad_y_x2 = self.grad(grad_y['x2'], input_)
            return - (gradgrad_y_x1['x1'] + gradgrad_y_x2['x2']) - output_['u']

        def term3(input_, output_):
            return output_['p'] - output_['u']*alpha

        def nil_dirichlet(input_, output_):
            y_value = 0.0
            p_value = 0.0
            return torch.abs(output_['y'] - y_value) + torch.abs(output_['p'] - p_value)

        self.conditions = {
            'gamma1': {'location': Segment((xmin, ymin), (xmax, ymin)), 'func': nil_dirichlet},
            'gamma2': {'location': Segment((xmax, ymin), (xmax, ymax)), 'func': nil_dirichlet},
            'gamma3': {'location': Segment((xmax, ymax), (xmin, ymax)), 'func': nil_dirichlet},
            'gamma4': {'location': Segment((xmin, ymax), (xmin, ymin)), 'func': nil_dirichlet},
            'D1': {'location': Cube([[xmin, xmax], [ymin, ymax]]), 'func': [term1, term2, term3]},
            #'D2': {'location': Cube([[0, 1], [0, 1]]), 'func': term2},
            #'D3': {'location': Cube([[0, 1], [0, 1]]), 'func': term3}
        }

        self.input_variables = ['x1', 'x2']
        self.output_variables = ['u', 'p', 'y']
        self.spatial_domain = Cube([[xmin, xmax], [xmin, xmax]])
55
problems/laplacian_optimal_control_parametric.py
Normal file
@@ -0,0 +1,55 @@
import numpy as np
import torch
from pina.problem import Problem
from pina.segment import Segment
from pina.parametricproblem2d import ParametricProblem2D

bc = {
    'y': (
        (Segment((0, 0), (4, 0)), lambda x: torch.ones(x.shape[0], 1)),
        (Segment((4, 0), (4, 1)), lambda x: torch.ones(x.shape[0], 1)),
        (Segment((4, 1), (0, 1)), lambda x: torch.ones(x.shape[0], 1)),
        (Segment((0, 1), (0, 0)), lambda x: torch.ones(x.shape[0], 1)),
    ),
    'p': (
        (Segment((0, 0), (4, 0)), lambda x: torch.zeros(x.shape[0], 1)),
        (Segment((4, 0), (4, 1)), lambda x: torch.zeros(x.shape[0], 1)),
        (Segment((4, 1), (0, 1)), lambda x: torch.zeros(x.shape[0], 1)),
        (Segment((0, 1), (0, 0)), lambda x: torch.zeros(x.shape[0], 1)),
    )
}

# optimal control parameters and data
alpha = 1e-5
# yd = 10*x[:, 0]*(1-x[:, 0])*x[:, 1]*(1-x[:, 1])
# the three variables:
#   state   y = f[0]
#   control u = f[1]
#   adjoint p = f[2]


def adjoint_eq(x, f):
    grad_x, grad_y = Problem.grad(f['p'], x)[:, :2].T
    grad_xx = Problem.grad(grad_x, x)[:, 0]
    grad_yy = Problem.grad(grad_y, x)[:, 1]
    return - grad_xx - grad_yy - f['y'] + 1*(x[:, 0] <= 1) + x[:, 2]*(x[:, 0] > 1)


def control_eq(x, f):
    return alpha*f['u'] - f['p']


def state_eq(x, f):
    grad_x, grad_y = Problem.grad(f['y'], x)[:, :2].T
    grad_xx = Problem.grad(grad_x, x)[:, 0]
    grad_yy = Problem.grad(grad_y, x)[:, 1]
    return - grad_xx - grad_yy - f['u']


def equation(x, f):
    return state_eq(x, f) + control_eq(x, f) + adjoint_eq(x, f)
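
# The three residuals form the first-order optimality system of the control
# problem: state equation -lap(y) = u, control equation alpha*u - p = 0, and
# adjoint equation -lap(p) = y - y_d, with the piecewise target y_d encoded in
# adjoint_eq (1 for x <= 1, the parameter x[:, 2] for x > 1).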


laplace = ParametricProblem2D(
    variables=['y', 'u', 'p'],
    bc=bc,
    domain_bound=np.array([[0, 4], [0, 1]]),
    params_bound=np.array([[0.5, 2.5]]))
laplace.equation = equation
29
problems/laplacian_parametric.py
Normal file
@@ -0,0 +1,29 @@
import numpy as np
import torch
from pina.problem import Problem
from pina.segment import Segment
from pina.parametricproblem2d import ParametricProblem2D

bc = (
    (Segment((-1, -1), ( 1, -1)), lambda x: torch.zeros(x.shape[0], 1)),
    (Segment(( 1, -1), ( 1,  1)), lambda x: torch.zeros(x.shape[0], 1)),
    (Segment(( 1,  1), (-1,  1)), lambda x: torch.zeros(x.shape[0], 1)),
    (Segment((-1,  1), (-1, -1)), lambda x: torch.zeros(x.shape[0], 1)),
)

params_domain = np.array([
    [-1.0, 1.0],
    [-1.0, 1.0]])


def equation(x, fx):
    grad_x, grad_y = Problem.grad(fx, x)[:, :2].T
    grad_xx = Problem.grad(grad_x, x)[:, 0]
    grad_yy = Problem.grad(grad_y, x)[:, 1]
    a = grad_xx + grad_yy - torch.exp(- 2*(x[:, 0] - x[:, 2])**2 - 2*(x[:, 1] - x[:, 3])**2)
    return a
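
# Residual of a parametrized Poisson problem
#     lap(u) = exp(-2*(x - mu1)**2 - 2*(y - mu2)**2),
# where the parameters (x[:, 2], x[:, 3]) move the center of the Gaussian
# source term around the domain.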


laplace = ParametricProblem2D(bc=bc, domain_bound=params_domain, params_bound=params_domain)

laplace.equation = equation
53
problems/parametric_elliptic_optimal_control.py
Normal file
@@ -0,0 +1,53 @@
import numpy as np
import torch
from pina.problem import Problem
from pina.segment import Segment
from pina.cube import Cube
from pina.problem2d import Problem2D

xmin, xmax, ymin, ymax = -1, 1, -1, 1


class ParametricEllipticOptimalControl(Problem2D):

    def __init__(self, alpha=1):

        def term1(input_, param_, output_):
            grad_p = self.grad(output_['p'], input_)
            gradgrad_p_x1 = self.grad(grad_p['x1'], input_)
            gradgrad_p_x2 = self.grad(grad_p['x2'], input_)
            return output_['y'] - param_ - (gradgrad_p_x1['x1'] + gradgrad_p_x2['x2'])

        def term2(input_, param_, output_):
            grad_y = self.grad(output_['y'], input_)
            gradgrad_y_x1 = self.grad(grad_y['x1'], input_)
            gradgrad_y_x2 = self.grad(grad_y['x2'], input_)
            return - (gradgrad_y_x1['x1'] + gradgrad_y_x2['x2']) - output_['u_param']

        def term3(input_, param_, output_):
            return output_['p'] - output_['u_param']*alpha

        def term(input_, param_, output_):
            return term1(input_, param_, output_) + term2(input_, param_, output_) + term3(input_, param_, output_)

        def nil_dirichlet(input_, param_, output_):
            y_value = 0.0
            p_value = 0.0
            return torch.abs(output_['y'] - y_value) + torch.abs(output_['p'] - p_value)

        self.conditions = {
            'gamma1': {'location': Segment((xmin, ymin), (xmax, ymin)), 'func': nil_dirichlet},
            'gamma2': {'location': Segment((xmax, ymin), (xmax, ymax)), 'func': nil_dirichlet},
            'gamma3': {'location': Segment((xmax, ymax), (xmin, ymax)), 'func': nil_dirichlet},
            'gamma4': {'location': Segment((xmin, ymax), (xmin, ymin)), 'func': nil_dirichlet},
            'D1': {'location': Cube([[xmin, xmax], [ymin, ymax]]), 'func': term},
            #'D2': {'location': Cube([[0, 1], [0, 1]]), 'func': term2},
            #'D3': {'location': Cube([[0, 1], [0, 1]]), 'func': term3}
        }

        self.input_variables = ['x1', 'x2']
        self.output_variables = ['u', 'p', 'y']
        self.parameters = ['mu']
        self.spatial_domain = Cube([[xmin, xmax], [xmin, xmax]])
        self.parameter_domain = np.array([[0.5, 3]])
@@ -0,0 +1,52 @@
import numpy as np
import torch
from pina.problem import Problem
from pina.segment import Segment
from pina.cube import Cube
from pina.problem2d import Problem2D

xmin, xmax, ymin, ymax = -1, 1, -1, 1


class ParametricEllipticOptimalControl(Problem2D):

    def __init__(self, alpha=1):

        def term1(input_, param_, output_):
            grad_p = self.grad(output_['p'], input_)
            gradgrad_p_x1 = self.grad(grad_p['x1'], input_)
            gradgrad_p_x2 = self.grad(grad_p['x2'], input_)
            #print('mu', input_['mu'])
            return output_['y'] - input_['mu'] - (gradgrad_p_x1['x1'] + gradgrad_p_x2['x2'])

        def term2(input_, param_, output_):
            grad_y = self.grad(output_['y'], input_)
            gradgrad_y_x1 = self.grad(grad_y['x1'], input_)
            gradgrad_y_x2 = self.grad(grad_y['x2'], input_)
            return - (gradgrad_y_x1['x1'] + gradgrad_y_x2['x2']) - output_['u_param']

        def term3(input_, param_, output_):
            #print('a', input_['alpha'], output_['p'], output_['u_param'])
            return output_['p'] - output_['u_param']*input_['alpha']

        def nil_dirichlet(input_, param_, output_):
            y_value = 0.0
            p_value = 0.0
            return torch.abs(output_['y'] - y_value) + torch.abs(output_['p'] - p_value)

        self.conditions = {
            'gamma1': {'location': Segment((xmin, ymin), (xmax, ymin)), 'func': nil_dirichlet},
            'gamma2': {'location': Segment((xmax, ymin), (xmax, ymax)), 'func': nil_dirichlet},
            'gamma3': {'location': Segment((xmax, ymax), (xmin, ymax)), 'func': nil_dirichlet},
            'gamma4': {'location': Segment((xmin, ymax), (xmin, ymin)), 'func': nil_dirichlet},
            'D1': {'location': Cube([[xmin, xmax], [ymin, ymax]]), 'func': [term1, term2]},
            #'D2': {'location': Cube([[0, 1], [0, 1]]), 'func': term2},
            #'D3': {'location': Cube([[0, 1], [0, 1]]), 'func': term3}
        }

        self.input_variables = ['x1', 'x2']
        self.output_variables = ['u', 'p', 'y']
        self.parameters = ['mu', 'alpha']
        self.spatial_domain = Cube([[xmin, xmax], [xmin, xmax]])
        self.parameter_domain = np.array([[0.5, 3], [0.0001, 1]])
43
problems/parametric_poisson.py
Normal file
@@ -0,0 +1,43 @@
import numpy as np
import torch
from pina.segment import Segment
from pina.cube import Cube
from pina.problem2d import Problem2D
from pina.problem import Problem


class ParametricPoisson2DProblem(Problem2D):

    def __init__(self):

        def laplace_equation(input_, param_, output_):
            grad_u = self.grad(output_['u'], input_)
            gradgrad_u_x = self.grad(grad_u['x'], input_)
            gradgrad_u_y = self.grad(grad_u['y'], input_)
            force_term = torch.exp(
                - 2*(input_['x'] - input_['mu1'])**2 -
                2*(input_['y'] - input_['mu2'])**2
            )
            return gradgrad_u_x['x'] + gradgrad_u_y['y'] - force_term

        def nil_dirichlet(input_, param_, output_):
            value = 0.0
            return output_['u'] - value

        self.conditions = {
            'gamma1': {'location': Segment((-1, -1), ( 1, -1)), 'func': nil_dirichlet},
            'gamma2': {'location': Segment(( 1, -1), ( 1,  1)), 'func': nil_dirichlet},
            'gamma3': {'location': Segment(( 1,  1), (-1,  1)), 'func': nil_dirichlet},
            'gamma4': {'location': Segment((-1,  1), (-1, -1)), 'func': nil_dirichlet},
            'D': {'location': Cube([[-1, 1], [-1, 1]]), 'func': laplace_equation}
        }

        self.input_variables = ['x', 'y']
        self.output_variables = ['u']
        self.parameters = ['mu1', 'mu2']
        #self.truth_solution = poisson_sol
        self.spatial_domain = Cube([[0, 1], [0, 1]])
        self.parameter_domain = np.array([[-1, 1], [-1, 1]])

        #self.check()  # Check the problem is correctly set
41
problems/poisson2D.py
Normal file
@@ -0,0 +1,41 @@
import numpy as np
import torch
from pina.segment import Segment
from pina.cube import Cube
from pina.problem2d import Problem2D
from pina.problem import Problem


class Poisson2DProblem(Problem2D):

    def __init__(self):

        def laplace_equation(input_, output_):
            grad_u = self.grad(output_['u'], input_)
            gradgrad_u_x = self.grad(grad_u['x'], input_)
            gradgrad_u_y = self.grad(grad_u['y'], input_)
            force_term = (torch.sin(input_['x']*torch.pi)
                          * torch.sin(input_['y']*torch.pi))
            return gradgrad_u_x['x'] + gradgrad_u_y['y'] - force_term

        def nil_dirichlet(input_, output_):
            value = 0.0
            return output_['u'] - value

        self.conditions = {
            'gamma1': {'location': Segment((0, 0), (1, 0)), 'func': nil_dirichlet},
            'gamma2': {'location': Segment((1, 0), (1, 1)), 'func': nil_dirichlet},
            'gamma3': {'location': Segment((1, 1), (0, 1)), 'func': nil_dirichlet},
            'gamma4': {'location': Segment((0, 1), (0, 0)), 'func': nil_dirichlet},
            'D': {'location': Cube([[0, 1], [0, 1]]), 'func': laplace_equation}
        }

        def poisson_sol(x, y):
            return -(np.sin(x*np.pi)*np.sin(y*np.pi))/(2*np.pi**2)
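
            # Check: for u = -sin(pi x) sin(pi y) / (2 pi^2) the Laplacian is
            # u_xx + u_yy = sin(pi x) sin(pi y), i.e. exactly the force term in
            # laplace_equation, so the residual vanishes for this solution.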

        self.input_variables = ['x', 'y']
        self.output_variables = ['u']
        self.truth_solution = poisson_sol
        self.spatial_domain = Cube([[0, 1], [0, 1]])

        #self.check()  # Check the problem is correctly set
43
problems/poisson_2.py
Normal file
@@ -0,0 +1,43 @@
import numpy as np
import torch
from pina.segment import Segment
from pina.cube import Cube
from pina.problem2d import Problem2D
from pina.problem import Problem


class Poisson2DProblem(Problem2D):

    def __init__(self):

        def laplace_equation(input_, output_):
            grad_u = self.grad(output_['u'], input_)
            gradgrad_u_x = self.grad(grad_u['x'], input_)
            gradgrad_u_y = self.grad(grad_u['y'], input_)
            #force_term = (torch.sin(input_['x']*torch.pi)
            #              * torch.sin(input_['y']*torch.pi))
            force_term = -2*(input_['y']*(1-input_['y']) +
                             input_['x']*(1-input_['x']))
            return gradgrad_u_x['x'] + gradgrad_u_y['y'] - force_term

        def nil_dirichlet(input_, output_):
            value = 0.0
            return output_['u'] - value

        self.conditions = {
            'gamma1': {'location': Segment((0, 0), (1, 0)), 'func': nil_dirichlet},
            'gamma2': {'location': Segment((1, 0), (1, 1)), 'func': nil_dirichlet},
            'gamma3': {'location': Segment((1, 1), (0, 1)), 'func': nil_dirichlet},
            'gamma4': {'location': Segment((0, 1), (0, 0)), 'func': nil_dirichlet},
            'D': {'location': Cube([[0, 1], [0, 1]]), 'func': laplace_equation}
        }

        def poisson_sol(x, y):
            return x*(1-x)*y*(1-y)
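
            # Check: for u = x(1-x) y(1-y), u_xx = -2 y(1-y) and
            # u_yy = -2 x(1-x), so u_xx + u_yy matches the force term in
            # laplace_equation and the residual vanishes for this solution.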

        self.input_variables = ['x', 'y']
        self.output_variables = ['u']
        self.truth_solution = poisson_sol
        self.spatial_domain = Cube([[0, 1], [0, 1]])

        #self.check()  # Check the problem is correctly set