tmp commit - toward 0.0.1

Your Name
2021-11-29 15:29:00 +01:00
parent beae301a58
commit fb16fc7f3a
38 changed files with 2790 additions and 1 deletion

3
pina/__init__.py Normal file

@@ -0,0 +1,3 @@
from .pinn import PINN
from .deep_feed_forward import DeepFeedForward
from .problem1d import Problem1D
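For orientation, a hypothetical import sketch of what this top-level package exposes after the commit:

    from pina import PINN, DeepFeedForward, Problem1D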

6
pina/adaptive_functions/__init__.py Normal file

@@ -0,0 +1,6 @@
from .adaptive_tanh import AdaptiveTanh
from .adaptive_sin import AdaptiveSin
from .adaptive_cos import AdaptiveCos
from .adaptive_linear import AdaptiveLinear
from .adaptive_square import AdaptiveSquare

50
pina/adaptive_functions/adaptive_cos.py Normal file

@@ -0,0 +1,50 @@
import torch
from torch.nn.parameter import Parameter
class AdaptiveCos(torch.nn.Module):
    '''
    Implementation of the adaptive cosine activation, with trainable
    scale, frequency (alpha) and translation parameters.
    Shape:
        - Input: (N, *) where * means any number of additional
          dimensions
        - Output: (N, *), same shape as the input
    Parameters:
        - alpha: trainable parameter
    References:
        - Adapted from the soft exponential activation; see the related
          paper: https://arxiv.org/pdf/1602.01321.pdf
    Examples:
        >>> a1 = AdaptiveCos()
        >>> x = torch.randn(256)
        >>> x = a1(x)
    '''
    def __init__(self, alpha=None):
        '''
        Initialization.
        INPUT:
            - alpha: trainable parameter, initialized to 1.0 by default
        '''
        super().__init__()
        # nn.Parameter registers the tensors as trainable
        # (requires_grad is True by default)
        if alpha is None:
            self.alpha = Parameter(torch.tensor(1.0))
        else:
            self.alpha = Parameter(torch.tensor(alpha))
        self.scale = Parameter(torch.tensor(1.0))
        self.translate = Parameter(torch.tensor(0.0))
    def forward(self, x):
        '''
        Forward pass of the function.
        Applies the function to the input elementwise.
        '''
        return self.scale * torch.cos(self.alpha * x + self.translate)
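A minimal sketch of how these adaptive activations drop into a plain torch model (the two-layer network below is illustrative, not part of the commit):

    import torch
    from pina.adaptive_functions import AdaptiveCos

    model = torch.nn.Sequential(
        torch.nn.Linear(1, 20),
        AdaptiveCos(),  # scale, alpha and translate train with the rest
        torch.nn.Linear(20, 1),
    )
    y = model(torch.rand(16, 1))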

45
pina/adaptive_functions/adaptive_exp.py Normal file

@@ -0,0 +1,45 @@
import torch
from torch.nn.parameter import Parameter
class AdaptiveExp(torch.nn.Module):
    '''
    Implementation of the adaptive exponential activation, with trainable
    scale, rate (alpha) and translation parameters.
    Shape:
        - Input: (N, *) where * means any number of additional
          dimensions
        - Output: (N, *), same shape as the input
    Parameters:
        - alpha: trainable parameter
    References:
        - Adapted from the soft exponential activation; see the related
          paper: https://arxiv.org/pdf/1602.01321.pdf
    Examples:
        >>> a1 = AdaptiveExp()
        >>> x = torch.randn(256)
        >>> x = a1(x)
    '''
    def __init__(self):
        '''
        Initialization.
        INPUT:
            - scale, alpha, translate: trainable parameters, initialized
              by sampling a normal distribution
        '''
        super().__init__()
        # nn.Parameter registers the tensors as trainable
        # (requires_grad is True by default)
        self.scale = Parameter(torch.normal(torch.tensor(1.0), torch.tensor(0.1)))
        self.alpha = Parameter(torch.normal(torch.tensor(1.0), torch.tensor(0.1)))
        self.translate = Parameter(torch.normal(torch.tensor(0.0), torch.tensor(0.1)))
    def forward(self, x):
        '''
        Forward pass of the function.
        Applies the function to the input elementwise.
        '''
        # the original body returned a purely linear map and never used
        # alpha; the exponential is restored here
        return self.scale * torch.exp(self.alpha * (x + self.translate))

42
pina/adaptive_functions/adaptive_linear.py Normal file

@@ -0,0 +1,42 @@
import torch
from torch.nn.parameter import Parameter
class AdaptiveLinear(torch.nn.Module):
    '''
    Implementation of the adaptive linear activation, with trainable
    scale and translation parameters.
    Shape:
        - Input: (N, *) where * means any number of additional
          dimensions
        - Output: (N, *), same shape as the input
    Parameters:
        - scale, translate: trainable parameters
    References:
        - Adapted from the soft exponential activation; see the related
          paper: https://arxiv.org/pdf/1602.01321.pdf
    Examples:
        >>> a1 = AdaptiveLinear()
        >>> x = torch.randn(256)
        >>> x = a1(x)
    '''
    def __init__(self):
        '''
        Initialization.
        INPUT:
            - scale, translate: trainable parameters, initialized to 1.0
              and 0.0 respectively
        '''
        super().__init__()
        # nn.Parameter registers the tensors as trainable
        # (requires_grad is True by default)
        self.scale = Parameter(torch.tensor(1.0))
        self.translate = Parameter(torch.tensor(0.0))
    def forward(self, x):
        '''
        Forward pass of the function.
        Applies the function to the input elementwise.
        '''
        return self.scale * (x + self.translate)

43
pina/adaptive_functions/adaptive_relu.py Normal file

@@ -0,0 +1,43 @@
import torch
from torch.nn.parameter import Parameter
class AdaptiveReLU(torch.nn.Module):
    '''
    Implementation of the adaptive ReLU activation, with trainable
    scale and translation parameters.
    Shape:
        - Input: (N, *) where * means any number of additional
          dimensions
        - Output: (N, *), same shape as the input
    Parameters:
        - scale, translate: trainable parameters
    References:
        - Adapted from the soft exponential activation; see the related
          paper: https://arxiv.org/pdf/1602.01321.pdf
    Examples:
        >>> a1 = AdaptiveReLU()
        >>> x = torch.randn(256)
        >>> x = a1(x)
    '''
    def __init__(self):
        '''
        Initialization.
        INPUT:
            - scale, translate: trainable parameters, randomly initialized
        '''
        super().__init__()
        # nn.Parameter registers the tensors as trainable
        # (requires_grad is True by default)
        self.scale = Parameter(torch.rand(1))
        self.translate = Parameter(torch.rand(1))
    def forward(self, x):
        '''
        Forward pass of the function.
        Applies the function to the input elementwise.
        '''
        return torch.relu(x + self.translate) * self.scale

46
pina/adaptive_functions/adaptive_sin.py Normal file

@@ -0,0 +1,46 @@
import torch
from torch.nn.parameter import Parameter
class AdaptiveSin(torch.nn.Module):
'''
Implementation of soft exponential activation.
Shape:
- Input: (N, *) where * means, any number of additional
dimensions
- Output: (N, *), same shape as the input
Parameters:
- alpha - trainable parameter
References:
- See related paper:
https://arxiv.org/pdf/1602.01321.pdf
Examples:
>>> a1 = soft_exponential(256)
>>> x = torch.randn(256)
>>> x = a1(x)
'''
def __init__(self, alpha = None):
'''
Initialization.
INPUT:
- in_features: shape of the input
- aplha: trainable parameter
aplha is initialized with zero value by default
'''
super(AdaptiveSin, self).__init__()
# initialize alpha
self.alpha = Parameter(torch.normal(torch.tensor(1.0), torch.tensor(0.1))) # create a tensor out of alpha
self.alpha.requiresGrad = True # set requiresGrad to true!
self.scale = Parameter(torch.normal(torch.tensor(1.0), torch.tensor(0.1)))
self.scale.requiresGrad = True # set requiresGrad to true!
self.translate = Parameter(torch.normal(torch.tensor(0.0), torch.tensor(0.1)))
self.translate.requiresGrad = True # set requiresGrad to true!
def forward(self, x):
'''
Forward pass of the function.
Applies the function to the input elementwise.
'''
return self.scale * (torch.sin(self.alpha * x + self.translate))

42
pina/adaptive_functions/adaptive_softplus.py Normal file

@@ -0,0 +1,42 @@
import torch
from torch.nn.parameter import Parameter
class AdaptiveSoftplus(torch.nn.Module):
'''
Implementation of soft exponential activation.
Shape:
- Input: (N, *) where * means, any number of additional
dimensions
- Output: (N, *), same shape as the input
Parameters:
- alpha - trainable parameter
References:
- See related paper:
https://arxiv.org/pdf/1602.01321.pdf
Examples:
>>> a1 = soft_exponential(256)
>>> x = torch.randn(256)
>>> x = a1(x)
'''
def __init__(self):
'''
Initialization.
INPUT:
- in_features: shape of the input
- aplha: trainable parameter
aplha is initialized with zero value by default
'''
super().__init__()
self.soft = torch.nn.Softplus()
self.scale = Parameter(torch.rand(1))
self.scale.requiresGrad = True # set requiresGrad to true!
def forward(self, x):
'''
Forward pass of the function.
Applies the function to the input elementwise.
'''
#x += self.translate
return self.soft(x)*self.scale

42
pina/adaptive_functions/adaptive_square.py Normal file

@@ -0,0 +1,42 @@
import torch
from torch.nn.parameter import Parameter
class AdaptiveSquare(torch.nn.Module):
'''
Implementation of soft exponential activation.
Shape:
- Input: (N, *) where * means, any number of additional
dimensions
- Output: (N, *), same shape as the input
Parameters:
- alpha - trainable parameter
References:
- See related paper:
https://arxiv.org/pdf/1602.01321.pdf
Examples:
>>> a1 = soft_exponential(256)
>>> x = torch.randn(256)
>>> x = a1(x)
'''
def __init__(self, alpha = None):
'''
Initialization.
INPUT:
- in_features: shape of the input
- aplha: trainable parameter
aplha is initialized with zero value by default
'''
super(AdaptiveSquare, self).__init__()
self.scale = Parameter(torch.tensor(1.0))
self.scale.requiresGrad = True # set requiresGrad to true!
self.translate = Parameter(torch.tensor(0.0))
self.translate.requiresGrad = True # set requiresGrad to true!
def forward(self, x):
'''
Forward pass of the function.
Applies the function to the input elementwise.
'''
return self.scale * (x + self.translate)**2

52
pina/adaptive_functions/adaptive_tanh.py Normal file

@@ -0,0 +1,52 @@
import torch
from torch.nn.parameter import Parameter
class AdaptiveTanh(torch.nn.Module):
'''
Implementation of soft exponential activation.
Shape:
- Input: (N, *) where * means, any number of additional
dimensions
- Output: (N, *), same shape as the input
Parameters:
- alpha - trainable parameter
References:
- See related paper:
https://arxiv.org/pdf/1602.01321.pdf
Examples:
>>> a1 = soft_exponential(256)
>>> x = torch.randn(256)
>>> x = a1(x)
'''
def __init__(self, alpha = None):
'''
Initialization.
INPUT:
- in_features: shape of the input
- aplha: trainable parameter
aplha is initialized with zero value by default
'''
super(AdaptiveTanh, self).__init__()
#self.in_features = in_features
# initialize alpha
if alpha == None:
self.alpha = Parameter(torch.tensor(1.0)) # create a tensor out of alpha
else:
self.alpha = Parameter(torch.tensor(alpha)) # create a tensor out of alpha
self.alpha.requiresGrad = True # set requiresGrad to true!
self.scale = Parameter(torch.tensor(1.0))
self.scale.requiresGrad = True # set requiresGrad to true!
self.translate = Parameter(torch.tensor(0.0))
self.translate.requiresGrad = True # set requiresGrad to true!
def forward(self, x):
'''
Forward pass of the function.
Applies the function to the input elementwise.
'''
x += self.translate
return self.scale * (torch.exp(self.alpha * x) - torch.exp(-self.alpha * x))/(torch.exp(self.alpha * x) + torch.exp(-self.alpha * x))

6
pina/chebyshev.py Normal file

@@ -0,0 +1,6 @@
from mpmath import chebyt, chop, taylor
import numpy as np
def chebyshev_roots(n):
""" Return the roots of *n* Chebyshev polynomials (between [-1, 1]) """
return np.roots(chop(taylor(lambda x: chebyt(n, x), 0, n))[::-1])
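As a quick sanity check (a hypothetical snippet, not part of the commit), the returned roots match the closed form cos((2k - 1) * pi / (2n)):

    import numpy as np
    from pina.chebyshev import chebyshev_roots

    n = 4
    roots = np.sort(chebyshev_roots(n).real)
    closed_form = np.sort(np.cos((2 * np.arange(1, n + 1) - 1) * np.pi / (2 * n)))
    print(np.allclose(roots, closed_form))  # True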

29
pina/cube.py Normal file

@@ -0,0 +1,29 @@
import numpy as np
from .chebyshev import chebyshev_roots
class Cube():
def __init__(self, bound):
self.bound = np.asarray(bound)
def discretize(self, n, mode='random'):
if mode == 'random':
pts = np.random.uniform(size=(n, self.bound.shape[0]))
elif mode == 'chebyshev':
pts = np.array([chebyshev_roots(n) *.5 + .5 for _ in range(self.bound.shape[0])])
grids = np.meshgrid(*pts)
pts = np.hstack([grid.reshape(-1, 1) for grid in grids])
elif mode == 'grid':
pts = np.array([np.linspace(0, 1, n) for _ in range(self.bound.shape[0])])
grids = np.meshgrid(*pts)
pts = np.hstack([grid.reshape(-1, 1) for grid in grids])
        elif mode == 'lh' or mode == 'latin':
            from scipy.stats import qmc
            sampler = qmc.LatinHypercube(d=self.bound.shape[0])
            pts = sampler.random(n)
        else:
            raise ValueError('invalid discretization mode: {}'.format(mode))
# Scale pts
pts *= self.bound[:, 1] - self.bound[:, 0]
pts += self.bound[:, 0]
return pts
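A usage sketch for Cube (the bounds and point counts are illustrative):

    from pina.cube import Cube

    cube = Cube([[0, 1], [-1, 1]])          # one [min, max] pair per dimension
    pts = cube.discretize(10, mode='grid')  # 10 points per dimension
    print(pts.shape)                        # (100, 2): the full tensor grid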

83
pina/deep_feed_forward.py Normal file

@@ -0,0 +1,83 @@
from .problem import Problem
import torch
import torch.nn as nn
import numpy as np
from .cube import Cube
torch.pi = torch.acos(torch.zeros(1)).item() * 2 # which is 3.1415927410125732
from torch.nn import Tanh, ReLU
#import torch.nn.utils.prune as prune
from pina.adaptive_functions import AdaptiveLinear
from pina.label_tensor import LabelTensor
class DeepFeedForward(torch.nn.Module):
def __init__(self,
inner_size=20,
n_layers=2,
func=nn.Tanh,
input_variables=None,
output_variables=None,
layers=None,
extra_features=None):
        '''
        Deep feed-forward network with named input/output variables and
        optional extra features appended to the network input.
        '''
super(DeepFeedForward, self).__init__()
if extra_features is None:
extra_features = []
self.extra_features = nn.Sequential(*extra_features)
        if input_variables is None: input_variables = ['x']
        if output_variables is None: output_variables = ['y']
self.input_variables = input_variables
self.input_dimension = len(input_variables)
self.output_variables = output_variables
self.output_dimension = len(output_variables)
n_features = len(extra_features)
if layers is None: layers = [inner_size] * n_layers
tmp_layers = layers.copy()
        tmp_layers.insert(0, self.input_dimension + n_features)
tmp_layers.append(self.output_dimension)
self.layers = []
for i in range(len(tmp_layers)-1):
self.layers.append(nn.Linear(tmp_layers[i], tmp_layers[i+1]))
if isinstance(func, list):
self.functions = func
else:
self.functions = [func for _ in range(len(self.layers)-1)]
unique_list = []
for layer, func in zip(self.layers[:-1], self.functions):
unique_list.append(layer)
if func is not None: unique_list.append(func())
unique_list.append(self.layers[-1])
self.model = nn.Sequential(*unique_list)
def forward(self, x):
nf = len(self.extra_features)
if nf == 0:
return LabelTensor(self.model(x), self.output_variables)
# if self.extra_features
#input_ = torch.zeros(x.shape[0], nf+self.input_dimension, dtype=x.dtype, device=x.device)
input_ = torch.zeros(x.shape[0], nf+x.shape[1], dtype=x.dtype, device=x.device)
input_[:, :x.shape[1]] = x
for i, feature in enumerate(self.extra_features, start=self.input_dimension):
input_[:, i] = feature(x)
return LabelTensor(self.model(input_), self.output_variables)
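A construction sketch (the variable names and sizes are illustrative):

    import torch
    from pina.deep_feed_forward import DeepFeedForward

    model = DeepFeedForward(
        input_variables=['x', 'y'],
        output_variables=['u'],
        inner_size=20,
        n_layers=2,
    )
    out = model(torch.rand(32, 2))  # LabelTensor with labels ['u']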

49
pina/label_tensor.py Normal file

@@ -0,0 +1,49 @@
import torch
class LabelTensor():
def __init__(self, x, labels):
        if len(labels) != x.shape[1]:
            raise ValueError('got {} labels for a tensor with {} columns'.format(
                len(labels), x.shape[1]))
self.__labels = labels
self.tensor = x
def __getitem__(self, key):
if key in self.labels:
return self.tensor[:, self.labels.index(key)]
else:
return self.tensor.__getitem__(key)
    def __repr__(self):
        return repr(self.tensor)
    def __str__(self):
        return '{} {}'.format(self.tensor, self.labels)
@property
def labels(self):
return self.__labels
@staticmethod
def hstack(labeltensor_list):
concatenated_tensor = torch.cat([lt.tensor for lt in labeltensor_list], axis=1)
concatenated_label = sum([lt.labels for lt in labeltensor_list], [])
return LabelTensor(concatenated_tensor, concatenated_label)
if __name__ == "__main__":
import numpy as np
    a = np.random.uniform(size=(20, 3))
p = torch.from_numpy(a)
t = LabelTensor(p, labels=['u', 'p', 't'])
print(t)
print(t['u'])
    t.tensor *= 2  # scale the underlying tensor in place
print(t['u'])
print(t[:, 0])


@@ -0,0 +1,27 @@
from .problem import Problem
import torch
import torch.nn as nn
import numpy as np
from .cube import Cube
torch.pi = torch.acos(torch.zeros(1)).item() * 2 # which is 3.1415927410125732
from torch.nn import Tanh, ReLU
import torch.nn.utils.prune as prune
from pina.adaptive_functions import AdaptiveLinear
from pina.deep_feed_forward import DeepFeedForward
class MultiDeepFeedForward(torch.nn.Module):
def __init__(self, dff_dict):
        '''
        Build a DeepFeedForward sub-network, registered as an attribute,
        for each (name, constructor arguments) pair in dff_dict.
        '''
super().__init__()
        if not isinstance(dff_dict, dict):
            raise TypeError('dff_dict must be a dict')
for name, constructor_args in dff_dict.items():
setattr(self, name, DeepFeedForward(**constructor_args))


@@ -0,0 +1,9 @@
from .problem2d import Problem2D
import numpy as np
class ParametricProblem2D(Problem2D):
def __init__(self, variables=None, bc=None, params_bound=None, domain_bound=None):
Problem2D.__init__(self, variables=variables, bc=bc, domain_bound=domain_bound)
self.params_domain = params_bound

488
pina/pinn.py Normal file

@@ -0,0 +1,488 @@
from mpmath import chebyt, chop, taylor
from .problem import Problem
import torch
import torch.nn as nn
import numpy as np
from .cube import Cube
from .segment import Segment
from .deep_feed_forward import DeepFeedForward
from pina.label_tensor import LabelTensor
torch.pi = torch.acos(torch.zeros(1)).item() * 2 # which is 3.1415927410125732
class PINN(object):
def __init__(self,
problem,
model,
optimizer=torch.optim.Adam,
lr=0.001,
regularizer=0.00001,
data_weight=1.,
dtype=torch.float64,
device='cpu',
lr_accelerate=None,
error_norm='mse'):
        '''
        :param Problem problem: the formulation of the problem.
        :param dict architecture: a dictionary containing the information to
            build the model. Valid options are:
            - inner_size [int] the number of neurons in the hidden layers;
              default is 20.
            - n_layers [int] the number of hidden layers; default is 4.
            - func [nn.Module or str] the activation function; passing a `str`
              it is possible to choose an adaptive function (e.g.
              'adapt_tanh'); default is the non-adaptive hyperbolic tangent.
        :param float lr: the learning rate; default is 0.001.
        :param float regularizer: the coefficient for the L2 regularizer term.
        :param type dtype: the data type to use for the model. Valid options
            are `torch.float32` and `torch.float64` (`torch.float16` only on
            GPU); default is `torch.float64`.
        :param float lr_accelerate: the coefficient that controls the learning
            rate increase, such that, for every epoch in which the loss is
            decreasing, the learning rate is updated as
            $learning_rate = learning_rate * lr_accelerate$.
            When the loss stops decreasing, the learning rate is reset to the
            initial value. [TODO test parameters]
        '''
self.problem = problem
# self._architecture = architecture if architecture else dict()
# self._architecture['input_dimension'] = self.problem.domain_bound.shape[0]
# self._architecture['output_dimension'] = len(self.problem.variables)
# if hasattr(self.problem, 'params_domain'):
# self._architecture['input_dimension'] += self.problem.params_domain.shape[0]
self.accelerate = lr_accelerate
self.error_norm = error_norm
        if device == 'cuda' and not torch.cuda.is_available():
            raise RuntimeError('device set to "cuda", but CUDA is not available')
self.device = torch.device(device)
self.dtype = dtype
self.history = []
self.model = model
self.model.to(dtype=self.dtype, device=self.device)
self.input_pts = {}
self.truth_values = {}
self.trained_epoch = 0
self.optimizer = optimizer(
self.model.parameters(), lr=lr, weight_decay=regularizer)
self.data_weight = data_weight
@property
def problem(self):
return self._problem
@problem.setter
def problem(self, problem):
if not isinstance(problem, Problem):
raise TypeError
self._problem = problem
def get_data_residuals(self):
data_residuals = []
for output in self.data_pts:
data_values_pred = self.model(self.data_pts[output])
data_residuals.append(data_values_pred - self.data_values[output])
return torch.cat(data_residuals)
def get_phys_residuals(self):
"""
"""
residuals = []
for equation in self.problem.equation:
residuals.append(equation(self.phys_pts, self.model(self.phys_pts)))
return residuals
def _compute_norm(self, vec):
"""
Compute the norm of the `vec` one-dimensional tensor based on the
`self.error_norm` attribute.
.. todo: complete
:param vec torch.tensor: the tensor
"""
if isinstance(self.error_norm, int):
            return torch.sum(torch.abs(vec)**self.error_norm)**(1./self.error_norm)
elif self.error_norm == 'mse':
return torch.mean(vec**2)
elif self.error_norm == 'me':
return torch.mean(torch.abs(vec))
else:
raise RuntimeError
def save_state(self, filename):
checkpoint = {
'epoch': self.trained_epoch,
'model_state': self.model.state_dict(),
'optimizer_state' : self.optimizer.state_dict(),
'optimizer_class' : self.optimizer.__class__,
'history' : self.history,
}
# TODO save also architecture param?
#if isinstance(self.model, DeepFeedForward):
# checkpoint['model_class'] = self.model.__class__
# checkpoint['model_structure'] = {
# }
torch.save(checkpoint, filename)
def load_state(self, filename):
checkpoint = torch.load(filename)
self.model.load_state_dict(checkpoint['model_state'])
self.optimizer = checkpoint['optimizer_class'](self.model.parameters())
self.optimizer.load_state_dict(checkpoint['optimizer_state'])
self.trained_epoch = checkpoint['epoch']
self.history = checkpoint['history']
return self
def span_pts(self, n, mode='grid', locations='all'):
        '''
        Discretize every requested condition of the problem with `n` points,
        sampled according to `mode`.
        '''
if locations == 'all':
locations = [condition for condition in self.problem.conditions]
for location in locations:
manifold, func = self.problem.conditions[location].values()
if torch.is_tensor(manifold):
pts = manifold
else:
pts = manifold.discretize(n, mode)
pts = torch.from_numpy(pts)
self.input_pts[location] = LabelTensor(pts, self.problem.input_variables)
            self.input_pts[location].tensor = self.input_pts[location].tensor.to(dtype=self.dtype, device=self.device)
self.input_pts[location].tensor.requires_grad_(True)
self.input_pts[location].tensor.retain_grad()
def plot_pts(self, locations='all'):
import matplotlib
matplotlib.use('GTK3Agg')
import matplotlib.pyplot as plt
if locations == 'all':
locations = [condition for condition in self.problem.conditions]
for location in locations:
x, y = self.input_pts[location].tensor.T
#plt.plot(x.detach(), y.detach(), 'o', label=location)
np.savetxt('burgers_{}_pts.txt'.format(location), self.input_pts[location].tensor.detach(), header='x y', delimiter=' ')
plt.legend()
plt.show()
def train(self, stop=100, frequency_print=2, trial=None):
epoch = 0
while True:
losses = []
for condition_name in self.problem.conditions:
pts = self.input_pts[condition_name]
predicted = self.model(pts.tensor)
if isinstance(self.problem.conditions[condition_name]['func'], list):
for func in self.problem.conditions[condition_name]['func']:
residuals = func(pts, predicted)
losses.append(self._compute_norm(residuals))
else:
residuals = self.problem.conditions[condition_name]['func'](pts, predicted)
losses.append(self._compute_norm(residuals))
#print(condition_name, losses[-1])
self.optimizer.zero_grad()
sum(losses).backward()
self.optimizer.step()
self.trained_epoch += 1
if epoch % 50 == 0:
self.history.append([loss.detach().item() for loss in losses])
epoch += 1
if trial:
import optuna
                trial.report(losses[0].item() + losses[1].item(), epoch)
if trial.should_prune():
raise optuna.exceptions.TrialPruned()
if isinstance(stop, int):
if epoch == stop:
break
elif isinstance(stop, float):
if sum(losses) < stop:
break
if epoch % frequency_print == 0:
print('[epoch {:05d}] {:.6e} '.format(self.trained_epoch, sum(losses).item()), end='')
for loss in losses:
print('{:.6e} '.format(loss), end='')
print()
return sum(losses).item()
def error(self, dtype='l2', res=100):
import numpy as np
if hasattr(self.problem, 'truth_solution') and self.problem.truth_solution is not None:
pts_container = []
for mn, mx in self.problem.domain_bound:
pts_container.append(np.linspace(mn, mx, res))
grids_container = np.meshgrid(*pts_container)
Z_true = self.problem.truth_solution(*grids_container)
elif hasattr(self.problem, 'data_solution') and self.problem.data_solution is not None:
grids_container = self.problem.data_solution['grid']
Z_true = self.problem.data_solution['grid_solution']
try:
unrolled_pts = torch.tensor([t.flatten() for t in grids_container]).T.to(dtype=self.dtype, device=self.device)
Z_pred = self.model(unrolled_pts)
Z_pred = Z_pred.detach().numpy().reshape(grids_container[0].shape)
if dtype == 'l2':
return np.linalg.norm(Z_pred - Z_true)/np.linalg.norm(Z_true)
else:
# TODO H1
pass
        except Exception:
print("")
print("Something went wrong...")
print("Not able to compute the error. Please pass a data solution or a true solution")
def plot(self, res, filename=None, variable=None):
'''
'''
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
pts_container = []
#for mn, mx in [[-1, 1], [-1, 1]]:
for mn, mx in [[0, 1], [0, 1]]:
#for mn, mx in [[-1, 1], [0, 1]]:
pts_container.append(np.linspace(mn, mx, res))
grids_container = np.meshgrid(*pts_container)
unrolled_pts = torch.tensor([t.flatten() for t in grids_container]).T
        unrolled_pts = unrolled_pts.to(dtype=self.dtype)
Z_pred = self.model(unrolled_pts)
#######################################################
# poisson
# Z_truth = self.problem.truth_solution(unrolled_pts[:, 0], unrolled_pts[:, 1])
# Z_pred = Z_pred.tensor.detach().reshape(grids_container[0].shape)
# Z_truth = Z_truth.detach().reshape(grids_container[0].shape)
# err = np.abs(Z_pred-Z_truth)
# with open('poisson2_nofeat_plot.txt', 'w') as f_:
# f_.write('x y truth pred e\n')
# for (x, y), tru, pre, e in zip(unrolled_pts, Z_truth.reshape(-1, 1), Z_pred.reshape(-1, 1), err.reshape(-1, 1)):
# f_.write('{} {} {} {} {}\n'.format(x.item(), y.item(), tru.item(), pre.item(), e.item()))
# n = Z_pred.shape[1]
# plt.figure(figsize=(16, 6))
# plt.subplot(1, 3, 1)
# plt.contourf(*grids_container, Z_truth)
# plt.colorbar()
# plt.subplot(1, 3, 2)
# plt.contourf(*grids_container, Z_pred)
# plt.colorbar()
# plt.subplot(1, 3, 3)
# plt.contourf(*grids_container, err)
# plt.colorbar()
# plt.show()
#######################################################
# burgers
        import scipy.io
        data = scipy.io.loadmat('Data/burgers_shock.mat')
data_solution = {'grid': np.meshgrid(data['x'], data['t']), 'grid_solution': data['usol'].T}
grids_container = data_solution['grid']
print(data_solution['grid_solution'].shape)
unrolled_pts = torch.tensor([t.flatten() for t in grids_container]).T
        unrolled_pts = unrolled_pts.to(dtype=self.dtype)
Z_pred = self.model(unrolled_pts)
Z_truth = data_solution['grid_solution']
Z_pred = Z_pred.tensor.detach().reshape(grids_container[0].shape)
print(Z_pred, Z_truth)
err = np.abs(Z_pred.numpy()-Z_truth)
with open('burgers_nofeat_plot.txt', 'w') as f_:
f_.write('x y truth pred e\n')
for (x, y), tru, pre, e in zip(unrolled_pts, Z_truth.reshape(-1, 1), Z_pred.reshape(-1, 1), err.reshape(-1, 1)):
f_.write('{} {} {} {} {}\n'.format(x.item(), y.item(), tru.item(), pre.item(), e.item()))
n = Z_pred.shape[1]
plt.figure(figsize=(16, 6))
plt.subplot(1, 3, 1)
plt.contourf(*grids_container, Z_truth,vmin=-1, vmax=1)
plt.colorbar()
plt.subplot(1, 3, 2)
plt.contourf(*grids_container, Z_pred, vmin=-1, vmax=1)
plt.colorbar()
plt.subplot(1, 3, 3)
plt.contourf(*grids_container, err)
plt.colorbar()
plt.show()
# for i, output in enumerate(Z_pred.tensor.T, start=1):
# output = output.detach().numpy().reshape(grids_container[0].shape)
# plt.subplot(1, n, i)
# plt.contourf(*grids_container, output)
# plt.colorbar()
if filename is None:
plt.show()
else:
plt.savefig(filename)
def plot_params(self, res, param, filename=None, variable=None):
'''
'''
import matplotlib
matplotlib.use('GTK3Agg')
import matplotlib.pyplot as plt
if hasattr(self.problem, 'truth_solution') and self.problem.truth_solution is not None:
n_plot = 2
elif hasattr(self.problem, 'data_solution') and self.problem.data_solution is not None:
n_plot = 2
else:
n_plot = 1
fig, axs = plt.subplots(nrows=1, ncols=n_plot, figsize=(n_plot*6,4))
if not isinstance(axs, np.ndarray): axs = [axs]
if hasattr(self.problem, 'data_solution') and self.problem.data_solution is not None:
grids_container = self.problem.data_solution['grid']
Z_true = self.problem.data_solution['grid_solution']
elif hasattr(self.problem, 'truth_solution') and self.problem.truth_solution is not None:
pts_container = []
for mn, mx in self.problem.domain_bound:
pts_container.append(np.linspace(mn, mx, res))
grids_container = np.meshgrid(*pts_container)
Z_true = self.problem.truth_solution(*grids_container)
pts_container = []
for mn, mx in self.problem.domain_bound:
pts_container.append(np.linspace(mn, mx, res))
grids_container = np.meshgrid(*pts_container)
        unrolled_pts = torch.tensor([t.flatten() for t in grids_container]).T.to(dtype=self.dtype)
#print(unrolled_pts)
#print(param)
param_unrolled_pts = torch.cat((unrolled_pts, param.repeat(unrolled_pts.shape[0], 1)), 1)
        if variable is None:
variable = self.problem.variables[0]
Z_pred = self.evaluate(param_unrolled_pts)[variable]
variable = "Solution"
else:
Z_pred = self.evaluate(param_unrolled_pts)[variable]
Z_pred= Z_pred.detach().numpy().reshape(grids_container[0].shape)
set_pred = axs[0].contourf(*grids_container, Z_pred)
axs[0].set_title('PINN [trained epoch = {}]'.format(self.trained_epoch) + " " + variable) #TODO add info about parameter in the title
fig.colorbar(set_pred, ax=axs[0])
if n_plot == 2:
set_true = axs[1].contourf(*grids_container, Z_true)
axs[1].set_title('Truth solution')
fig.colorbar(set_true, ax=axs[1])
if filename is None:
plt.show()
else:
fig.savefig(filename + " " + variable)
def plot_error(self, res, filename=None):
import matplotlib
matplotlib.use('GTK3Agg')
import matplotlib.pyplot as plt
fig, axs = plt.subplots(nrows=1, ncols=1, figsize=(6,4))
if not isinstance(axs, np.ndarray): axs = [axs]
if hasattr(self.problem, 'data_solution') and self.problem.data_solution is not None:
grids_container = self.problem.data_solution['grid']
Z_true = self.problem.data_solution['grid_solution']
elif hasattr(self.problem, 'truth_solution') and self.problem.truth_solution is not None:
pts_container = []
for mn, mx in self.problem.domain_bound:
pts_container.append(np.linspace(mn, mx, res))
grids_container = np.meshgrid(*pts_container)
Z_true = self.problem.truth_solution(*grids_container)
try:
            unrolled_pts = torch.tensor([t.flatten() for t in grids_container]).T.to(dtype=self.dtype)
Z_pred = self.model(unrolled_pts)
Z_pred = Z_pred.detach().numpy().reshape(grids_container[0].shape)
set_pred = axs[0].contourf(*grids_container, abs(Z_pred - Z_true))
axs[0].set_title('PINN [trained epoch = {}]'.format(self.trained_epoch) + "Pointwise Error")
fig.colorbar(set_pred, ax=axs[0])
if filename is None:
plt.show()
else:
fig.savefig(filename)
        except Exception:
print("")
print("Something went wrong...")
print("Not able to plot the error. Please pass a data solution or a true solution")
'''
print(self.pred_loss.item(),loss.item(), self.old_loss.item())
if self.accelerate is not None:
if self.pred_loss > loss and loss >= self.old_loss:
self.current_lr = self.original_lr
#print('restart')
elif (loss-self.pred_loss).item() < 0.1:
self.current_lr += .5*self.current_lr
#print('powa')
else:
self.current_lr -= .5*self.current_lr
#print(self.current_lr)
#self.current_lr = min(loss.item()*3, 0.02)
for g in self.optimizer.param_groups:
g['lr'] = self.current_lr
'''
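For orientation, a hypothetical end-to-end sketch of the training workflow this class supports (Poisson1D is an assumed Problem subclass, not part of the commit):

    from pina import PINN, DeepFeedForward
    from my_problems import Poisson1D   # hypothetical problem definition

    problem = Poisson1D()
    model = DeepFeedForward(
        input_variables=problem.input_variables,
        output_variables=['u'],
    )
    pinn = PINN(problem, model, lr=0.001)
    pinn.span_pts(30, mode='grid')      # discretize every condition
    final_loss = pinn.train(stop=1000)  # int: epoch budget; float: loss threshold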

465
pina/ppinn.py Normal file

@@ -0,0 +1,465 @@
from mpmath import chebyt, chop, taylor
from .problem import Problem
import torch
import torch.nn as nn
import numpy as np
from .cube import Cube
from .deep_feed_forward import DeepFeedForward
from pina.label_tensor import LabelTensor
from pina.pinn import PINN
torch.pi = torch.acos(torch.zeros(1)).item() * 2 # which is 3.1415927410125732
class ParametricPINN(PINN):
def __init__(self,
problem,
model,
optimizer=torch.optim.Adam,
lr=0.001,
regularizer=0.00001,
data_weight=1.,
dtype=torch.float64,
device='cpu',
lr_accelerate=None,
error_norm='mse'):
        '''
        :param Problem problem: the formulation of the problem.
        :param dict architecture: a dictionary containing the information to
            build the model. Valid options are:
            - inner_size [int] the number of neurons in the hidden layers;
              default is 20.
            - n_layers [int] the number of hidden layers; default is 4.
            - func [nn.Module or str] the activation function; passing a `str`
              it is possible to choose an adaptive function (e.g.
              'adapt_tanh'); default is the non-adaptive hyperbolic tangent.
        :param float lr: the learning rate; default is 0.001.
        :param float regularizer: the coefficient for the L2 regularizer term.
        :param type dtype: the data type to use for the model. Valid options
            are `torch.float32` and `torch.float64` (`torch.float16` only on
            GPU); default is `torch.float64`.
        :param float lr_accelerate: the coefficient that controls the learning
            rate increase, such that, for every epoch in which the loss is
            decreasing, the learning rate is updated as
            $learning_rate = learning_rate * lr_accelerate$.
            When the loss stops decreasing, the learning rate is reset to the
            initial value. [TODO test parameters]
        '''
self.problem = problem
# self._architecture = architecture if architecture else dict()
# self._architecture['input_dimension'] = self.problem.domain_bound.shape[0]
# self._architecture['output_dimension'] = len(self.problem.variables)
# if hasattr(self.problem, 'params_domain'):
# self._architecture['input_dimension'] += self.problem.params_domain.shape[0]
self.accelerate = lr_accelerate
self.error_norm = error_norm
        if device == 'cuda' and not torch.cuda.is_available():
            raise RuntimeError('device set to "cuda", but CUDA is not available')
self.device = torch.device(device)
self.dtype = dtype
self.history = []
self.model = model
self.model.to(dtype=self.dtype, device=self.device)
self.input_pts = {}
self.truth_values = {}
self.trained_epoch = 0
self.optimizer = optimizer(
self.model.parameters(), lr=lr, weight_decay=regularizer)
self.data_weight = data_weight
@property
def problem(self):
return self._problem
@problem.setter
def problem(self, problem):
if not isinstance(problem, Problem):
raise TypeError
self._problem = problem
def get_data_residuals(self):
data_residuals = []
for output in self.data_pts:
data_values_pred = self.model(self.data_pts[output])
data_residuals.append(data_values_pred - self.data_values[output])
return torch.cat(data_residuals)
def get_phys_residuals(self):
"""
"""
residuals = []
for equation in self.problem.equation:
residuals.append(equation(self.phys_pts, self.model(self.phys_pts)))
return residuals
def _compute_norm(self, vec):
"""
Compute the norm of the `vec` one-dimensional tensor based on the
`self.error_norm` attribute.
.. todo: complete
:param vec torch.tensor: the tensor
"""
if isinstance(self.error_norm, int):
            return torch.sum(torch.abs(vec)**self.error_norm)**(1./self.error_norm)
elif self.error_norm == 'mse':
return torch.mean(vec**2)
elif self.error_norm == 'me':
return torch.mean(torch.abs(vec))
else:
raise RuntimeError
def save_state(self, filename):
checkpoint = {
'epoch': self.trained_epoch,
'model_state': self.model.state_dict(),
'optimizer_state' : self.optimizer.state_dict(),
'optimizer_class' : self.optimizer.__class__,
}
# TODO save also architecture param?
#if isinstance(self.model, DeepFeedForward):
# checkpoint['model_class'] = self.model.__class__
# checkpoint['model_structure'] = {
# }
torch.save(checkpoint, filename)
def load_state(self, filename):
checkpoint = torch.load(filename)
self.model.load_state_dict(checkpoint['model_state'])
self.optimizer = checkpoint['optimizer_class'](self.model.parameters())
self.optimizer.load_state_dict(checkpoint['optimizer_state'])
self.trained_epoch = checkpoint['epoch']
return self
def span_pts(self, n, mode='grid', locations='all'):
        '''
        Discretize every requested condition of the problem with `n` points,
        sampled according to `mode`.
        '''
if locations == 'all':
locations = [condition for condition in self.problem.conditions]
for location in locations:
manifold, func = self.problem.conditions[location].values()
if torch.is_tensor(manifold):
pts = manifold
else:
pts = manifold.discretize(n, mode)
pts = torch.from_numpy(pts)
self.input_pts[location] = LabelTensor(pts, self.problem.input_variables)
            self.input_pts[location].tensor = self.input_pts[location].tensor.to(dtype=self.dtype, device=self.device)
self.input_pts[location].tensor.requires_grad_(True)
self.input_pts[location].tensor.retain_grad()
def train(self, stop=100, frequency_print=2, trial=None):
epoch = 0
## TODO for elliptic
# parameters = torch.cat(torch.linspace(
# self.problem.parameter_domain[0, 0],
# self.problem.parameter_domain[0, 1],
# 5)
## for param laplacian
#parameters = torch.rand(50, 2)*2-1
parameters = torch.from_numpy(Cube([[-1, 1], [-1, 1]]).discretize(5, 'grid'))
# alpha_p = torch.logspace(start=-2, end=0, steps=10)
# mu_p = torch.linspace(0.5, 3, 5)
# g1_, g2_ = torch.meshgrid(alpha_p, mu_p)
# parameters = torch.cat([g2_.reshape(-1, 1), g1_.reshape(-1, 1)], axis=1)
print(parameters)
while True:
losses = []
for condition_name in self.problem.conditions:
pts = self.input_pts[condition_name]
pts = torch.cat([
pts.tensor.repeat_interleave(parameters.shape[0], dim=0),
torch.tile(parameters, (pts.tensor.shape[0], 1))
], axis=1)
pts = LabelTensor(pts, self.problem.input_variables + self.problem.parameters)
predicted = self.model(pts.tensor)
#predicted = self.model(pts)
if isinstance(self.problem.conditions[condition_name]['func'], list):
for func in self.problem.conditions[condition_name]['func']:
residuals = func(pts, None, predicted)
losses.append(self._compute_norm(residuals))
else:
residuals = self.problem.conditions[condition_name]['func'](pts, None, predicted)
losses.append(self._compute_norm(residuals))
self.optimizer.zero_grad()
sum(losses).backward()
self.optimizer.step()
#for p in parameters:
# pts = self.input_pts[condition_name]
# #pts = torch.cat([pts.tensor, p.double().repeat(pts.tensor.shape[0]).reshape(-1, 2)], axis=1)
# #pts = torch.cat([pts.tensor, p.double().repeat(pts.tensor.shape[0]).reshape(-1, 1)], axis=1)
# #print(self.problem.input_variables)
# # print(self.problem.parameters)
# # print(pts.shape)
# print(pts.tensor.repeat_interleave(parameters.shape[0]))
# # print(pts)
# # gg
# a = torch.cat([
# pts.tensor.repeat_interleave(parameters.shape[0], dim=0),
# torch.tile(parameters, (pts.tensor.shape[0], 1))
# ], axis=1)
# for i in a:
# print(i.detach())
# ttt
# pts = LabelTensor(pts, self.problem.input_variables + self.problem.parameters)
# ffff
# print(pts.labels)
# predicted = self.model(pts.tensor)
# #predicted = self.model(pts)
# if isinstance(self.problem.conditions[condition_name]['func'], list):
# for func in self.problem.conditions[condition_name]['func']:
# residuals = func(pts, LabelTensor(p.reshape(1, -1), ['mu', 'alpha']), predicted)
# tmp_losses.append(self._compute_norm(residuals))
# else:
# residuals = self.problem.conditions[condition_name]['func'](pts, p, predicted)
# tmp_losses.append(self._compute_norm(residuals))
#losses.append(sum(tmp_losses))
self.trained_epoch += 1
#if epoch % 10 == 0:
# self.history.append(losses)
epoch += 1
if trial:
import optuna
                trial.report(losses[0].item() + losses[1].item(), epoch)
if trial.should_prune():
raise optuna.exceptions.TrialPruned()
if isinstance(stop, int):
if epoch == stop:
break
elif isinstance(stop, float):
                if sum(losses) < stop:
break
if epoch % frequency_print == 0:
print('[epoch {:05d}] {:.6e} '.format(self.trained_epoch, sum(losses).item()), end='')
for loss in losses:
print('{:.6e} '.format(loss), end='')
print()
return sum(losses).item()
def error(self, dtype='l2', res=100):
import numpy as np
if hasattr(self.problem, 'truth_solution') and self.problem.truth_solution is not None:
pts_container = []
for mn, mx in self.problem.domain_bound:
pts_container.append(np.linspace(mn, mx, res))
grids_container = np.meshgrid(*pts_container)
Z_true = self.problem.truth_solution(*grids_container)
elif hasattr(self.problem, 'data_solution') and self.problem.data_solution is not None:
grids_container = self.problem.data_solution['grid']
Z_true = self.problem.data_solution['grid_solution']
try:
unrolled_pts = torch.tensor([t.flatten() for t in grids_container]).T.to(dtype=self.dtype, device=self.device)
Z_pred = self.model(unrolled_pts)
Z_pred = Z_pred.detach().numpy().reshape(grids_container[0].shape)
if dtype == 'l2':
return np.linalg.norm(Z_pred - Z_true)/np.linalg.norm(Z_true)
else:
# TODO H1
pass
        except Exception:
print("")
print("Something went wrong...")
print("Not able to compute the error. Please pass a data solution or a true solution")
def plot(self, res, param, filename=None, variable=None):
'''
'''
import matplotlib
matplotlib.use('GTK3Agg')
import matplotlib.pyplot as plt
pts_container = []
for mn, mx in [[-1, 1], [-1, 1]]:
pts_container.append(np.linspace(mn, mx, res))
grids_container = np.meshgrid(*pts_container)
unrolled_pts = torch.tensor([t.flatten() for t in grids_container]).T
unrolled_pts = torch.cat([unrolled_pts, param.double().repeat(unrolled_pts.shape[0]).reshape(-1, 2)], axis=1)
        unrolled_pts = unrolled_pts.to(dtype=self.dtype)
unrolled_pts = LabelTensor(unrolled_pts, ['x1', 'x2', 'mu1', 'mu2'])
Z_pred = self.model(unrolled_pts.tensor)
n = Z_pred.tensor.shape[1]
plt.figure(figsize=(6*n, 6))
for i, output in enumerate(Z_pred.tensor.T, start=1):
output = output.detach().numpy().reshape(grids_container[0].shape)
plt.subplot(1, n, i)
plt.contourf(*grids_container, output)
plt.colorbar()
if filename is None:
plt.show()
else:
plt.savefig(filename)
def plot_params(self, res, param, filename=None, variable=None):
'''
'''
import matplotlib
matplotlib.use('GTK3Agg')
import matplotlib.pyplot as plt
if hasattr(self.problem, 'truth_solution') and self.problem.truth_solution is not None:
n_plot = 2
elif hasattr(self.problem, 'data_solution') and self.problem.data_solution is not None:
n_plot = 2
else:
n_plot = 1
fig, axs = plt.subplots(nrows=1, ncols=n_plot, figsize=(n_plot*6,4))
if not isinstance(axs, np.ndarray): axs = [axs]
if hasattr(self.problem, 'data_solution') and self.problem.data_solution is not None:
grids_container = self.problem.data_solution['grid']
Z_true = self.problem.data_solution['grid_solution']
elif hasattr(self.problem, 'truth_solution') and self.problem.truth_solution is not None:
pts_container = []
for mn, mx in self.problem.domain_bound:
pts_container.append(np.linspace(mn, mx, res))
grids_container = np.meshgrid(*pts_container)
Z_true = self.problem.truth_solution(*grids_container)
pts_container = []
for mn, mx in self.problem.domain_bound:
pts_container.append(np.linspace(mn, mx, res))
grids_container = np.meshgrid(*pts_container)
        unrolled_pts = torch.tensor([t.flatten() for t in grids_container]).T.to(dtype=self.dtype)
#print(unrolled_pts)
#print(param)
param_unrolled_pts = torch.cat((unrolled_pts, param.repeat(unrolled_pts.shape[0], 1)), 1)
        if variable is None:
variable = self.problem.variables[0]
Z_pred = self.evaluate(param_unrolled_pts)[variable]
variable = "Solution"
else:
Z_pred = self.evaluate(param_unrolled_pts)[variable]
Z_pred= Z_pred.detach().numpy().reshape(grids_container[0].shape)
set_pred = axs[0].contourf(*grids_container, Z_pred)
axs[0].set_title('PINN [trained epoch = {}]'.format(self.trained_epoch) + " " + variable) #TODO add info about parameter in the title
fig.colorbar(set_pred, ax=axs[0])
if n_plot == 2:
set_true = axs[1].contourf(*grids_container, Z_true)
axs[1].set_title('Truth solution')
fig.colorbar(set_true, ax=axs[1])
if filename is None:
plt.show()
else:
fig.savefig(filename + " " + variable)
def plot_error(self, res, filename=None):
import matplotlib
matplotlib.use('GTK3Agg')
import matplotlib.pyplot as plt
fig, axs = plt.subplots(nrows=1, ncols=1, figsize=(6,4))
if not isinstance(axs, np.ndarray): axs = [axs]
if hasattr(self.problem, 'data_solution') and self.problem.data_solution is not None:
grids_container = self.problem.data_solution['grid']
Z_true = self.problem.data_solution['grid_solution']
elif hasattr(self.problem, 'truth_solution') and self.problem.truth_solution is not None:
pts_container = []
for mn, mx in self.problem.domain_bound:
pts_container.append(np.linspace(mn, mx, res))
grids_container = np.meshgrid(*pts_container)
Z_true = self.problem.truth_solution(*grids_container)
try:
            unrolled_pts = torch.tensor([t.flatten() for t in grids_container]).T.to(dtype=self.dtype)
Z_pred = self.model(unrolled_pts)
Z_pred = Z_pred.detach().numpy().reshape(grids_container[0].shape)
set_pred = axs[0].contourf(*grids_container, abs(Z_pred - Z_true))
axs[0].set_title('PINN [trained epoch = {}]'.format(self.trained_epoch) + "Pointwise Error")
fig.colorbar(set_pred, ax=axs[0])
if filename is None:
plt.show()
else:
fig.savefig(filename)
        except Exception:
print("")
print("Something went wrong...")
print("Not able to plot the error. Please pass a data solution or a true solution")
'''
print(self.pred_loss.item(),loss.item(), self.old_loss.item())
if self.accelerate is not None:
if self.pred_loss > loss and loss >= self.old_loss:
self.current_lr = self.original_lr
#print('restart')
elif (loss-self.pred_loss).item() < 0.1:
self.current_lr += .5*self.current_lr
#print('powa')
else:
self.current_lr -= .5*self.current_lr
#print(self.current_lr)
#self.current_lr = min(loss.item()*3, 0.02)
for g in self.optimizer.param_groups:
g['lr'] = self.current_lr
'''

49
pina/problem.py Normal file

@@ -0,0 +1,49 @@
import torch
class Problem(object):
def __init__(self, *args, **kwargs):
        raise NotImplementedError
@property
def variables(self):
return self._variables
@variables.setter
def variables(self, variables):
if variables is None:
variables = ['var']
self._variables = variables
@property
def boundary_conditions(self):
return self._bc
@boundary_conditions.setter
def boundary_conditions(self, bc):
if isinstance(bc, (list, tuple)):
bc = {'var': bc}
self._bc = bc
@property
def spatial_dimensions(self):
return self._spatial_dimensions
@staticmethod
def grad(output_, input_):
gradients = torch.autograd.grad(
output_,
input_.tensor,
grad_outputs=torch.ones(output_.size()).to(
dtype=input_.tensor.dtype,
device=input_.tensor.device),
create_graph=True, retain_graph=True, allow_unused=True)[0]
from pina.label_tensor import LabelTensor
return LabelTensor(gradients, input_.labels)
def __str__(self):
s = ''
#s = 'Variables: {}\n'.format(self.variables)
return s
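A gradient sketch for Problem.grad (hypothetical tensors; the method expects a LabelTensor input as defined above):

    import torch
    from pina.problem import Problem
    from pina.label_tensor import LabelTensor

    x = LabelTensor(torch.rand(10, 2, requires_grad=True), ['x', 'y'])
    u = (x.tensor ** 2).sum(dim=1, keepdim=True)  # u = x^2 + y^2
    grad_u = Problem.grad(u, x)                   # LabelTensor over ['x', 'y']
    print(grad_u['x'])                            # du/dx, i.e. 2 * x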

11
pina/problem1d.py Normal file

@@ -0,0 +1,11 @@
from .problem import Problem
import numpy as np
class Problem1D(Problem):
def __init__(self, variables=None, bc=None):
self._spatial_dimensions = 1
self.variables = variables
self.bc = bc

16
pina/problem2d.py Normal file

@@ -0,0 +1,16 @@
from .problem import Problem
class Problem2D(Problem):
spatial_dimensions = 2
@property
def boundary_condition(self):
return self._boundary_condition
@boundary_condition.setter
def boundary_condition(self, bc):
self._boundary_condition = bc

27
pina/segment.py Normal file

@@ -0,0 +1,27 @@
import torch
import numpy as np
from .chebyshev import chebyshev_roots
class Segment():
def __init__(self, p1, p2):
self.p1 = p1
self.p2 = p2
def discretize(self, n, mode='random'):
pts = []
if mode == 'random':
iterator = np.random.uniform(0, 1, n)
elif mode == 'grid':
iterator = np.linspace(0, 1, n)
        elif mode == 'chebyshev':
            iterator = chebyshev_roots(n) * .5 + .5
        else:
            raise ValueError('invalid discretization mode: {}'.format(mode))
for k in iterator:
x = self.p1[0] + k*(self.p2[0]-self.p1[0])
y = self.p1[1] + k*(self.p2[1]-self.p1[1])
pts.append((x, y))
pts = np.array(pts)
return pts
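A usage sketch (the endpoints are illustrative):

    from pina.segment import Segment

    s = Segment((0, 0), (1, 1))
    pts = s.discretize(5, mode='grid')  # 5 equally spaced points, shape (5, 2)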

27
pina/tdproblem1d.py Normal file

@@ -0,0 +1,27 @@
import numpy as np
from .problem1d import Problem1D
from .segment import Segment
class TimeDepProblem1D(Problem1D):
def __init__(self, variables=None, bc=None, initial=None, tend=None, domain_bound=None):
self.variables = variables
self._spatial_dimensions = 2
self.tend = tend
self.tstart = 0
if domain_bound is None:
            bound_pts = [b[0] for b in bc]
domain_bound = np.array([
[min(bound_pts), max(bound_pts)],
[self.tstart, self.tend ]
])
        self.domain_bound = domain_bound
self.boundary_conditions = (
(Segment((bc[0][0], self.tstart), (bc[1][0], self.tstart)), initial),
(Segment((bc[0][0], self.tstart), (bc[0][0], self.tend)), bc[0][1]),
(Segment((bc[1][0], self.tstart), (bc[1][0], self.tend)), bc[1][1])
)