diff --git a/examples/problems/burgers.py b/examples/problems/burgers.py
index 8f9e4c6..56c06fb 100644
--- a/examples/problems/burgers.py
+++ b/examples/problems/burgers.py
@@ -1,35 +1,26 @@
-import numpy as np
-import scipy.io
 import torch
-from pina.segment import Segment
-from pina.cube import Cube
-from pina.problem import TimeDependentProblem, Problem1D
+from pina.problem import TimeDependentProblem, SpatialProblem
 from pina.operators import grad
+from pina import Condition
+from pina.span import Span
 
 
-def tmp_grad(output_, input_):
-    return torch.autograd.grad(
-        output_,
-        input_.tensor,
-        grad_outputs=torch.ones(output_.size()).to(
-            dtype=input_.tensor.dtype,
-            device=input_.tensor.device),
-        create_graph=True, retain_graph=True, allow_unused=True)[0]
 
-class Burgers1D(TimeDependentProblem, Problem1D):
+class Burgers1D(TimeDependentProblem, SpatialProblem):
 
-    input_variables = ['x', 't']
+    spatial_variables = ['x']
+    temporal_variable = ['t']
     output_variables = ['u']
-    spatial_domain = Cube([[-1, 1]])
-    temporal_domain = Cube([[0, 1]])
+    domain = Span({'x': [-1, 1], 't': [0, 1]})
 
     def burger_equation(input_, output_):
        grad_u = grad(output_['u'], input_)
-        grad_x, grad_t = tmp_grad(output_['u'], input_).T
+        grad_x, grad_t = grad(output_['u'], input_).T
        gradgrad_u_x = grad(grad_u['x'], input_)
-        grad_xx = tmp_grad(grad_x, input_)[:, 0]
-        return grad_u['t'] + output_['u']*grad_u['x'] - (0.01/torch.pi)*gradgrad_u_x['x']
-
+        return (
+            grad_u['t'] + output_['u']*grad_u['x'] -
+            (0.01/torch.pi)*gradgrad_u_x['x']
+        )
 
     def nil_dirichlet(input_, output_):
        u_expected = 0.0
@@ -39,11 +30,9 @@ class Burgers1D(TimeDependentProblem, Problem1D):
        u_expected = -torch.sin(torch.pi*input_['x'])
        return output_['u'] - u_expected
 
-
-
     conditions = {
-        'gamma1': {'location': Segment((-1, 0), (-1, 1)), 'func': nil_dirichlet},
-        'gamma2': {'location': Segment(( 1, 0), ( 1, 1)), 'func': nil_dirichlet},
-        'initia': {'location': Segment((-1, 0), ( 1, 0)), 'func': initial_condition},
-        'D': {'location': Cube([[-1, 1],[0,1]]), 'func': burger_equation}
+        'gamma1': Condition(Span({'x': -1, 't': [0, 1]}), nil_dirichlet),
+        'gamma2': Condition(Span({'x': 1, 't': [0, 1]}), nil_dirichlet),
+        't0': Condition(Span({'x': [-1, 1], 't': 0}), initial_condition),
+        'D': Condition(Span({'x': [-1, 1], 't': [0, 1]}), burger_equation),
     }
diff --git a/examples/problems/parametric_poisson.py b/examples/problems/parametric_poisson.py
index c42d339..23ec83f 100644
--- a/examples/problems/parametric_poisson.py
+++ b/examples/problems/parametric_poisson.py
@@ -1,43 +1,41 @@
-import numpy as np
 import torch
-from pina.segment import Segment
-from pina.cube import Cube
-from pina.problem2d import Problem2D
-from pina.problem import Problem
+
+from pina.problem import SpatialProblem, ParametricProblem
+from pina.operators import nabla
+from pina import Span, Condition
 
 
-class ParametricPoisson2DProblem(Problem2D):
+class ParametricPoisson(SpatialProblem, ParametricProblem):
 
-    def __init__(self):
+    spatial_variables = ['x', 'y']
+    parameters = ['mu1', 'mu2']
+    output_variables = ['u']
+    domain = Span({'x': [-1, 1], 'y': [-1, 1]})
 
-        def laplace_equation(input_, param_, output_):
-            grad_u = self.grad(output_['u'], input_)
-            gradgrad_u_x = self.grad(grad_u['x'], input_)
-            gradgrad_u_y = self.grad(grad_u['y'], input_)
-            force_term = torch.exp(
-                - 2*(input_['x'] - input_['mu1'])**2
-                - 2*(input_['y'] - input_['mu2'])**2
-            )
-            return gradgrad_u_x['x'] + gradgrad_u_y['y'] - force_term
+    def laplace_equation(input_, output_):
+        force_term = 
torch.exp( + - 2*(input_['x'] - input_['mu1'])**2 - 2*(input_['y'] - + input_['mu2'])**2) + return nabla(output_['u'], input_) - force_term - def nil_dirichlet(input_, param_, output_): - value = 0.0 - return output_['u'] - value + def nil_dirichlet(input_, output_): + value = 0.0 + return output_['u'] - value - self.conditions = { - 'gamma1': {'location': Segment((-1, -1), ( 1, -1)),'func': nil_dirichlet}, - 'gamma2': {'location': Segment(( 1, -1), ( 1, 1)),'func': nil_dirichlet}, - 'gamma3': {'location': Segment(( 1, 1), (-1, 1)),'func': nil_dirichlet}, - 'gamma4': {'location': Segment((-1, 1), (-1, -1)),'func': nil_dirichlet}, - 'D': {'location': Cube([[-1, 1], [-1, 1]]), 'func': laplace_equation} - } - - self.input_variables = ['x', 'y'] - self.output_variables = ['u'] - self.parameters = ['mu1', 'mu2'] - #self.truth_solution = poisson_sol - self.spatial_domain = Cube([[0, 1], [0, 1]]) - self.parameter_domain = np.array([[-1, 1], [-1, 1]]) - - - #self.check() # Check the problem is correctly set + conditions = { + 'gamma1': Condition( + Span({'x': [-1, 1], 'y': 1, 'mu1': [-1, 1], 'mu2': [-1, 1]}), + nil_dirichlet), + 'gamma2': Condition( + Span({'x': [-1, 1], 'y': -1, 'mu1': [-1, 1], 'mu2': [-1, 1]}), + nil_dirichlet), + 'gamma3': Condition( + Span({'x': 1, 'y': [-1, 1], 'mu1': [-1, 1], 'mu2': [-1, 1]}), + nil_dirichlet), + 'gamma4': Condition( + Span({'x': -1, 'y': [-1, 1], 'mu1': [-1, 1], 'mu2': [-1, 1]}), + nil_dirichlet), + 'D': Condition( + Span({'x': [-1, 1], 'y': [-1, 1], 'mu1': [-1, 1], 'mu2': [-1, 1]}), + laplace_equation), + } diff --git a/examples/problems/poisson.py b/examples/problems/poisson.py new file mode 100644 index 0000000..1ffc564 --- /dev/null +++ b/examples/problems/poisson.py @@ -0,0 +1,35 @@ +import numpy as np +import torch + +from pina.problem import SpatialProblem +from pina.operators import nabla +from pina import Condition, Span + + +class Poisson(SpatialProblem): + + spatial_variables = ['x', 'y'] + output_variables = ['u'] + domain = Span({'x': [0, 1], 'y': [0, 1]}) + + def laplace_equation(input_, output_): + force_term = (torch.sin(input_['x']*torch.pi) * + torch.sin(input_['y']*torch.pi)) + return nabla(output_['u'], input_).flatten() - force_term + + def nil_dirichlet(input_, output_): + value = 0.0 + return output_['u'] - value + + conditions = { + 'gamma1': Condition(Span({'x': [-1, 1], 'y': 1}), nil_dirichlet), + 'gamma2': Condition(Span({'x': [-1, 1], 'y': -1}), nil_dirichlet), + 'gamma3': Condition(Span({'x': 1, 'y': [-1, 1]}), nil_dirichlet), + 'gamma4': Condition(Span({'x': -1, 'y': [-1, 1]}), nil_dirichlet), + 'D': Condition(Span({'x': [-1, 1], 'y': [-1, 1]}), laplace_equation), + } + + def poisson_sol(self, x, y): + return -(np.sin(x*np.pi)*np.sin(y*np.pi))/(2*np.pi**2) + + truth_solution = poisson_sol diff --git a/examples/problems/poisson2D.py b/examples/problems/poisson2D.py deleted file mode 100644 index 4015fea..0000000 --- a/examples/problems/poisson2D.py +++ /dev/null @@ -1,35 +0,0 @@ -import numpy as np -import torch -from pina.segment import Segment -from pina.cube import Cube -from pina.problem import Problem2D -from pina.operators import grad, div, nabla - - -class Poisson2D(Problem2D): - - input_variables = ['x', 'y'] - output_variables = ['u'] - spatial_domain = Cube([[0, 1], [0, 1]]) - - def laplace_equation(input_, output_): - force_term = (torch.sin(input_['x']*torch.pi) - * torch.sin(input_['y']*torch.pi)) - return nabla(output_['u'], input_).flatten() - force_term - - def nil_dirichlet(input_, output_): - value = 0.0 - 
return output_['u'] - value - - conditions = { - 'gamma1': {'location': Segment((0, 0), (1, 0)), 'func': nil_dirichlet}, - 'gamma2': {'location': Segment((1, 0), (1, 1)), 'func': nil_dirichlet}, - 'gamma3': {'location': Segment((1, 1), (0, 1)), 'func': nil_dirichlet}, - 'gamma4': {'location': Segment((0, 1), (0, 0)), 'func': nil_dirichlet}, - 'D': {'location': Cube([[0, 1], [0, 1]]), 'func': laplace_equation} - } - - def poisson_sol(self, x, y): - return -(np.sin(x*np.pi)*np.sin(y*np.pi))/(2*np.pi**2) - - truth_solution = poisson_sol diff --git a/examples/run_burgers.py b/examples/run_burgers.py index 9808a2a..2363561 100644 --- a/examples/run_burgers.py +++ b/examples/run_burgers.py @@ -1,15 +1,10 @@ -import sys -import numpy as np -import torch import argparse -from pina.pinn import PINN -from pina.ppinn import ParametricPINN as pPINN -from pina.label_tensor import LabelTensor -from torch.nn import ReLU, Tanh, Softplus -from problems.burgers import Burgers1D -from pina.deep_feed_forward import DeepFeedForward +import torch +from torch.nn import Softplus -from pina import Plotter +from pina import PINN, Plotter +from pina.model import FeedForward +from problems.burgers import Burgers1D class myFeature(torch.nn.Module): @@ -23,6 +18,7 @@ class myFeature(torch.nn.Module): def forward(self, x): return torch.sin(torch.pi * x[:, self.idx]) + if __name__ == "__main__": parser = argparse.ArgumentParser(description="Run PINA") @@ -36,11 +32,8 @@ if __name__ == "__main__": feat = [myFeature(0)] if args.features else [] burgers_problem = Burgers1D() - model = DeepFeedForward( + model = FeedForward( layers=[30, 20, 10, 5], - #layers=[8, 8, 8], - #layers=[16, 8, 4, 4], - #layers=[20, 4, 4, 4], output_variables=burgers_problem.output_variables, input_variables=burgers_problem.input_variables, func=Softplus, @@ -57,7 +50,7 @@ if __name__ == "__main__": if args.s: pinn.span_pts(2000, 'latin', ['D']) - pinn.span_pts(150, 'random', ['gamma1', 'gamma2', 'initia']) + pinn.span_pts(150, 'random', ['gamma1', 'gamma2', 't0']) pinn.train(5000, 100) pinn.save_state('pina.burger.{}.{}'.format(args.id_run, args.features)) else: diff --git a/examples/run_parametric_poisson.py b/examples/run_parametric_poisson.py index f25cb87..8c2f53a 100644 --- a/examples/run_parametric_poisson.py +++ b/examples/run_parametric_poisson.py @@ -1,27 +1,21 @@ -import sys -import numpy as np -import torch import argparse -from pina.pinn import PINN -from pina.ppinn import ParametricPINN as pPINN -from pina.label_tensor import LabelTensor -from torch.nn import ReLU, Tanh, Softplus -from problems.parametric_poisson import ParametricPoisson2DProblem as Poisson2D -from pina.deep_feed_forward import DeepFeedForward +import torch +from torch.nn import Softplus -from pina.adaptive_functions import AdaptiveSin, AdaptiveCos, AdaptiveTanh +from pina import PINN as pPINN +from problems.parametric_poisson import ParametricPoisson +from pina.model import FeedForward class myFeature(torch.nn.Module): """ - Feature: sin(x) """ - def __init__(self): super(myFeature, self).__init__() def forward(self, x): - return torch.exp(- 2*(x[:, 0] - x[:, 2])**2 - 2*(x[:, 1] - x[:, 3])**2) + return torch.exp(- 2*(x['x'] - x['mu1'])**2 - 2*(x['y'] - x['mu2'])**2) + if __name__ == "__main__": @@ -35,11 +29,11 @@ if __name__ == "__main__": feat = [myFeature()] if args.features else [] - poisson_problem = Poisson2D() - model = DeepFeedForward( + poisson_problem = ParametricPoisson() + model = FeedForward( layers=[200, 40, 10], 
output_variables=poisson_problem.output_variables, - input_variables=poisson_problem.input_variables+['mu1', 'mu2'], + input_variables=poisson_problem.input_variables, func=Softplus, extra_features=feat ) @@ -53,105 +47,10 @@ if __name__ == "__main__": if args.s: - pinn.span_pts(30, 'chebyshev', ['D']) - pinn.span_pts(50, 'grid', ['gamma1', 'gamma2', 'gamma3', 'gamma4']) - #pinn.plot_pts() + pinn.span_pts(2000, 'random', ['D']) + pinn.span_pts(200, 'random', ['gamma1', 'gamma2', 'gamma3', 'gamma4']) pinn.train(10000, 10) pinn.save_state('pina.poisson_param') else: pinn.load_state('pina.poisson_param') - #pinn.plot(40, torch.tensor([-0.8, -0.8])) - #pinn.plot(40, torch.tensor([ 0.8, 0.8])) - - from smithers.io import VTUHandler - from scipy.interpolate import griddata - import matplotlib - matplotlib.use('GTK3Agg') - import matplotlib.pyplot as plt - - res = 64 - fname_minus = 'Poisson_param_08minus000000.vtu' - param = torch.tensor([-0.8, -0.8]) - pts_container = [] - for mn, mx in [[-1, 1], [-1, 1]]: - pts_container.append(np.linspace(mn, mx, res)) - grids_container = np.meshgrid(*pts_container) - unrolled_pts = torch.tensor([t.flatten() for t in grids_container]).T - unrolled_pts = torch.cat([unrolled_pts, param.double().repeat(unrolled_pts.shape[0]).reshape(-1, 2)], axis=1) - - #unrolled_pts.to(dtype=self.dtype) - unrolled_pts = LabelTensor(unrolled_pts, ['x1', 'x2', 'mu1', 'mu2']) - - Z_pred = pinn.model(unrolled_pts.tensor) - data = VTUHandler.read(fname_minus) - - - print(data['points'][:, :2].shape) - print(data['point_data']['f_16'].shape) - print(grids_container[0].shape) - print(grids_container[1].shape) - Z_truth = griddata(data['points'][:, :2], data['point_data']['f_16'], (grids_container[0], grids_container[1])) - - - err = np.abs(Z_truth + Z_pred.tensor.reshape(res, res).detach().numpy()) - - plt.subplot(1, 3, 1) - plt.pcolor(-Z_pred.tensor.reshape(res, res).detach()) - plt.colorbar() - plt.subplot(1, 3, 2) - plt.pcolor(Z_truth) - plt.colorbar() - plt.subplot(1, 3, 3) - plt.pcolor(err, vmin=0, vmax=0.009) - plt.colorbar() - plt.show() - - print(unrolled_pts.tensor.shape) - with open('parpoisson_minus_plot.txt', 'w') as f_: - f_.write('x y truth pred e\n') - for (x, y), tru, pre, e in zip(unrolled_pts[:, :2], Z_truth.reshape(-1, 1), -Z_pred.tensor.reshape(-1, 1), err.reshape(-1, 1)): - f_.write('{} {} {} {} {}\n'.format(x.item(), y.item(), tru.item(), pre.item(), e.item())) - - fname_plus = 'Poisson_param_08plus000000.vtu' - param = torch.tensor([0.8, 0.8]) - pts_container = [] - for mn, mx in [[-1, 1], [-1, 1]]: - pts_container.append(np.linspace(mn, mx, res)) - grids_container = np.meshgrid(*pts_container) - unrolled_pts = torch.tensor([t.flatten() for t in grids_container]).T - unrolled_pts = torch.cat([unrolled_pts, param.double().repeat(unrolled_pts.shape[0]).reshape(-1, 2)], axis=1) - - #unrolled_pts.to(dtype=self.dtype) - unrolled_pts = LabelTensor(unrolled_pts, ['x1', 'x2', 'mu1', 'mu2']) - - Z_pred = pinn.model(unrolled_pts.tensor) - data = VTUHandler.read(fname_plus) - - - print(data['points'][:, :2].shape) - print(data['point_data']['f_16'].shape) - print(grids_container[0].shape) - print(grids_container[1].shape) - Z_truth = griddata(data['points'][:, :2], data['point_data']['f_16'], (grids_container[0], grids_container[1])) - - - err = np.abs(Z_truth + Z_pred.tensor.reshape(res, res).detach().numpy()) - - plt.subplot(1, 3, 1) - plt.pcolor(-Z_pred.tensor.reshape(res, res).detach()) - plt.colorbar() - plt.subplot(1, 3, 2) - plt.pcolor(Z_truth) - plt.colorbar() - 
plt.subplot(1, 3, 3) - print('gggggg') - plt.pcolor(err, vmin=0, vmax=0.001) - plt.colorbar() - plt.show() - with open('parpoisson_plus_plot.txt', 'w') as f_: - f_.write('x y truth pred e\n') - for (x, y), tru, pre, e in zip(unrolled_pts[:, :2], Z_truth.reshape(-1, 1), -Z_pred.tensor.reshape(-1, 1), err.reshape(-1, 1)): - f_.write('{} {} {} {} {}\n'.format(x.item(), y.item(), tru.item(), pre.item(), e.item())) - - diff --git a/pina/__init__.py b/pina/__init__.py index 00ddfa1..001036c 100644 --- a/pina/__init__.py +++ b/pina/__init__.py @@ -1,5 +1,15 @@ -from .pinn import PINN -from .deep_feed_forward import DeepFeedForward +__all__ = [ + 'PINN', + 'ParametricPINN', + 'LabelTensor', + 'Plotter', + 'Condition', + 'Span' +] from .label_tensor import LabelTensor +from .pinn import PINN +#from .ppinn import ParametricPINN from .plotter import Plotter +from .span import Span +from .condition import Condition diff --git a/pina/condition.py b/pina/condition.py new file mode 100644 index 0000000..398c387 --- /dev/null +++ b/pina/condition.py @@ -0,0 +1,32 @@ +""" """ +import torch + +from .location import Location + +class Condition: + def __init__(self, *args, **kwargs): + + if len(args) == 2 and not kwargs: + + if (isinstance(args[0], torch.Tensor) and + isinstance(args[1], torch.Tensor)): + self.input_points = args[0] + self.output_points = args[1] + elif isinstance(args[0], Location) and callable(args[1]): + self.location = args[0] + self.function = args[1] + else: + raise ValueError + + elif not args and len(kwargs) == 2: + + if 'input_points' in kwargs and 'output_points' in kwargs: + self.input_points = kwargs['input_points'] + self.output_points = kwargs['output_points'] + elif 'location' in kwargs and 'function' in kwargs: + self.location = kwargs['location'] + self.function = kwargs['function'] + else: + raise ValueError + else: + raise ValueError diff --git a/pina/cube.py b/pina/cube.py deleted file mode 100644 index 9b39356..0000000 --- a/pina/cube.py +++ /dev/null @@ -1,44 +0,0 @@ -import numpy as np -from .chebyshev import chebyshev_roots - - -class Cube(): - def __init__(self, bound): - self.bound = np.asarray(bound) - - def discretize(self, n, mode='random'): - - if mode == 'random': - pts = np.random.uniform(size=(n, self.bound.shape[0])) - elif mode == 'chebyshev': - pts = np.array([ - chebyshev_roots(n) * .5 + .5 - for _ in range(self.bound.shape[0])]) - grids = np.meshgrid(*pts) - pts = np.hstack([grid.reshape(-1, 1) for grid in grids]) - elif mode == 'grid': - pts = np.array([ - np.linspace(0, 1, n) - for _ in range(self.bound.shape[0])]) - grids = np.meshgrid(*pts) - pts = np.hstack([grid.reshape(-1, 1) for grid in grids]) - elif mode == 'lh' or mode == 'latin': - from scipy.stats import qmc - sampler = qmc.LatinHypercube(d=self.bound.shape[0]) - pts = sampler.random(n) - - # Scale pts - pts *= self.bound[:, 1] - self.bound[:, 0] - pts += self.bound[:, 0] - - return pts - - def meshgrid(self, n): - pts = np.array([ - np.linspace(0, 1, n) - for _ in range(self.bound.shape[0])]) - - pts *= self.bound[:, 1] - self.bound[:, 0] - pts += self.bound[:, 0] - - return np.meshgrid(*pts) diff --git a/pina/label_tensor.py b/pina/label_tensor.py index 00a6448..16eec26 100644 --- a/pina/label_tensor.py +++ b/pina/label_tensor.py @@ -1,8 +1,8 @@ import torch -class LabelTensor(): +class LabelTensor(): - def __init__(self, x, labels): + def __init__(self, x, labels): if len(labels) != x.shape[1]: @@ -21,7 +21,19 @@ class LabelTensor(): return self.tensor def __str__(self): - return 
self.tensor, self.labels + return '{}\n {}\n'.format(self.labels, self.tensor) + + @property + def shape(self): + return self.tensor.shape + + @property + def dtype(self): + return self.tensor.dtype + + @property + def device(self): + return self.tensor.device @property def labels(self): diff --git a/pina/location.py b/pina/location.py new file mode 100644 index 0000000..9a7260b --- /dev/null +++ b/pina/location.py @@ -0,0 +1,9 @@ +from abc import ABCMeta, abstractmethod + + +class Location(metaclass=ABCMeta): + + @property + @abstractmethod + def sample(self): + pass diff --git a/pina/meta.py b/pina/meta.py index 80cbc7f..566279e 100644 --- a/pina/meta.py +++ b/pina/meta.py @@ -14,7 +14,7 @@ __title__ = "pina" __author__ = "Nicola Demo, Maria Strazzullo" __copyright__ = "Copyright 2021-2021, PINA contributors" __license__ = "MIT" -__version__ = "0.0.0" -__mail__ = 'demo.nicola@gmail.com, ' # TODO +__version__ = "0.0.1" +__mail__ = 'demo.nicola@gmail.com, ' # TODO __maintainer__ = __author__ __status__ = "Alpha" diff --git a/pina/model/__init__.py b/pina/model/__init__.py new file mode 100644 index 0000000..7c2f61e --- /dev/null +++ b/pina/model/__init__.py @@ -0,0 +1,7 @@ +__all__ = [ + 'FeedForward', + 'MultiFeedForward' +] + +from .feed_forward import FeedForward +from .multi_feed_forward import MultiFeedForward diff --git a/pina/deep_feed_forward.py b/pina/model/feed_forward.py similarity index 53% rename from pina/deep_feed_forward.py rename to pina/model/feed_forward.py index ca59c59..6095a97 100644 --- a/pina/deep_feed_forward.py +++ b/pina/model/feed_forward.py @@ -1,34 +1,21 @@ import torch import torch.nn as nn -import numpy as np -from .cube import Cube -torch.pi = torch.acos(torch.zeros(1)).item() * 2 # which is 3.1415927410125732 -from torch.nn import Tanh, ReLU -#import torch.nn.utils.prune as prune -from pina.adaptive_functions import AdaptiveLinear + from pina.label_tensor import LabelTensor -class DeepFeedForward(torch.nn.Module): - def __init__(self, - inner_size=20, - n_layers=2, - func=nn.Tanh, - input_variables=None, - output_variables=None, - layers=None, - extra_features=None): +class FeedForward(torch.nn.Module): + + def __init__(self, input_variables, output_variables, inner_size=20, + n_layers=2, func=nn.Tanh, layers=None, extra_features=None): ''' ''' - super(DeepFeedForward, self).__init__() + super().__init__() if extra_features is None: extra_features = [] self.extra_features = nn.Sequential(*extra_features) - if input_variables is None: input_variables = ['x'] - if output_variables is None: input_variables = ['y'] - self.input_variables = input_variables self.input_dimension = len(input_variables) @@ -37,45 +24,43 @@ class DeepFeedForward(torch.nn.Module): n_features = len(extra_features) - if layers is None: layers = [inner_size] * n_layers + if layers is None: + layers = [inner_size] * n_layers tmp_layers = layers.copy() - tmp_layers.insert(0, self.input_dimension+n_features)#-1) + tmp_layers.insert(0, self.input_dimension+n_features) tmp_layers.append(self.output_dimension) self.layers = [] for i in range(len(tmp_layers)-1): self.layers.append(nn.Linear(tmp_layers[i], tmp_layers[i+1])) - - if isinstance(func, list): self.functions = func else: self.functions = [func for _ in range(len(self.layers)-1)] - unique_list = [] for layer, func in zip(self.layers[:-1], self.functions): unique_list.append(layer) - if func is not None: unique_list.append(func()) + if func is not None: + unique_list.append(func()) unique_list.append(self.layers[-1]) self.model = 
nn.Sequential(*unique_list) - def forward(self, x): - + """ + """ nf = len(self.extra_features) if nf == 0: - return LabelTensor(self.model(x), self.output_variables) + return LabelTensor(self.model(x.tensor), self.output_variables) # if self.extra_features - #input_ = torch.zeros(x.shape[0], nf+self.input_dimension, dtype=x.dtype, device=x.device) - input_ = torch.zeros(x.shape[0], nf+x.shape[1], dtype=x.dtype, device=x.device) - input_[:, :x.shape[1]] = x - for i, feature in enumerate(self.extra_features, start=self.input_dimension): + input_ = torch.zeros(x.shape[0], nf+x.shape[1], dtype=x.dtype, + device=x.device) + input_[:, :x.shape[1]] = x.tensor + for i, feature in enumerate(self.extra_features, + start=self.input_dimension): input_[:, i] = feature(x) return LabelTensor(self.model(input_), self.output_variables) - - diff --git a/pina/model/multi_feed_forward.py b/pina/model/multi_feed_forward.py new file mode 100644 index 0000000..b5de7e7 --- /dev/null +++ b/pina/model/multi_feed_forward.py @@ -0,0 +1,17 @@ +import torch + +from .feed_forward import FeedForward + + +class MultiFeedForward(torch.nn.Module): + + def __init__(self, dff_dict): + ''' + ''' + super().__init__() + + if not isinstance(dff_dict, dict): + raise TypeError + + for name, constructor_args in dff_dict.items(): + setattr(self, name, FeedForward(**constructor_args)) diff --git a/pina/multi_deep_feed_forward.py b/pina/multi_deep_feed_forward.py deleted file mode 100644 index 3bde1e5..0000000 --- a/pina/multi_deep_feed_forward.py +++ /dev/null @@ -1,27 +0,0 @@ - -from .problem import Problem -import torch -import torch.nn as nn -import numpy as np -from .cube import Cube -torch.pi = torch.acos(torch.zeros(1)).item() * 2 # which is 3.1415927410125732 -from torch.nn import Tanh, ReLU -import torch.nn.utils.prune as prune -from pina.adaptive_functions import AdaptiveLinear -from pina.deep_feed_forward import DeepFeedForward - -class MultiDeepFeedForward(torch.nn.Module): - - def __init__(self, dff_dict): - ''' - ''' - super().__init__() - - if not isinstance(dff_dict, dict): - raise TypeError - - for name, constructor_args in dff_dict.items(): - setattr(self, name, DeepFeedForward(**constructor_args)) - - - diff --git a/pina/parametricproblem2d.py b/pina/parametricproblem2d.py deleted file mode 100644 index beefd35..0000000 --- a/pina/parametricproblem2d.py +++ /dev/null @@ -1,9 +0,0 @@ -from .problem2d import Problem2D -import numpy as np - -class ParametricProblem2D(Problem2D): - - def __init__(self, variables=None, bc=None, params_bound=None, domain_bound=None): - - Problem2D.__init__(self, variables=variables, bc=bc, domain_bound=domain_bound) - self.params_domain = params_bound diff --git a/pina/pinn.py b/pina/pinn.py index 0a51265..916f2be 100644 --- a/pina/pinn.py +++ b/pina/pinn.py @@ -163,17 +163,16 @@ class PINN(object): if locations == 'all': locations = [condition for condition in self.problem.conditions] - for location in locations: - manifold, func = self.problem.conditions[location].values() - if torch.is_tensor(manifold): - pts = manifold - else: - pts = manifold.discretize(n, mode) + condition = self.problem.conditions[location] - pts = torch.from_numpy(pts) + try: + pts = condition.location.sample(n, mode) + except: + pts = condition.input_points - self.input_pts[location] = LabelTensor(pts, self.problem.input_variables) + self.input_pts[location] = pts + print(pts.tensor.shape) self.input_pts[location].tensor.to(dtype=self.dtype, device=self.device) self.input_pts[location].tensor.requires_grad_(True) 
self.input_pts[location].tensor.retain_grad() @@ -203,17 +202,15 @@ class PINN(object): while True: losses = [] + for condition_name in self.problem.conditions: + condition = self.problem.conditions[condition_name] pts = self.input_pts[condition_name] - predicted = self.model(pts.tensor) - if isinstance(self.problem.conditions[condition_name]['func'], list): - for func in self.problem.conditions[condition_name]['func']: - residuals = func(pts, predicted) - losses.append(self._compute_norm(residuals)) - else: - residuals = self.problem.conditions[condition_name]['func'](pts, predicted) - losses.append(self._compute_norm(residuals)) - #print(condition_name, losses[-1]) + + predicted = self.model(pts) + + residuals = condition.function(pts, predicted) + losses.append(self._compute_norm(residuals)) self.optimizer.zero_grad() sum(losses).backward() diff --git a/pina/plotter.py b/pina/plotter.py index 4ac0881..ff14e76 100644 --- a/pina/plotter.py +++ b/pina/plotter.py @@ -7,7 +7,7 @@ import torch from pina import LabelTensor from pina import PINN -from .problem import Problem2D, Problem1D, TimeDependentProblem +from .problem import SpatialProblem, TimeDependentProblem #from pina.tdproblem1d import TimeDepProblem1D @@ -79,13 +79,31 @@ class Plotter: - def plot(self, obj, filename=None): + def plot(self, obj, method='contourf', filename=None): """ """ - if isinstance(obj.problem, (TimeDependentProblem, Problem1D)): # time-dep 1D - self._plot_1D_TimeDep(obj, method='pcolor') - elif isinstance(obj.problem, Problem2D): # 2D - self._plot_2D(obj, method='pcolor') + res = 256 + pts = obj.problem.domain.sample(res, 'grid') + grids_container = [ + pts[:, 0].reshape(res, res), + pts[:, 1].reshape(res, res), + ] + predicted_output = obj.model(pts) + + if hasattr(obj.problem, 'truth_solution'): + truth_output = obj.problem.truth_solution(*pts.tensor.T).float() + fig, axes = plt.subplots(nrows=1, ncols=3, figsize=(16, 6)) + + cb = getattr(axes[0], method)(*grids_container, predicted_output.tensor.reshape(res, res).detach()) + fig.colorbar(cb, ax=axes[0]) + cb = getattr(axes[1], method)(*grids_container, truth_output.reshape(res, res).detach()) + fig.colorbar(cb, ax=axes[1]) + cb = getattr(axes[2], method)(*grids_container, (truth_output-predicted_output.tensor.float().flatten()).detach().reshape(res, res)) + fig.colorbar(cb, ax=axes[2]) + else: + fig, axes = plt.subplots(nrows=1, ncols=1, figsize=(8, 6)) + cb = getattr(axes, method)(*grids_container, predicted_output.tensor.reshape(res, res).detach()) + fig.colorbar(cb, ax=axes) if filename: plt.savefig(filename) diff --git a/pina/ppinn.py b/pina/ppinn.py index b3fec7f..9bab9db 100644 --- a/pina/ppinn.py +++ b/pina/ppinn.py @@ -1,28 +1,16 @@ -from mpmath import chebyt, chop, taylor +import torch +import numpy as np from .problem import AbstractProblem -import torch -import torch.nn as nn -import numpy as np -from .cube import Cube -from .deep_feed_forward import DeepFeedForward -from pina.label_tensor import LabelTensor -from pina.pinn import PINN -torch.pi = torch.acos(torch.zeros(1)).item() * 2 # which is 3.1415927410125732 +from . 
import PINN, LabelTensor +torch.pi = torch.acos(torch.zeros(1)).item() * 2 # 3.1415927410125732 + class ParametricPINN(PINN): - def __init__(self, - problem, - model, - optimizer=torch.optim.Adam, - lr=0.001, - regularizer=0.00001, - data_weight=1., - dtype=torch.float64, - device='cpu', - lr_accelerate=None, - error_norm='mse'): + def __init__(self, problem, model, optimizer=torch.optim.Adam, lr=0.001, + regularizer=0.00001, data_weight=1., dtype=torch.float64, + device='cpu', lr_accelerate=None, error_norm='mse'): ''' :param Problem problem: the formualation of the problem. :param dict architecture: a dictionary containing the information to @@ -40,7 +28,7 @@ class ParametricPINN(PINN): default is `torch.float64`. :param float lr_accelete: the coefficient that controls the learning rate increase, such that, for all the epoches in which the loss is - decreasing, the learning_rate is update using + decreasing, the learning_rate is update using $learning_rate = learning_rate * lr_accelerate$. When the loss stops to decrease, the learning rate is set to the initial value [TODO test parameters] @@ -49,7 +37,7 @@ class ParametricPINN(PINN): self.problem = problem - + # self._architecture = architecture if architecture else dict() # self._architecture['input_dimension'] = self.problem.domain_bound.shape[0] # self._architecture['output_dimension'] = len(self.problem.variables) @@ -77,7 +65,7 @@ class ParametricPINN(PINN): self.trained_epoch = 0 self.optimizer = optimizer( self.model.parameters(), lr=lr, weight_decay=regularizer) - + self.data_weight = data_weight @property @@ -86,7 +74,7 @@ class ParametricPINN(PINN): @problem.setter def problem(self, problem): - if not isinstance(problem, Problem): + if not isinstance(problem, AbstractProblem): raise TypeError self._problem = problem @@ -103,124 +91,31 @@ class ParametricPINN(PINN): def get_phys_residuals(self): """ """ - + residuals = [] for equation in self.problem.equation: residuals.append(equation(self.phys_pts, self.model(self.phys_pts))) return residuals - def _compute_norm(self, vec): - """ - Compute the norm of the `vec` one-dimensional tensor based on the - `self.error_norm` attribute. - - .. todo: complete - - :param vec torch.tensor: the tensor - """ - if isinstance(self.error_norm, int): - return torch.sum(torch.abs(vec**self.error_norm))**(1./self.error_norm) - elif self.error_norm == 'mse': - return torch.mean(vec**2) - elif self.error_norm == 'me': - return torch.mean(torch.abs(vec)) - else: - raise RuntimeError - - def save_state(self, filename): - - checkpoint = { - 'epoch': self.trained_epoch, - 'model_state': self.model.state_dict(), - 'optimizer_state' : self.optimizer.state_dict(), - 'optimizer_class' : self.optimizer.__class__, - } - - # TODO save also architecture param? 
- #if isinstance(self.model, DeepFeedForward): - # checkpoint['model_class'] = self.model.__class__ - # checkpoint['model_structure'] = { - # } - torch.save(checkpoint, filename) - - def load_state(self, filename): - - checkpoint = torch.load(filename) - self.model.load_state_dict(checkpoint['model_state']) - - self.optimizer = checkpoint['optimizer_class'](self.model.parameters()) - self.optimizer.load_state_dict(checkpoint['optimizer_state']) - - self.trained_epoch = checkpoint['epoch'] - return self - - - def span_pts(self, n, mode='grid', locations='all'): - ''' - - ''' - - if locations == 'all': - locations = [condition for condition in self.problem.conditions] - - - for location in locations: - manifold, func = self.problem.conditions[location].values() - if torch.is_tensor(manifold): - pts = manifold - else: - pts = manifold.discretize(n, mode) - - pts = torch.from_numpy(pts) - - self.input_pts[location] = LabelTensor(pts, self.problem.input_variables) - self.input_pts[location].tensor.to(dtype=self.dtype, device=self.device) - self.input_pts[location].tensor.requires_grad_(True) - self.input_pts[location].tensor.retain_grad() - def train(self, stop=100, frequency_print=2, trial=None): epoch = 0 - ## TODO for elliptic - # parameters = torch.cat(torch.linspace( - # self.problem.parameter_domain[0, 0], - # self.problem.parameter_domain[0, 1], - # 5) - - ## for param laplacian - #parameters = torch.rand(50, 2)*2-1 - parameters = torch.from_numpy(Cube([[-1, 1], [-1, 1]]).discretize(5, 'grid')) - # alpha_p = torch.logspace(start=-2, end=0, steps=10) - # mu_p = torch.linspace(0.5, 3, 5) - # g1_, g2_ = torch.meshgrid(alpha_p, mu_p) - # parameters = torch.cat([g2_.reshape(-1, 1), g1_.reshape(-1, 1)], axis=1) - print(parameters) - while True: losses = [] for condition_name in self.problem.conditions: - + condition = self.problem.conditions[condition_name] pts = self.input_pts[condition_name] - pts = torch.cat([ - pts.tensor.repeat_interleave(parameters.shape[0], dim=0), - torch.tile(parameters, (pts.tensor.shape[0], 1)) - ], axis=1) - pts = LabelTensor(pts, self.problem.input_variables + self.problem.parameters) + predicted = self.model(pts.tensor) #predicted = self.model(pts) - if isinstance(self.problem.conditions[condition_name]['func'], list): - for func in self.problem.conditions[condition_name]['func']: - residuals = func(pts, None, predicted) - losses.append(self._compute_norm(residuals)) - else: - residuals = self.problem.conditions[condition_name]['func'](pts, None, predicted) - losses.append(self._compute_norm(residuals)) + residuals = condition.function(pts, predicted) + losses.append(self._compute_norm(residuals)) self.optimizer.zero_grad() sum(losses).backward() self.optimizer.step() @@ -268,15 +163,15 @@ class ParametricPINN(PINN): if trial: import optuna - rial.report(loss[0].item()+loss[1].item(), epoch) + trial.report(loss[0].item()+loss[1].item(), epoch) if trial.should_prune(): raise optuna.exceptions.TrialPruned() if isinstance(stop, int): - if epoch == stop: + if epoch == stop: break elif isinstance(stop, float): - if loss[0].item() + loss[1].item() < stop: + if loss[0].item() + loss[1].item() < stop: break if epoch % frequency_print == 0: @@ -289,7 +184,7 @@ class ParametricPINN(PINN): def error(self, dtype='l2', res=100): - + import numpy as np if hasattr(self.problem, 'truth_solution') and self.problem.truth_solution is not None: pts_container = [] @@ -305,8 +200,8 @@ class ParametricPINN(PINN): unrolled_pts = torch.tensor([t.flatten() for t in 
grids_container]).T.to(dtype=self.dtype, device=self.device) Z_pred = self.model(unrolled_pts) Z_pred = Z_pred.detach().numpy().reshape(grids_container[0].shape) - - if dtype == 'l2': + + if dtype == 'l2': return np.linalg.norm(Z_pred - Z_true)/np.linalg.norm(Z_true) else: # TODO H1 @@ -339,7 +234,7 @@ class ParametricPINN(PINN): for i, output in enumerate(Z_pred.tensor.T, start=1): output = output.detach().numpy().reshape(grids_container[0].shape) - plt.subplot(1, n, i) + plt.subplot(1, n, i) plt.contourf(*grids_container, output) plt.colorbar() @@ -354,14 +249,14 @@ class ParametricPINN(PINN): import matplotlib matplotlib.use('GTK3Agg') import matplotlib.pyplot as plt - + if hasattr(self.problem, 'truth_solution') and self.problem.truth_solution is not None: n_plot = 2 elif hasattr(self.problem, 'data_solution') and self.problem.data_solution is not None: n_plot = 2 else: n_plot = 1 - + fig, axs = plt.subplots(nrows=1, ncols=n_plot, figsize=(n_plot*6,4)) if not isinstance(axs, np.ndarray): axs = [axs] @@ -369,14 +264,14 @@ class ParametricPINN(PINN): grids_container = self.problem.data_solution['grid'] Z_true = self.problem.data_solution['grid_solution'] elif hasattr(self.problem, 'truth_solution') and self.problem.truth_solution is not None: - + pts_container = [] for mn, mx in self.problem.domain_bound: pts_container.append(np.linspace(mn, mx, res)) grids_container = np.meshgrid(*pts_container) Z_true = self.problem.truth_solution(*grids_container) - + pts_container = [] for mn, mx in self.problem.domain_bound: pts_container.append(np.linspace(mn, mx, res)) @@ -396,38 +291,38 @@ class ParametricPINN(PINN): set_pred = axs[0].contourf(*grids_container, Z_pred) axs[0].set_title('PINN [trained epoch = {}]'.format(self.trained_epoch) + " " + variable) #TODO add info about parameter in the title fig.colorbar(set_pred, ax=axs[0]) - + if n_plot == 2: - + set_true = axs[1].contourf(*grids_container, Z_true) - + axs[1].set_title('Truth solution') fig.colorbar(set_true, ax=axs[1]) - + if filename is None: plt.show() else: fig.savefig(filename + " " + variable) - + def plot_error(self, res, filename=None): import matplotlib matplotlib.use('GTK3Agg') import matplotlib.pyplot as plt - - + + fig, axs = plt.subplots(nrows=1, ncols=1, figsize=(6,4)) if not isinstance(axs, np.ndarray): axs = [axs] if hasattr(self.problem, 'data_solution') and self.problem.data_solution is not None: grids_container = self.problem.data_solution['grid'] Z_true = self.problem.data_solution['grid_solution'] - elif hasattr(self.problem, 'truth_solution') and self.problem.truth_solution is not None: + elif hasattr(self.problem, 'truth_solution') and self.problem.truth_solution is not None: pts_container = [] for mn, mx in self.problem.domain_bound: pts_container.append(np.linspace(mn, mx, res)) grids_container = np.meshgrid(*pts_container) - Z_true = self.problem.truth_solution(*grids_container) + Z_true = self.problem.truth_solution(*grids_container) try: unrolled_pts = torch.tensor([t.flatten() for t in grids_container]).T.to(dtype=self.type) @@ -449,7 +344,7 @@ class ParametricPINN(PINN): ''' print(self.pred_loss.item(),loss.item(), self.old_loss.item()) if self.accelerate is not None: - if self.pred_loss > loss and loss >= self.old_loss: + if self.pred_loss > loss and loss >= self.old_loss: self.current_lr = self.original_lr #print('restart') elif (loss-self.pred_loss).item() < 0.1: @@ -459,7 +354,7 @@ if self.accelerate is not None: self.current_lr -= .5*self.current_lr #print(self.current_lr) #self.current_lr = 
min(loss.item()*3, 0.02)
-    
+
     for g in self.optimizer.param_groups:
-        g['lr'] = self.current_lr 
+        g['lr'] = self.current_lr
 '''
diff --git a/pina/problem/__init__.py b/pina/problem/__init__.py
index ddb3b2f..2307dff 100644
--- a/pina/problem/__init__.py
+++ b/pina/problem/__init__.py
@@ -1,4 +1,11 @@
+__all__ = [
+    'AbstractProblem',
+    'SpatialProblem',
+    'TimeDependentProblem',
+    'ParametricProblem',
+]
+
 from .abstract_problem import AbstractProblem
-from .problem2d import Problem2D
-from .problem1d import Problem1D
+from .spatial_problem import SpatialProblem
 from .timedep_problem import TimeDependentProblem
+from .parametric_problem import ParametricProblem
diff --git a/pina/problem/abstract_problem.py b/pina/problem/abstract_problem.py
index 0b8a4f0..e9a0f76 100644
--- a/pina/problem/abstract_problem.py
+++ b/pina/problem/abstract_problem.py
@@ -4,9 +4,23 @@
 class AbstractProblem(metaclass=ABCMeta):
 
     @property
-    @abstractmethod
     def input_variables(self):
-        pass
+        variables = []
+
+        if hasattr(self, 'spatial_variables'):
+            variables += self.spatial_variables
+        if hasattr(self, 'temporal_variable'):
+            variables += self.temporal_variable
+        if hasattr(self, 'parameters'):
+            variables += self.parameters
+        if hasattr(self, 'custom_variables'):
+            variables += self.custom_variables
+
+        return variables
+
+    @input_variables.setter
+    def input_variables(self, variables):
+        raise NotImplementedError
 
     @property
     @abstractmethod
diff --git a/pina/problem/parametric_problem.py b/pina/problem/parametric_problem.py
new file mode 100644
index 0000000..67a1cb5
--- /dev/null
+++ b/pina/problem/parametric_problem.py
@@ -0,0 +1,11 @@
+from abc import abstractmethod
+
+from .abstract_problem import AbstractProblem
+
+
+class ParametricProblem(AbstractProblem):
+
+    @property
+    @abstractmethod
+    def parameters(self):
+        pass
diff --git a/pina/problem/problem1d.py b/pina/problem/problem1d.py
deleted file mode 100644
index 15564ee..0000000
--- a/pina/problem/problem1d.py
+++ /dev/null
@@ -1,6 +0,0 @@
-from .abstract_problem import AbstractProblem
-
-
-class Problem1D(AbstractProblem):
-
-    spatial_dimensions = 1
diff --git a/pina/problem/problem2d.py b/pina/problem/problem2d.py
deleted file mode 100644
index 9ab90ee..0000000
--- a/pina/problem/problem2d.py
+++ /dev/null
@@ -1,6 +0,0 @@
-from .abstract_problem import AbstractProblem
-
-
-class Problem2D(AbstractProblem):
-
-    spatial_dimensions = 2
diff --git a/pina/problem/spatial_problem.py b/pina/problem/spatial_problem.py
new file mode 100644
index 0000000..0e93809
--- /dev/null
+++ b/pina/problem/spatial_problem.py
@@ -0,0 +1,10 @@
+from abc import abstractmethod
+
+from .abstract_problem import AbstractProblem
+
+
+class SpatialProblem(AbstractProblem):
+
+    @abstractmethod
+    def spatial_variables(self):
+        pass
diff --git a/pina/problem/timedep_problem.py b/pina/problem/timedep_problem.py
index ed36862..79fee87 100644
--- a/pina/problem/timedep_problem.py
+++ b/pina/problem/timedep_problem.py
@@ -1,6 +1,11 @@
+from abc import abstractmethod
+
 from .abstract_problem import AbstractProblem
 
 
 class TimeDependentProblem(AbstractProblem):
 
-    pass
+    @property
+    @abstractmethod
+    def temporal_variable(self):
+        pass
diff --git a/pina/segment.py b/pina/segment.py
deleted file mode 100644
index 5414ba9..0000000
--- a/pina/segment.py
+++ /dev/null
@@ -1,27 +0,0 @@
-import torch
-import numpy as np
-from .chebyshev import chebyshev_roots
-
-class Segment():
-    def __init__(self, p1, p2):
-        self.p1 = p1
-        self.p2 = p2
-
-    def discretize(self, n, mode='random'):
-        pts = []
-
-        if mode == 'random':
-            iterator = np.random.uniform(0, 1, n)
-        elif mode == 'grid':
-            iterator = np.linspace(0, 1, n)
-        elif mode == 'chebyshev':
-            iterator = chebyshev_roots(n) * .5 + .5
-
-        for k in iterator:
-            x = self.p1[0] + k*(self.p2[0]-self.p1[0])
-            y = self.p1[1] + k*(self.p2[1]-self.p1[1])
-            pts.append((x, y))
-
-        pts = np.array(pts)
-        return pts
-
diff --git a/pina/span.py b/pina/span.py
new file mode 100644
index 0000000..c8f33a6
--- /dev/null
+++ b/pina/span.py
@@ -0,0 +1,68 @@
+import numpy as np
+from .chebyshev import chebyshev_roots
+import torch
+
+from .location import Location
+from .label_tensor import LabelTensor
+
+
+class Span(Location):
+    def __init__(self, span_dict):
+
+        self.fixed_ = {}
+        self.range_ = {}
+
+        for k, v in span_dict.items():
+            if isinstance(v, (int, float)):
+                self.fixed_[k] = v
+            elif isinstance(v, (list, tuple)) and len(v) == 2:
+                self.range_[k] = v
+            else:
+                raise TypeError
+
+    def sample(self, n, mode='random'):
+
+        bounds = np.array(list(self.range_.values()))
+        if mode == 'random':
+            pts = np.random.uniform(size=(n, bounds.shape[0]))
+        elif mode == 'chebyshev':
+            pts = np.array([
+                chebyshev_roots(n) * .5 + .5
+                for _ in range(bounds.shape[0])])
+            grids = np.meshgrid(*pts)
+            pts = np.hstack([grid.reshape(-1, 1) for grid in grids])
+        elif mode == 'grid':
+            pts = np.array([
+                np.linspace(0, 1, n)
+                for _ in range(bounds.shape[0])])
+            grids = np.meshgrid(*pts)
+            pts = np.hstack([grid.reshape(-1, 1) for grid in grids])
+        elif mode == 'lh' or mode == 'latin':
+            from scipy.stats import qmc
+            sampler = qmc.LatinHypercube(d=bounds.shape[0])
+            pts = sampler.random(n)
+
+        # Scale pts
+        pts *= bounds[:, 1] - bounds[:, 0]
+        pts += bounds[:, 0]
+        pts = torch.from_numpy(pts)
+        pts_range_ = LabelTensor(pts, list(self.range_.keys()))
+
+        fixed = torch.Tensor(list(self.fixed_.values()))
+        pts_fixed_ = torch.ones(pts_range_.tensor.shape[0], len(self.fixed_)) * fixed
+        pts_fixed_ = LabelTensor(pts_fixed_, list(self.fixed_.keys()))
+
+        if self.fixed_:
+            return LabelTensor.hstack([pts_range_, pts_fixed_])
+        else:
+            return pts_range_
+
+    def meshgrid(self, n):
+        pts = np.array([
+            np.linspace(0, 1, n)
+            for _ in range(self.bound.shape[0])])
+
+        pts *= self.bound[:, 1] - self.bound[:, 0]
+        pts += self.bound[:, 0]
+
+        return np.meshgrid(*pts)
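
Usage sketches for the refactored API follow; they are illustrative only, and anything not shown in the diff above is an assumption. First, the Span class that replaces Cube and Segment: range entries are sampled in one of four modes, scalar entries fix a coordinate, and the result is a LabelTensor whose columns carry the dictionary keys as labels. A minimal sketch, assuming the interface in pina/span.py (printed values are indicative):

    from pina import Span

    domain = Span({'x': [0, 1], 'y': [0, 1]})

    # 'grid' meshes n points per range variable; 'random', 'chebyshev'
    # and 'latin' (Latin hypercube) are the other supported modes.
    pts = domain.sample(10, 'grid')

    print(pts.labels)        # ['x', 'y']
    print(pts.tensor.shape)  # torch.Size([100, 2])

Scalar entries, as in Span({'x': -1, 't': [0, 1]}) from the Burgers boundary conditions, are broadcast as fixed columns and appended through LabelTensor.hstack.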
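
Condition accepts either a Location plus a residual function (physics-driven) or paired input/output points (data-driven), positionally or by keyword. A sketch of both forms, assuming the constructor in pina/condition.py; the tensors here are placeholders:

    import torch
    from pina import Condition, Span

    def nil_dirichlet(input_, output_):
        return output_['u'] - 0.0

    # location + residual function, as used by the example problems
    bc = Condition(Span({'x': [0, 1], 'y': 0}), nil_dirichlet)

    # supervised input/output pairs; PINN.span_pts falls back to
    # condition.input_points when no location is available
    data = Condition(input_points=torch.rand(10, 2),
                     output_points=torch.zeros(10, 1))

Any other combination of arguments raises ValueError.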
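
LabelTensor now exposes the shape, dtype and device of the wrapped tensor and prints its labels together with the data. A small sketch:

    import torch
    from pina import LabelTensor

    pts = LabelTensor(torch.rand(5, 2), ['x', 'y'])

    print(pts)         # labels first, then the wrapped tensor
    print(pts.shape)   # torch.Size([5, 2]), forwarded from the tensor
    print(pts.dtype)   # torch.float32
    print(pts.device)  # cpu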
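
input_variables is now assembled by AbstractProblem from the mixin attributes, in the order spatial_variables, temporal_variable, parameters, custom_variables, so the example problems no longer declare it by hand. Roughly what the property yields for the problems in this patch, assuming the snippet is run from examples/ like the run_* scripts:

    from problems.burgers import Burgers1D
    from problems.parametric_poisson import ParametricPoisson

    print(Burgers1D().input_variables)          # ['x', 't']
    print(ParametricPoisson().input_variables)  # ['x', 'y', 'mu1', 'mu2']

The setter deliberately raises NotImplementedError, so the assembled list cannot be overridden directly.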
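
FeedForward (formerly DeepFeedForward) requires input_variables and output_variables and both consumes and returns LabelTensor objects. A standalone sketch without extra features; the .double() call is an assumption made to keep this example's dtypes consistent, since Span.sample produces float64 points while torch layers default to float32:

    from pina import Span
    from pina.model import FeedForward

    model = FeedForward(input_variables=['x', 'y'],
                        output_variables=['u'],
                        layers=[20, 10]).double()

    pts = Span({'x': [0, 1], 'y': [0, 1]}).sample(10, 'grid')
    u = model(pts)           # forward() unwraps pts.tensor internally

    print(u.labels)          # ['u']
    print(u.tensor.shape)    # torch.Size([100, 1])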
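
MultiFeedForward builds one FeedForward per entry of its constructor dictionary and attaches it as an attribute; it defines no forward itself, so a subclass is expected to combine the sub-networks. A sketch of the constructor contract, with hypothetical sub-network names:

    from pina.model import MultiFeedForward

    nets = MultiFeedForward({
        'velocity': dict(input_variables=['x', 'y'],
                         output_variables=['ux', 'uy'],
                         layers=[20, 20]),
        'pressure': dict(input_variables=['x', 'y'],
                         output_variables=['p'],
                         inner_size=10, n_layers=2),
    })

    print(nets.velocity)   # a FeedForward instance
    print(nets.pressure)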
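
Finally, Plotter.plot now samples problem.domain on a 256x256 grid, evaluates the model there, and, when truth_solution is defined, also draws the truth and the pointwise error. An end-to-end sketch on the new Poisson example; the PINN constructor arguments are assumed to mirror the ParametricPINN (problem, model, ...) signature shown in pina/ppinn.py:

    from pina import PINN, Plotter
    from pina.model import FeedForward
    from problems.poisson import Poisson

    poisson = Poisson()
    model = FeedForward(input_variables=poisson.input_variables,
                        output_variables=poisson.output_variables)

    pinn = PINN(poisson, model)            # assumed signature
    pinn.span_pts(20, 'grid', ['D'])
    pinn.span_pts(20, 'grid', ['gamma1', 'gamma2', 'gamma3', 'gamma4'])
    pinn.train(1000, 100)

    # 'method' is the name of a matplotlib Axes routine,
    # e.g. 'contourf' (default) or 'pcolor'
    Plotter().plot(pinn, method='pcolor', filename='poisson.png')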