Fix old code
@@ -39,3 +39,5 @@ class Condition:
        else:
            raise ValueError

        if hasattr(self, 'function') and not isinstance(self.function, list):
            self.function = [self.function]
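The two added lines normalise a single residual callable into a one-element list, so the training loop in pina/pinn.py can always iterate over condition.function. The idiom in isolation, as a minimal sketch (the helper name is illustrative):

def _wrap_function(function):
    # Mirrors the added check: a bare callable becomes a one-element list.
    if not isinstance(function, list):
        function = [function]
    return function

assert _wrap_function(abs) == [abs]
assert _wrap_function([abs, len]) == [abs, len]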
@@ -55,6 +55,8 @@ class LabelTensor(torch.Tensor):
                [0.9518, 0.1025],
                [0.8066, 0.9615]])
        '''
        if x.ndim == 1:
            x = x.reshape(-1, 1)

        if isinstance(labels, str):
            labels = [labels]
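With these additions the constructor accepts a 1-D tensor and a bare string label: the tensor is reshaped to a column and the label wrapped in a list. A minimal usage sketch, assuming the LabelTensor(tensor, labels) call signature used elsewhere in this diff:

import torch
from pina import LabelTensor

t = LabelTensor(torch.rand(4), 'x')
# after the new normalisation, t has shape (4, 1) and t.labels == ['x']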
@@ -130,3 +132,8 @@ class LabelTensor(torch.Tensor):
        new_labels = self.labels + lt.labels
        new_tensor = torch.cat((self, lt), dim=1)
        return LabelTensor(new_tensor, new_labels)

    def __str__(self):
        s = f'labels({str(self.labels)})\n'
        s += super().__str__()
        return s
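These lines concatenate two labelled tensors column-wise and merge their label lists, while __str__ prints the labels above the tensor data. A hedged sketch, assuming the lines above belong to LabelTensor.append (the method used throughout pina/pinn.py):

import torch
from pina import LabelTensor

a = LabelTensor(torch.rand(5, 1), ['x'])
b = LabelTensor(torch.rand(5, 1), ['y'])
ab = a.append(b)       # columns of b concatenated along dim=1
# ab.labels == ['x', 'y']; print(ab) shows labels(['x', 'y']) followed by the data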
@@ -2,9 +2,8 @@
import torch
import torch.nn as nn

from pina.label_tensor import LabelTensor
import warnings
import copy
from pina import LabelTensor


class DeepONet(torch.nn.Module):
    """
@@ -75,39 +74,24 @@ class DeepONet(torch.nn.Module):
        self.trunk_net = trunk_net
        self.branch_net = branch_net

        if features:
        # if features:
        #     if len(features) != features_net.layers[0].in_features:
        #         raise ValueError('Incompatible features')
        #     if trunk_out_dim != features_net.layers[-1].out_features:
        #         raise ValueError('Incompatible features')

            self.features = features
        # self.features = features
        # self.features_net = nn.Sequential(
        #     nn.Linear(len(features), 10), nn.Softplus(),
        #     # nn.Linear(10, 10), nn.Softplus(),
        #     nn.Linear(10, trunk_out_dim)
        # )
            self.features_net = nn.Sequential(
                nn.Linear(len(features), trunk_out_dim)
            )

        # self.features_net = nn.Sequential(
        #     nn.Linear(len(features), trunk_out_dim)
        # )

        self.reduction = nn.Linear(trunk_out_dim, self.output_dimension)

        # print(self.branch_net.output_variables)
        # print(self.trunk_net.output_variables)
        # if isinstance(self.branch_net.output_variables, int) and isinstance(self.branch_net.output_variables, int):
        #     if self.branch_net.output_dimension == self.trunk_net.output_dimension:
        #         self.inner_size = self.branch_net.output_dimension
        #         print('qui')
        #     else:
        #         raise ValueError('Branch and trunk networks have not the same output dimension.')
        # else:
        #     warnings.warn("The output dimension of the branch and trunk networks has been imposed by default as 10 for each output variable. To set it change the output_variable of networks to an integer.")
        #     self.inner_size = self.output_dimension*inner_size

    @property
    def input_variables(self):
        """The input variables of the model"""
@@ -121,19 +105,33 @@ class DeepONet(torch.nn.Module):
        :return: the output computed by the model.
        :rtype: LabelTensor
        """
        input_feature = []
        for feature in self.features:
            #print(feature)
            input_feature.append(feature(x))
        input_feature = torch.cat(input_feature, dim=1)
        # print(x.shape)
        #input_feature = []
        #for feature in self.features:
        #    #print(feature)
        #    input_feature.append(feature(x))
        #input_feature = torch.cat(input_feature, dim=1)

        branch_output = self.branch_net(
            x.extract(self.branch_net.input_variables))
        # print(branch_output.shape)
        trunk_output = self.trunk_net(
            x.extract(self.trunk_net.input_variables))
        feat_output = self.features_net(input_feature)
        output_ = self.reduction(branch_output * trunk_output * feat_output)
        output_ = self.reduction(trunk_output * feat_output)
        # print(trunk_output.shape)
        #feat_output = self.features_net(input_feature)
        # print(feat_output.shape)
        # inner_input = torch.cat([
        #     branch_output * trunk_output,
        #     branch_output,
        #     trunk_output,
        #     feat_output], dim=1)
        # print(inner_input.shape)

        # output_ = self.reduction(inner_input)
        # print(output_.shape)
        print(branch_output.shape)
        print(trunk_output.shape)
        output_ = self.reduction(trunk_output * branch_output)
        output_ = LabelTensor(output_, self.output_variables)
        # local_size = int(trunk_output.shape[1]/self.output_dimension)
        # for i, var in enumerate(self.output_variables):
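Ignoring the commented-out variants, the forward pass converges on multiplying branch, trunk and (optionally) feature-net outputs of equal width, then reducing with the final linear layer. A shape-only sketch with made-up sizes (batch 32, trunk_out_dim 10, scalar output assumed):

import torch
import torch.nn as nn

trunk_out_dim = 10
branch_output = torch.rand(32, trunk_out_dim)   # stands in for branch_net(x)
trunk_output = torch.rand(32, trunk_out_dim)    # stands in for trunk_net(x)
feat_output = torch.rand(32, trunk_out_dim)     # stands in for features_net(...)

reduction = nn.Linear(trunk_out_dim, 1)         # output_dimension assumed to be 1
output_ = reduction(branch_output * trunk_output * feat_output)
print(output_.shape)                            # torch.Size([32, 1])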
@@ -93,20 +93,10 @@ class FeedForward(torch.nn.Module):
        if self.input_variables:
            x = x.extract(self.input_variables)

        labels = []
        features = []
        for i, feature in enumerate(self.extra_features):
            labels.append('k{}'.format(i))
            features.append(feature(x))

        if labels and features:
            features = torch.cat(features, dim=1)
            features_tensor = LabelTensor(features, labels)
            input_ = x.append(features_tensor)  # TODO error when no LabelTens
        else:
            input_ = x
            x = x.append(feature(x))

        if self.output_variables:
            return LabelTensor(self.model(input_), self.output_variables)
            return LabelTensor(self.model(x), self.output_variables)
        else:
            return self.model(input_)
            return self.model(x)
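The older path builds a labelled block of extra-feature columns (k0, k1, ...) and appends it to the input; the newer path appends each feature directly. A minimal sketch of the labelled-block idiom, assuming a single callable extra feature:

import torch
from pina import LabelTensor

x = LabelTensor(torch.rand(8, 2), ['x', 'y'])
extra_features = [lambda pts: torch.sin(pts.extract(['x']))]

labels, features = [], []
for i, feature in enumerate(extra_features):
    labels.append('k{}'.format(i))       # feature columns are labelled k0, k1, ...
    features.append(feature(x))

features_tensor = LabelTensor(torch.cat(features, dim=1), labels)
input_ = x.append(features_tensor)        # network input now carries x, y, k0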
@@ -4,62 +4,158 @@ import torch
from pina.label_tensor import LabelTensor


def grad(output_, input_):
def grad(output_, input_, components=None, d=None):
    """
    TODO
    """

    def grad_scalar_output(output_, input_, d):
        """
        """

        if len(output_.labels) != 1:
            raise RuntimeError
        if not all([di in input_.labels for di in d]):
            raise RuntimeError

        output_fieldname = output_.labels[0]

        gradients = torch.autograd.grad(
            output_,
            input_,
            grad_outputs=torch.ones(output_.size()).to(
                dtype=input_.dtype,
                device=input_.device),
            create_graph=True, retain_graph=True, allow_unused=True)[0]
        gradients.labels = input_.labels
        gradients = gradients.extract(d)
        gradients.labels = [f'd{output_fieldname}d{i}' for i in d]

        return gradients

    if not isinstance(input_, LabelTensor):
        raise TypeError

    if d is None:
        d = input_.labels

    if components is None:
        components = output_.labels

    if output_.shape[1] == 1:  # scalar output ################################

        if components != output_.labels:
            raise RuntimeError
        gradients = grad_scalar_output(output_, input_, d)

    elif output_.shape[1] >= 2:  # vector output ##############################

        for i, c in enumerate(components):
            c_output = output_.extract([c])
            if i == 0:
                gradients = grad_scalar_output(c_output, input_, d)
            else:
                gradients = gradients.append(
                    grad_scalar_output(c_output, input_, d))
    else:
        raise NotImplementedError

    return gradients
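In the rewritten grad, components selects which output fields to differentiate and d which input variables to differentiate against; result columns are labelled 'd<component>d<variable>'. A hedged usage sketch with toy data (the import path is an assumption, since the file name was lost in this view):

import torch
from pina import LabelTensor
from pina.operators import grad    # import path assumed

pts = LabelTensor(torch.rand(16, 2, requires_grad=True), ['x', 'y'])
u = LabelTensor(pts.extract(['x'])**2 + pts.extract(['y']), ['u'])

du = grad(u, pts)                  # both derivatives, labels ['dudx', 'dudy']
dudx = grad(u, pts, d=['x'])       # only d/dx, label ['dudx']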
def div(output_, input_, components=None, d=None):
    """
    TODO
    """
    if not isinstance(input_, LabelTensor):
        raise TypeError

    gradients = torch.autograd.grad(
        output_,
        input_,
        grad_outputs=torch.ones(output_.size()).to(
            dtype=input_.dtype,
            device=input_.device),
        create_graph=True, retain_graph=True, allow_unused=True)[0]
    return LabelTensor(gradients, input_.labels)
    if d is None:
        d = input_.labels

    if components is None:
        components = output_.labels

    if output_.shape[1] < 2 or len(components) < 2:
        raise ValueError('div supported only for vector field')

    if len(components) != len(d):
        raise ValueError

    grad_output = grad(output_, input_, components, d)
    div = torch.empty(input_.shape[0], len(components))
    labels = [None] * len(components)
    for i, c in enumerate(components):
        c_fields = [f'd{c}d{di}' for di in d]
        div[:, i] = grad_output.extract(c_fields).sum(axis=1)
        labels[i] = '+'.join(c_fields)

    return LabelTensor(div, labels)
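The new div requires a vector field and matching components/d lists; per the loop above, column i sums the derivatives of component i over every variable in d and is labelled with the joined field names. A hedged sketch under the same import assumption as before:

import torch
from pina import LabelTensor
from pina.operators import div     # import path assumed

pts = LabelTensor(torch.rand(16, 2, requires_grad=True), ['x', 'y'])
field = LabelTensor(torch.cat((pts.extract(['y']), pts.extract(['x'])), dim=1), ['u', 'v'])

d_field = div(field, pts, components=['u', 'v'], d=['x', 'y'])
# per the loop above: column 0 is labelled 'dudx+dudy', column 1 'dvdx+dvdy'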
def div(output_, input_):
def nabla(output_, input_, components=None, d=None, method='std'):
    """
    TODO
    """
    if output_.shape[1] == 1:
        div = grad(output_, input_).sum(axis=1)
    else:  # really to improve
        a = []
        for o in output_.T:
            a.append(grad(o, input_).extract(['x', 'y']))
        div = torch.zeros(output_.shape[0], 1)
        for i in range(output_.shape[1]):
            div += a[i][:, i].reshape(-1, 1)
    if d is None:
        d = input_.labels

    return div
    if components is None:
        components = output_.labels

    if len(components) != len(d) and len(components) != 1:
        raise ValueError

    if method == 'divgrad':
        raise NotImplementedError
        # TODO fix
        # grad_output = grad(output_, input_, components, d)
        # result = div(grad_output, input_, d=d)
    elif method == 'std':

        if len(components) == 1:
            grad_output = grad(output_, input_, components=components, d=d)
            result = torch.zeros(output_.shape[0], 1)
            for i, label in enumerate(grad_output.labels):
                gg = grad(grad_output, input_, d=d, components=[label])
                result[:, 0] += gg[:, i]
            labels = [f'dd{components[0]}']

        else:
            result = torch.empty(input_.shape[0], len(components))
            labels = [None] * len(components)
            for idx, (ci, di) in enumerate(zip(components, d)):

                if not isinstance(ci, list):
                    ci = [ci]
                if not isinstance(di, list):
                    di = [di]

                grad_output = grad(output_, input_, components=ci, d=di)
                result[:, idx] = grad(grad_output, input_, d=di).flatten()
                labels[idx] = f'dd{ci}dd{di}'

    return LabelTensor(result, labels)


def nabla(output_, input_):
    """
    TODO
    """
    return div(grad(output_, input_).extract(['x', 'y']), input_)
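For a single component, the new nabla accumulates second derivatives over d (a Laplacian when d spans the spatial variables) and labels the column 'dd<component>'. A hedged sketch under the same assumptions:

import torch
from pina import LabelTensor
from pina.operators import nabla   # import path assumed

pts = LabelTensor(torch.rand(16, 2, requires_grad=True), ['x', 'y'])
u = LabelTensor(pts.extract(['x'])**2 + pts.extract(['y'])**2, ['u'])

lap_u = nabla(u, pts, components=['u'], d=['x', 'y'])
# one column labelled 'ddu', computed with the default method='std'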
def advection_term(output_, input_):
def advection(output_, input_):
    """
    TODO
    """
    dimension = len(output_.labels)
    for i, label in enumerate(output_.labels):
        # compute u dot gradient in each direction
        gradient_loc = grad(output_.extract([label]), input_).extract(input_.labels[:dimension])
        # compute u dot gradient in each direction
        gradient_loc = grad(output_.extract([label]),
                            input_).extract(input_.labels[:dimension])
        dim_0 = gradient_loc.shape[0]
        dim_1 = gradient_loc.shape[1]
        u_dot_grad_loc = torch.bmm(output_.view(dim_0, 1, dim_1),
                                   gradient_loc.view(dim_0, dim_1, 1))
                                   gradient_loc.view(dim_0, dim_1, 1))
        u_dot_grad_loc = LabelTensor(torch.reshape(u_dot_grad_loc,
                                                   (u_dot_grad_loc.shape[0], u_dot_grad_loc.shape[1])), [input_.labels[i]])
        if i==0:
                                                   (u_dot_grad_loc.shape[0],
                                                    u_dot_grad_loc.shape[1])),
                                    [input_.labels[i]])
        if i == 0:
            adv_term = u_dot_grad_loc
        else:
            adv_term = adv_term.append(u_dot_grad_loc)
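The batched dot product above reduces, for each component, a (N, 1, dim) row of the field against a (N, dim, 1) gradient column, giving one advection entry per point. The shape bookkeeping in isolation, with toy tensors:

import torch

N, dim = 16, 2
field = torch.rand(N, dim)           # stands in for output_
gradient_loc = torch.rand(N, dim)    # stands in for one component's spatial gradient

u_dot_grad_loc = torch.bmm(field.view(N, 1, dim), gradient_loc.view(N, dim, 1))
u_dot_grad_loc = u_dot_grad_loc.reshape(N, 1)    # one advection column per component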
pina/pinn.py
@@ -123,6 +123,7 @@ class PINN(object):
    def span_pts(self, *args, **kwargs):
        """
        >>> pinn.span_pts(n=10, mode='grid')
        >>> pinn.span_pts(n=10, mode='grid', location=['bound1'])
        >>> pinn.span_pts(n=10, mode='grid', variables=['x'])
        """
@@ -133,23 +134,30 @@ class PINN(object):
            n1 = tensor1.shape[0]
            n2 = tensor2.shape[0]

            tensor1 = LabelTensor(tensor1.repeat(n2, 1), labels=tensor1.labels)
            tensor1 = LabelTensor(
                tensor1.repeat(n2, 1),
                labels=tensor1.labels)
            tensor2 = LabelTensor(
                tensor2.repeat_interleave(n1, dim=0), labels=tensor2.labels)
                tensor2.repeat_interleave(n1, dim=0),
                labels=tensor2.labels)
            return tensor1.append(tensor2)
        else:
            pass
        elif len(tensors):
            return tensors[0]

        if isinstance(args[0], int) and isinstance(args[1], str):
            pass
            variables = self.problem.input_variables
            argument = {}
            argument['n'] = int(args[0])
            argument['mode'] = args[1]
            argument['variables'] = self.problem.input_variables
            arguments = [argument]
        elif all(isinstance(arg, dict) for arg in args):
            print(args)
            arguments = args
            pass
        elif all(key in kwargs for key in ['n', 'mode']):
            variables = self.problem.input_variables
            pass
            argument = {}
            argument['n'] = kwargs['n']
            argument['mode'] = kwargs['mode']
            argument['variables'] = self.problem.input_variables
            arguments = [argument]
        else:
            raise RuntimeError
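The parsing above accepts three call styles: positional (n, mode), one or more dicts, or the n=/mode= keywords shown in the docstring. A hedged sketch of roughly equivalent calls (whether the dict form strictly needs 'variables' downstream is not shown in this hunk):

pinn.span_pts(10, 'grid')
pinn.span_pts({'n': 10, 'mode': 'grid', 'variables': ['x', 'y']})
pinn.span_pts(n=10, mode='grid')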
@@ -174,26 +182,23 @@ class PINN(object):
            self.input_pts[location].requires_grad_(True)
            self.input_pts[location].retain_grad()

    def plot_pts(self, locations='all'):
        import matplotlib
        # matplotlib.use('GTK3Agg')
        if locations == 'all':
            locations = [condition for condition in self.problem.conditions]
        for location in locations:
            x = self.input_pts[location].extract(['x'])
            y = self.input_pts[location].extract(['y'])
            plt.plot(x.detach(), y.detach(), '.', label=location)
            # np.savetxt('burgers_{}_pts.txt'.format(location), self.input_pts[location].tensor.detach(), header='x y', delimiter=' ')
        plt.legend()
        plt.show()

    def train(self, stop=100, frequency_print=2, trial=None):

        epoch = 0

        header = []
        for condition_name in self.problem.conditions:
            condition = self.problem.conditions[condition_name]

            if hasattr(condition, 'function'):
                if isinstance(condition.function, list):
                    for function in condition.function:
                        header.append(f'{condition_name}{function.__name__}')

                    continue

            header.append(f'{condition_name}')

        while True:

            losses = []
@@ -204,23 +209,20 @@ class PINN(object):
                if hasattr(condition, 'function'):
                    pts = self.input_pts[condition_name]
                    predicted = self.model(pts)
                    if isinstance(condition.function, list):
                        for function in condition.function:
                            residuals = function(pts, predicted)
                            local_loss = condition.data_weight*self._compute_norm(residuals)
                            losses.append(local_loss)
                    else:
                        residuals = condition.function(pts, predicted)
                        local_loss = condition.data_weight*self._compute_norm(residuals)
                    for function in condition.function:
                        residuals = function(pts, predicted)
                        local_loss = (
                            condition.data_weight*self._compute_norm(
                                residuals))
                        losses.append(local_loss)
                elif hasattr(condition, 'output_points'):
                    pts = condition.input_points
                    # print(pts)
                    predicted = self.model(pts)
                    # print(predicted)
                    residuals = predicted - condition.output_points
                    local_loss = condition.data_weight*self._compute_norm(residuals)
                    local_loss = (
                        condition.data_weight*self._compute_norm(residuals))
                    losses.append(local_loss)

            self.optimizer.zero_grad()

            sum(losses).backward()
@@ -239,12 +241,21 @@ class PINN(object):

            if isinstance(stop, int):
                if epoch == stop:
                    print('[epoch {:05d}] {:.6e} '.format(self.trained_epoch, sum(losses).item()), end='')
                    for loss in losses:
                        print('{:.6e} '.format(loss), end='')
                    print()
                    break
            elif isinstance(stop, float):
                if sum(losses) < stop:
                    break

            if epoch % frequency_print == 0:
            if epoch % frequency_print == 0 or epoch == 1:
                print(' {:5s} {:12s} '.format('', 'sum'), end='')
                for name in header:
                    print('{:12.12s} '.format(name), end='')
                print()

                print('[epoch {:05d}] {:.6e} '.format(self.trained_epoch, sum(losses).item()), end='')
                for loss in losses:
                    print('{:.6e} '.format(loss), end='')
@@ -79,7 +79,7 @@ class Plotter:


    def plot(self, obj, method='contourf', component='u', parametric=False, params_value=1, filename=None):
    def plot(self, obj, method='contourf', component='u', parametric=False, params_value=1.5, filename=None):
        """
        """
        res = 256
@@ -22,7 +22,7 @@ class Span(Location):

    def sample(self, n, mode='random', variables='all'):

        if variables=='all':
        if variables == 'all':
            spatial_range_ = list(self.range_.keys())
            spatial_fixed_ = list(self.fixed_.keys())
            bounds = np.array(list(self.range_.values()))
@@ -41,6 +41,7 @@ class Span(Location):
                fixed.append(int(self.fixed_[variable]))
            fixed = torch.Tensor(fixed)
            bounds = np.array(bounds)

        if mode == 'random':
            pts = np.random.uniform(size=(n, bounds.shape[0]))
        elif mode == 'chebyshev':
@@ -59,6 +60,7 @@ class Span(Location):
            from scipy.stats import qmc
            sampler = qmc.LatinHypercube(d=bounds.shape[0])
            pts = sampler.random(n)

        # Scale pts
        pts *= bounds[:, 1] - bounds[:, 0]
        pts += bounds[:, 0]
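The final two lines map unit-cube samples from any of the modes into the per-variable bounds. The scaling step in isolation, with toy bounds:

import numpy as np

bounds = np.array([[0.0, 1.0], [-1.0, 1.0]])     # one [min, max] row per variable
pts = np.random.uniform(size=(5, bounds.shape[0]))
pts *= bounds[:, 1] - bounds[:, 0]
pts += bounds[:, 0]                              # each column now lies in its own range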