Codacy Small Bug Fixes:

- cleaned up imports
- cleaned up some code
- added docstrings
Authored by SpartaKushK on 2023-07-25 16:43:45 +02:00, committed by Nicola Demo.
Parent bd88e24174, commit 625a77c0d5.
13 changed files with 132 additions and 118 deletions.
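The hunks below show the flavor of these fixes: module docstrings added at the top of files, chained equality checks replaced with membership tests, long expressions wrapped, and an unused import dropped. A condensed, hypothetical illustration of the before/after style (not copied from any single file in this commit):

""" Example module docstring, of the kind added throughout this commit. """
import torch

def sample_1d(mode, n):
    # membership test instead of `mode == 'lh' or mode == 'latin'`,
    # mirroring the CartesianDomain hunk further down
    if mode in ['lh', 'latin']:
        return torch.rand(n, 1)          # stand-in for a latin-hypercube sampler
    if mode == 'grid':
        return torch.linspace(0, 1, n).reshape(-1, 1)
    raise ValueError(f'unknown sampling mode: {mode}')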

View File

@@ -1,53 +1,52 @@
# import numpy as np
# import torch
# from pina.problem import Problem
# from pina.segment import Segment
# from pina.cube import Cube
# from pina.problem2d import Problem2D
import numpy as np
import torch
from pina.segment import Segment
from pina.cube import Cube
from pina.problem2d import Problem2D
# xmin, xmax, ymin, ymax = -1, 1, -1, 1
xmin, xmax, ymin, ymax = -1, 1, -1, 1
# class ParametricEllipticOptimalControl(Problem2D):
class ParametricEllipticOptimalControl(Problem2D):
# def __init__(self, alpha=1):
def __init__(self, alpha=1):
# def term1(input_, param_, output_):
# grad_p = self.grad(output_['p'], input_)
# gradgrad_p_x1 = self.grad(grad_p['x1'], input_)
# gradgrad_p_x2 = self.grad(grad_p['x2'], input_)
# return output_['y'] - param_ - (gradgrad_p_x1['x1'] + gradgrad_p_x2['x2'])
def term1(input_, param_, output_):
grad_p = self.grad(output_['p'], input_)
gradgrad_p_x1 = self.grad(grad_p['x1'], input_)
gradgrad_p_x2 = self.grad(grad_p['x2'], input_)
return output_['y'] - param_ - (gradgrad_p_x1['x1'] + gradgrad_p_x2['x2'])
# def term2(input_, param_, output_):
# grad_y = self.grad(output_['y'], input_)
# gradgrad_y_x1 = self.grad(grad_y['x1'], input_)
# gradgrad_y_x2 = self.grad(grad_y['x2'], input_)
# return - (gradgrad_y_x1['x1'] + gradgrad_y_x2['x2']) - output_['u_param']
def term2(input_, param_, output_):
grad_y = self.grad(output_['y'], input_)
gradgrad_y_x1 = self.grad(grad_y['x1'], input_)
gradgrad_y_x2 = self.grad(grad_y['x2'], input_)
return - (gradgrad_y_x1['x1'] + gradgrad_y_x2['x2']) - output_['u_param']
# def term3(input_, param_, output_):
# return output_['p'] - output_['u_param']*alpha
def term3(input_, param_, output_):
return output_['p'] - output_['u_param']*alpha
# def term(input_, param_, output_):
# return term1( input_, param_, output_) +term2( input_, param_, output_) + term3( input_, param_, output_)
def term(input_, param_, output_):
return term1(input_, param_, output_) + term2(input_, param_, output_) + term3(input_, param_, output_)
# def nil_dirichlet(input_, param_, output_):
# y_value = 0.0
# p_value = 0.0
# return torch.abs(output_['y'] - y_value) + torch.abs(output_['p'] - p_value)
def nil_dirichlet(input_, param_, output_):
y_value = 0.0
p_value = 0.0
return torch.abs(output_['y'] - y_value) + torch.abs(output_['p'] - p_value)
# self.conditions = {
# 'gamma1': {'location': Segment((xmin, ymin), (xmax, ymin)), 'func': nil_dirichlet},
# 'gamma2': {'location': Segment((xmax, ymin), (xmax, ymax)), 'func': nil_dirichlet},
# 'gamma3': {'location': Segment((xmax, ymax), (xmin, ymax)), 'func': nil_dirichlet},
# 'gamma4': {'location': Segment((xmin, ymax), (xmin, ymin)), 'func': nil_dirichlet},
# 'D1': {'location': Cube([[xmin, xmax], [ymin, ymax]]), 'func': term},
# #'D2': {'location': Cube([[0, 1], [0, 1]]), 'func': term2},
# #'D3': {'location': Cube([[0, 1], [0, 1]]), 'func': term3}
# }
self.conditions = {
'gamma1': {'location': Segment((xmin, ymin), (xmax, ymin)), 'func': nil_dirichlet},
'gamma2': {'location': Segment((xmax, ymin), (xmax, ymax)), 'func': nil_dirichlet},
'gamma3': {'location': Segment((xmax, ymax), (xmin, ymax)), 'func': nil_dirichlet},
'gamma4': {'location': Segment((xmin, ymax), (xmin, ymin)), 'func': nil_dirichlet},
'D1': {'location': Cube([[xmin, xmax], [ymin, ymax]]), 'func': term},
#'D2': {'location': Cube([[0, 1], [0, 1]]), 'func': term2},
#'D3': {'location': Cube([[0, 1], [0, 1]]), 'func': term3}
}
self.input_variables = ['x1', 'x2']
self.output_variables = ['u', 'p', 'y']
self.parameters = ['mu']
self.spatial_domain = Cube([[xmin, xmax], [xmin, xmax]])
self.parameter_domain = np.array([[0.5, 3]])
# self.input_variables = ['x1', 'x2']
# self.output_variables = ['u', 'p', 'y']
# self.parameters = ['mu']
# self.spatial_domain = Cube([[xmin, xmax], [xmin, xmax]])
# self.parameter_domain = np.array([[0.5, 3]])
raise NotImplementedError('not available problem at the moment...')
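For readers unfamiliar with the pattern, term1 and term2 above build a Laplacian by differentiating the first derivatives once more and summing them. A plain-PyTorch sketch of the same idea (torch.autograd and a toy function stand in for PINA's grad helper and the network output; this is an illustration, not PINA's API):

import torch

x1 = torch.rand(8, requires_grad=True)
x2 = torch.rand(8, requires_grad=True)
p = torch.sin(x1) * torch.cos(x2)                  # stand-in for the output 'p'

dp_dx1, = torch.autograd.grad(p.sum(), x1, create_graph=True)
dp_dx2, = torch.autograd.grad(p.sum(), x2, create_graph=True)
d2p_dx1, = torch.autograd.grad(dp_dx1.sum(), x1, create_graph=True)
d2p_dx2, = torch.autograd.grad(dp_dx2.sum(), x2, create_graph=True)

laplacian_p = d2p_dx1 + d2p_dx2                    # the quantity subtracted in term1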

View File

@@ -1,3 +1,4 @@
""" Poisson equation example. """
import numpy as np
import torch
@@ -46,8 +47,9 @@ class Poisson(SpatialProblem):
# real poisson solution
def poisson_sol(self, pts):
return -(
torch.sin(pts.extract(['x'])*torch.pi)*
torch.sin(pts.extract(['x'])*torch.pi) *
torch.sin(pts.extract(['y'])*torch.pi)
)/(2*torch.pi**2)
# return -(np.sin(x*np.pi)*np.sin(y*np.pi))/(2*np.pi**2)
truth_solution = poisson_sol
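The closed-form solution in this hunk can be sanity-checked with autograd; a minimal sketch, under the assumption that the example's forcing term is sin(pi*x)*sin(pi*y):

import torch

x = torch.rand(5, requires_grad=True)
y = torch.rand(5, requires_grad=True)
u = -(torch.sin(torch.pi*x) * torch.sin(torch.pi*y)) / (2*torch.pi**2)

ux, = torch.autograd.grad(u.sum(), x, create_graph=True)
uxx, = torch.autograd.grad(ux.sum(), x, create_graph=True)
uy, = torch.autograd.grad(u.sum(), y, create_graph=True)
uyy, = torch.autograd.grad(uy.sum(), y, create_graph=True)

# the Laplacian of u reproduces the assumed forcing term
print(torch.allclose(uxx + uyy, torch.sin(torch.pi*x) * torch.sin(torch.pi*y)))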

View File

@@ -1,3 +1,4 @@
"""Run PINA on Burgers equation"""
import argparse
import torch
from torch.nn import Softplus
@@ -11,6 +12,7 @@ class myFeature(torch.nn.Module):
"""
Feature: sin(pi*x)
"""
def __init__(self, idx):
super(myFeature, self).__init__()
self.idx = idx

View File

@@ -1,84 +1,87 @@
# import argparse
# import numpy as np
# import torch
# from torch.nn import Softplus
import argparse
import numpy as np
import torch
from torch.nn import Softplus
# from pina import PINN, LabelTensor, Plotter
# from pina.model import MultiFeedForward
# from problems.parametric_elliptic_optimal_control_alpha_variable import (
# ParametricEllipticOptimalControl)
from pina import PINN, LabelTensor, Plotter
from pina.model import MultiFeedForward
from problems.parametric_elliptic_optimal_control_alpha_variable import (
ParametricEllipticOptimalControl)
# class myFeature(torch.nn.Module):
# """
# Feature: sin(x)
# """
class myFeature(torch.nn.Module):
"""
Feature: sin(x)
"""
# def __init__(self):
# super(myFeature, self).__init__()
def __init__(self):
super(myFeature, self).__init__()
# def forward(self, x):
# t = (-x.extract(['x1'])**2+1) * (-x.extract(['x2'])**2+1)
# return LabelTensor(t, ['k0'])
def forward(self, x):
t = (-x.extract(['x1'])**2+1) * (-x.extract(['x2'])**2+1)
return LabelTensor(t, ['k0'])
# class CustomMultiDFF(MultiFeedForward):
class CustomMultiDFF(MultiFeedForward):
# def __init__(self, dff_dict):
# super().__init__(dff_dict)
def __init__(self, dff_dict):
super().__init__(dff_dict)
# def forward(self, x):
# out = self.uu(x)
# p = LabelTensor((out.extract(['u_param']) * x.extract(['alpha'])), ['p'])
# return out.append(p)
def forward(self, x):
out = self.uu(x)
p = LabelTensor(
(out.extract(['u_param']) * x.extract(['alpha'])), ['p'])
return out.append(p)
# if __name__ == "__main__":
if __name__ == "__main__":
# parser = argparse.ArgumentParser(description="Run PINA")
# group = parser.add_mutually_exclusive_group(required=True)
# group.add_argument("-s", "-save", action="store_true")
# group.add_argument("-l", "-load", action="store_true")
# args = parser.parse_args()
parser = argparse.ArgumentParser(description="Run PINA")
group = parser.add_mutually_exclusive_group(required=True)
group.add_argument("-s", "-save", action="store_true")
group.add_argument("-l", "-load", action="store_true")
args = parser.parse_args()
# opc = ParametricEllipticOptimalControl()
# model = CustomMultiDFF(
# {
# 'uu': {
# 'input_variables': ['x1', 'x2', 'mu', 'alpha'],
# 'output_variables': ['u_param', 'y'],
# 'layers': [40, 40, 20],
# 'func': Softplus,
# 'extra_features': [myFeature()],
# },
# }
# )
opc = ParametricEllipticOptimalControl()
model = CustomMultiDFF(
{
'uu': {
'input_variables': ['x1', 'x2', 'mu', 'alpha'],
'output_variables': ['u_param', 'y'],
'layers': [40, 40, 20],
'func': Softplus,
'extra_features': [myFeature()],
},
}
)
# pinn = PINN(
# opc,
# model,
# lr=0.002,
# error_norm='mse',
# regularizer=1e-8)
pinn = PINN(
opc,
model,
lr=0.002,
error_norm='mse',
regularizer=1e-8)
# if args.s:
if args.s:
# pinn.span_pts(
# {'variables': ['x1', 'x2'], 'mode': 'random', 'n': 100},
# {'variables': ['mu', 'alpha'], 'mode': 'grid', 'n': 5},
# locations=['D'])
# pinn.span_pts(
# {'variables': ['x1', 'x2'], 'mode': 'grid', 'n': 20},
# {'variables': ['mu', 'alpha'], 'mode': 'grid', 'n': 5},
# locations=['gamma1', 'gamma2', 'gamma3', 'gamma4'])
pinn.span_pts(
{'variables': ['x1', 'x2'], 'mode': 'random', 'n': 100},
{'variables': ['mu', 'alpha'], 'mode': 'grid', 'n': 5},
locations=['D'])
pinn.span_pts(
{'variables': ['x1', 'x2'], 'mode': 'grid', 'n': 20},
{'variables': ['mu', 'alpha'], 'mode': 'grid', 'n': 5},
locations=['gamma1', 'gamma2', 'gamma3', 'gamma4'])
# pinn.train(1000, 20)
# pinn.save_state('pina.ocp')
pinn.train(1000, 20)
pinn.save_state('pina.ocp')
# else:
# pinn.load_state('pina.ocp')
# plotter = Plotter()
# plotter.plot(pinn, components='y', fixed_variables={'alpha': 0.01, 'mu': 1.0})
# plotter.plot(pinn, components='u_param', fixed_variables={'alpha': 0.01, 'mu': 1.0})
# plotter.plot(pinn, components='p', fixed_variables={'alpha': 0.01, 'mu': 1.0})
raise NotImplementedError('not available problem at the moment...')
else:
pinn.load_state('pina.ocp')
plotter = Plotter()
plotter.plot(pinn, components='y',
fixed_variables={'alpha': 0.01, 'mu': 1.0})
plotter.plot(pinn, components='u_param',
fixed_variables={'alpha': 0.01, 'mu': 1.0})
plotter.plot(pinn, components='p', fixed_variables={
'alpha': 0.01, 'mu': 1.0})
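A side note on the extra feature defined in this script: k0 = (1 - x1**2) * (1 - x2**2) vanishes on the boundary of the [-1, 1] x [-1, 1] domain, which matches the homogeneous Dirichlet conditions imposed on gamma1 through gamma4 in the problem file above. A quick check (a sketch, not part of the commit):

import torch

x1 = torch.tensor([-1.0, 0.0, 1.0, 0.3])
x2 = torch.tensor([0.5, 1.0, -1.0, 0.3])
k0 = (1 - x1**2) * (1 - x2**2)
print(k0)   # zero whenever x1 or x2 lies on the boundary, nonzero inside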

View File

@@ -48,7 +48,10 @@ class myRBF(torch.nn.Module):
result = self.a * torch.exp(-(x - self.b)**2/(self.c**2))
return result
class myModel(torch.nn.Module):
""" Model for the Poisson equation."""
def __init__(self):
super().__init__()
@@ -56,10 +59,11 @@ class myModel(torch.nn.Module):
self.ffn_y = myRBF('y')
def forward(self, x):
result = self.ffn_x(x) * self.ffn_y(x)
result.labels = ['u']
return result
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Run PINA")
parser.add_argument("-s", "--save", action="store_true")
@@ -97,7 +101,8 @@ if __name__ == "__main__":
print(model.ffn_x.b)
print(model.ffn_x.c)
xi = torch.linspace(0, 1, 64).reshape(-1, 1).as_subclass(LabelTensor)
xi = torch.linspace(0, 1, 64).reshape(-1,
1).as_subclass(LabelTensor)
xi.labels = ['x']
yi = model.ffn_x(xi)
y_truth = -torch.sin(xi*torch.pi)

View File

@@ -1,10 +1,9 @@
import argparse
import sys
import numpy as np
import torch
from torch.nn import ReLU, Tanh, Softplus
from pina import PINN, LabelTensor, Plotter
from pina import PINN, Plotter
from pina.model import FeedForward
from pina.adaptive_functions import AdaptiveSin, AdaptiveCos, AdaptiveTanh
from problems.stokes import Stokes

View File

@@ -1,3 +1,4 @@
""" Implementation of adaptive linear layer. """
import torch
from torch.nn.parameter import Parameter

View File

@@ -1,7 +1,7 @@
import torch
from torch.nn.parameter import Parameter
class AdaptiveReLU(torch.nn.Module):
class AdaptiveReLU(torch.nn.Module, Parameter):
'''
Implementation of soft exponential activation.
Shape:

View File

@@ -1,4 +1,3 @@
""" """
from torch.utils.data import Dataset, DataLoader
import functools

View File

@@ -82,7 +82,8 @@ class CartesianDomain(Location):
pts = chebyshev_roots(n).mul(.5).add(.5).reshape(-1, 1)
elif mode == 'grid':
pts = torch.linspace(0, 1, n).reshape(-1, 1)
elif mode == 'lh' or mode == 'latin':
# elif mode == 'lh' or mode == 'latin':
elif mode in ['lh', 'latin']:
pts = torch_lhs(n, dim)
pts *= bounds[:, 1] - bounds[:, 0]
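The last two lines of this hunk rescale samples drawn in the unit hypercube to the requested bounds; presumably the lines just after the hunk shift them by the lower bound. A minimal sketch of that affine mapping, with torch.rand standing in for the latin-hypercube sampler:

import torch

bounds = torch.tensor([[-1.0, 1.0], [0.0, 2.0]])    # [[xmin, xmax], [ymin, ymax]]
pts = torch.rand(10, 2)                             # samples in [0, 1]^2
pts = pts * (bounds[:, 1] - bounds[:, 0]) + bounds[:, 0]
print(pts.min(dim=0).values, pts.max(dim=0).values) # stays within the bounds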

View File

@@ -1,3 +1,4 @@
""" Integral class for continous convolution"""
import torch

View File

@@ -4,6 +4,8 @@ from ..utils import check_consistency
class Network(torch.nn.Module):
""" Network class with starndard forward method
and possibility to pass extra features."""
def __init__(self, model, extra_features=None):
super().__init__()

View File

@@ -1,6 +1,5 @@
""" Module for plotting. """
import matplotlib.pyplot as plt
import numpy as np
import torch
from pina import LabelTensor
@@ -43,7 +42,8 @@ class Plotter:
proj = '3d' if len(variables) == 3 else None
ax = fig.add_subplot(projection=proj)
for location in solver.problem.input_pts:
coords = solver.problem.input_pts[location].extract(variables).T.detach()
coords = solver.problem.input_pts[location].extract(
variables).T.detach()
if coords.shape[0] == 1: # 1D samples
ax.plot(coords[0], torch.zeros(coords[0].shape), '.',
label=location)