From 625a77c0d592499fd80460d8264710aceba208b1 Mon Sep 17 00:00:00 2001
From: SpartaKushK
Date: Tue, 25 Jul 2023 16:43:45 +0200
Subject: [PATCH] Codacy Small Bug Fixes:

- cleaned up imports
- cleaned up some code
- added docstrings
---
 .../parametric_elliptic_optimal_control.py    |  83 ++++++-----
 examples/problems/poisson.py                  |   4 +-
 examples/run_burgers.py                       |   2 +
 ...rametric_elliptic_optimal_control_alpha.py | 135 +++++++++---------
 examples/run_poisson_deeponet.py              |   9 +-
 examples/run_stokes.py                        |   3 +-
 pina/adaptive_functions/adaptive_linear.py    |   1 +
 pina/adaptive_functions/adaptive_relu.py      |   2 +-
 pina/dataset.py                               |   1 -
 pina/geometry/cartesian.py                    |   3 +-
 pina/model/layers/integral.py                 |   1 +
 pina/model/network.py                         |   2 +
 pina/plotter.py                               |   4 +-
 13 files changed, 132 insertions(+), 118 deletions(-)

diff --git a/examples/problems/parametric_elliptic_optimal_control.py b/examples/problems/parametric_elliptic_optimal_control.py
index 8aa33fd..f244d93 100644
--- a/examples/problems/parametric_elliptic_optimal_control.py
+++ b/examples/problems/parametric_elliptic_optimal_control.py
@@ -1,53 +1,52 @@
-# import numpy as np
-# import torch
-# from pina.problem import Problem
-# from pina.segment import Segment
-# from pina.cube import Cube
-# from pina.problem2d import Problem2D
+import numpy as np
+import torch
+from pina.segment import Segment
+from pina.cube import Cube
+from pina.problem2d import Problem2D
 
-# xmin, xmax, ymin, ymax = -1, 1, -1, 1
+xmin, xmax, ymin, ymax = -1, 1, -1, 1
 
 
-# class ParametricEllipticOptimalControl(Problem2D):
+class ParametricEllipticOptimalControl(Problem2D):
 
-#     def __init__(self, alpha=1):
+    def __init__(self, alpha=1):
 
-#         def term1(input_, param_, output_):
-#             grad_p = self.grad(output_['p'], input_)
-#             gradgrad_p_x1 = self.grad(grad_p['x1'], input_)
-#             gradgrad_p_x2 = self.grad(grad_p['x2'], input_)
-#             return output_['y'] - param_ - (gradgrad_p_x1['x1'] + gradgrad_p_x2['x2'])
+        def term1(input_, param_, output_):
+            grad_p = self.grad(output_['p'], input_)
+            gradgrad_p_x1 = self.grad(grad_p['x1'], input_)
+            gradgrad_p_x2 = self.grad(grad_p['x2'], input_)
+            return output_['y'] - param_ - (gradgrad_p_x1['x1'] + gradgrad_p_x2['x2'])
 
-#         def term2(input_, param_, output_):
-#             grad_y = self.grad(output_['y'], input_)
-#             gradgrad_y_x1 = self.grad(grad_y['x1'], input_)
-#             gradgrad_y_x2 = self.grad(grad_y['x2'], input_)
-#             return - (gradgrad_y_x1['x1'] + gradgrad_y_x2['x2']) - output_['u_param']
+        def term2(input_, param_, output_):
+            grad_y = self.grad(output_['y'], input_)
+            gradgrad_y_x1 = self.grad(grad_y['x1'], input_)
+            gradgrad_y_x2 = self.grad(grad_y['x2'], input_)
+            return - (gradgrad_y_x1['x1'] + gradgrad_y_x2['x2']) - output_['u_param']
 
-#         def term3(input_, param_, output_):
-#             return output_['p'] - output_['u_param']*alpha
+        def term3(input_, param_, output_):
+            return output_['p'] - output_['u_param']*alpha
 
-#         def term(input_, param_, output_):
-#             return term1( input_, param_, output_) +term2( input_, param_, output_) + term3( input_, param_, output_)
+        def term(input_, param_, output_):
+            return term1( input_, param_, output_) +term2( input_, param_, output_) + term3( input_, param_, output_)
 
-#         def nil_dirichlet(input_, param_, output_):
-#             y_value = 0.0
-#             p_value = 0.0
-#             return torch.abs(output_['y'] - y_value) + torch.abs(output_['p'] - p_value)
+        def nil_dirichlet(input_, param_, output_):
+            y_value = 0.0
+            p_value = 0.0
+            return torch.abs(output_['y'] - y_value) + torch.abs(output_['p'] - p_value)
 
-#         self.conditions = {
-#             'gamma1': {'location': Segment((xmin, ymin), (xmax, ymin)), 'func': nil_dirichlet},
-#             'gamma2': {'location': Segment((xmax, ymin), (xmax, ymax)), 'func': nil_dirichlet},
-#             'gamma3': {'location': Segment((xmax, ymax), (xmin, ymax)), 'func': nil_dirichlet},
-#             'gamma4': {'location': Segment((xmin, ymax), (xmin, ymin)), 'func': nil_dirichlet},
-#             'D1': {'location': Cube([[xmin, xmax], [ymin, ymax]]), 'func': term},
-#             #'D2': {'location': Cube([[0, 1], [0, 1]]), 'func': term2},
-#             #'D3': {'location': Cube([[0, 1], [0, 1]]), 'func': term3}
-#         }
+        self.conditions = {
+            'gamma1': {'location': Segment((xmin, ymin), (xmax, ymin)), 'func': nil_dirichlet},
+            'gamma2': {'location': Segment((xmax, ymin), (xmax, ymax)), 'func': nil_dirichlet},
+            'gamma3': {'location': Segment((xmax, ymax), (xmin, ymax)), 'func': nil_dirichlet},
+            'gamma4': {'location': Segment((xmin, ymax), (xmin, ymin)), 'func': nil_dirichlet},
+            'D1': {'location': Cube([[xmin, xmax], [ymin, ymax]]), 'func': term},
+            #'D2': {'location': Cube([[0, 1], [0, 1]]), 'func': term2},
+            #'D3': {'location': Cube([[0, 1], [0, 1]]), 'func': term3}
+        }
+
+        self.input_variables = ['x1', 'x2']
+        self.output_variables = ['u', 'p', 'y']
+        self.parameters = ['mu']
+        self.spatial_domain = Cube([[xmin, xmax], [xmin, xmax]])
+        self.parameter_domain = np.array([[0.5, 3]])
 
-#         self.input_variables = ['x1', 'x2']
-#         self.output_variables = ['u', 'p', 'y']
-#         self.parameters = ['mu']
-#         self.spatial_domain = Cube([[xmin, xmax], [xmin, xmax]])
-#         self.parameter_domain = np.array([[0.5, 3]])
-raise NotImplementedError('not available problem at the moment...')
diff --git a/examples/problems/poisson.py b/examples/problems/poisson.py
index 1fe4013..c451f53 100644
--- a/examples/problems/poisson.py
+++ b/examples/problems/poisson.py
@@ -1,3 +1,4 @@
+""" Poisson equation example. """
 import numpy as np
 import torch
 
@@ -46,8 +47,9 @@ class Poisson(SpatialProblem):
     # real poisson solution
     def poisson_sol(self, pts):
         return -(
-            torch.sin(pts.extract(['x'])*torch.pi)*
+            torch.sin(pts.extract(['x'])*torch.pi) *
             torch.sin(pts.extract(['y'])*torch.pi)
         )/(2*torch.pi**2)
+        # return -(np.sin(x*np.pi)*np.sin(y*np.pi))/(2*np.pi**2)
 
     truth_solution = poisson_sol
diff --git a/examples/run_burgers.py b/examples/run_burgers.py
index 4c72212..8aade04 100644
--- a/examples/run_burgers.py
+++ b/examples/run_burgers.py
@@ -1,3 +1,4 @@
+"""Run PINA on Burgers equation"""
 import argparse
 import torch
 from torch.nn import Softplus
@@ -11,6 +12,7 @@ class myFeature(torch.nn.Module):
     """
     Feature: sin(pi*x)
    """
+
    def __init__(self, idx):
        super(myFeature, self).__init__()
        self.idx = idx
diff --git a/examples/run_parametric_elliptic_optimal_control_alpha.py b/examples/run_parametric_elliptic_optimal_control_alpha.py
index efe6d62..29964fa 100644
--- a/examples/run_parametric_elliptic_optimal_control_alpha.py
+++ b/examples/run_parametric_elliptic_optimal_control_alpha.py
@@ -1,84 +1,87 @@
-# import argparse
-# import numpy as np
-# import torch
-# from torch.nn import Softplus
+import argparse
+import numpy as np
+import torch
+from torch.nn import Softplus
 
-# from pina import PINN, LabelTensor, Plotter
-# from pina.model import MultiFeedForward
-# from problems.parametric_elliptic_optimal_control_alpha_variable import (
-#     ParametricEllipticOptimalControl)
+from pina import PINN, LabelTensor, Plotter
+from pina.model import MultiFeedForward
+from problems.parametric_elliptic_optimal_control_alpha_variable import (
+    ParametricEllipticOptimalControl)
 
 
-# class myFeature(torch.nn.Module):
-#     """
-#     Feature: sin(x)
-#     """
+class myFeature(torch.nn.Module):
+    """
+    Feature: sin(x)
+    """
 
-#     def __init__(self):
-#         super(myFeature, self).__init__()
+    def __init__(self):
+        super(myFeature, self).__init__()
 
-#     def forward(self, x):
-#         t = (-x.extract(['x1'])**2+1) * (-x.extract(['x2'])**2+1)
-#         return LabelTensor(t, ['k0'])
+    def forward(self, x):
+        t = (-x.extract(['x1'])**2+1) * (-x.extract(['x2'])**2+1)
+        return LabelTensor(t, ['k0'])
 
 
-# class CustomMultiDFF(MultiFeedForward):
+class CustomMultiDFF(MultiFeedForward):
 
-#     def __init__(self, dff_dict):
-#         super().__init__(dff_dict)
+    def __init__(self, dff_dict):
+        super().__init__(dff_dict)
 
-#     def forward(self, x):
-#         out = self.uu(x)
-#         p = LabelTensor((out.extract(['u_param']) * x.extract(['alpha'])), ['p'])
-#         return out.append(p)
+    def forward(self, x):
+        out = self.uu(x)
+        p = LabelTensor(
+            (out.extract(['u_param']) * x.extract(['alpha'])), ['p'])
+        return out.append(p)
 
 
-# if __name__ == "__main__":
+if __name__ == "__main__":
 
-#     parser = argparse.ArgumentParser(description="Run PINA")
-#     group = parser.add_mutually_exclusive_group(required=True)
-#     group.add_argument("-s", "-save", action="store_true")
-#     group.add_argument("-l", "-load", action="store_true")
-#     args = parser.parse_args()
+    parser = argparse.ArgumentParser(description="Run PINA")
+    group = parser.add_mutually_exclusive_group(required=True)
+    group.add_argument("-s", "-save", action="store_true")
+    group.add_argument("-l", "-load", action="store_true")
+    args = parser.parse_args()
 
-#     opc = ParametricEllipticOptimalControl()
-#     model = CustomMultiDFF(
-#         {
-#             'uu': {
-#                 'input_variables': ['x1', 'x2', 'mu', 'alpha'],
-#                 'output_variables': ['u_param', 'y'],
-#                 'layers': [40, 40, 20],
-#                 'func': Softplus,
-#                 'extra_features': [myFeature()],
-#             },
-#         }
-#     )
+    opc = ParametricEllipticOptimalControl()
+    model = CustomMultiDFF(
+        {
+            'uu': {
+                'input_variables': ['x1', 'x2', 'mu', 'alpha'],
+                'output_variables': ['u_param', 'y'],
+                'layers': [40, 40, 20],
+                'func': Softplus,
+                'extra_features': [myFeature()],
+            },
+        }
+    )
 
-#     pinn = PINN(
-#         opc,
-#         model,
-#         lr=0.002,
-#         error_norm='mse',
-#         regularizer=1e-8)
+    pinn = PINN(
+        opc,
+        model,
+        lr=0.002,
+        error_norm='mse',
+        regularizer=1e-8)
 
-#     if args.s:
+    if args.s:
 
-#         pinn.span_pts(
-#             {'variables': ['x1', 'x2'], 'mode': 'random', 'n': 100},
-#             {'variables': ['mu', 'alpha'], 'mode': 'grid', 'n': 5},
-#             locations=['D'])
-#         pinn.span_pts(
-#             {'variables': ['x1', 'x2'], 'mode': 'grid', 'n': 20},
-#             {'variables': ['mu', 'alpha'], 'mode': 'grid', 'n': 5},
-#             locations=['gamma1', 'gamma2', 'gamma3', 'gamma4'])
+        pinn.span_pts(
+            {'variables': ['x1', 'x2'], 'mode': 'random', 'n': 100},
+            {'variables': ['mu', 'alpha'], 'mode': 'grid', 'n': 5},
+            locations=['D'])
+        pinn.span_pts(
+            {'variables': ['x1', 'x2'], 'mode': 'grid', 'n': 20},
+            {'variables': ['mu', 'alpha'], 'mode': 'grid', 'n': 5},
+            locations=['gamma1', 'gamma2', 'gamma3', 'gamma4'])
 
-#         pinn.train(1000, 20)
-#         pinn.save_state('pina.ocp')
+        pinn.train(1000, 20)
+        pinn.save_state('pina.ocp')
 
-#     else:
-#         pinn.load_state('pina.ocp')
-#     plotter = Plotter()
-#     plotter.plot(pinn, components='y', fixed_variables={'alpha': 0.01, 'mu': 1.0})
-#     plotter.plot(pinn, components='u_param', fixed_variables={'alpha': 0.01, 'mu': 1.0})
-#     plotter.plot(pinn, components='p', fixed_variables={'alpha': 0.01, 'mu': 1.0})
-raise NotImplementedError('not available problem at the moment...')
\ No newline at end of file
+    else:
+        pinn.load_state('pina.ocp')
+    plotter = Plotter()
+    plotter.plot(pinn, components='y',
+                 fixed_variables={'alpha': 0.01, 'mu': 1.0})
+    plotter.plot(pinn, components='u_param',
+                 fixed_variables={'alpha': 0.01, 'mu': 1.0})
+    plotter.plot(pinn, components='p', fixed_variables={
+                 'alpha': 0.01, 'mu': 1.0})
diff --git a/examples/run_poisson_deeponet.py b/examples/run_poisson_deeponet.py
index d1a9891..2bd566a 100644
--- a/examples/run_poisson_deeponet.py
+++ b/examples/run_poisson_deeponet.py
@@ -48,7 +48,10 @@ class myRBF(torch.nn.Module):
         result = self.a * torch.exp(-(x - self.b)**2/(self.c**2))
         return result
 
+
 class myModel(torch.nn.Module):
+    """ Model for the Poisson equation."""
+
     def __init__(self):
         super().__init__()
 
@@ -56,10 +59,11 @@ class myModel(torch.nn.Module):
         self.ffn_y = myRBF('y')
 
     def forward(self, x):
-        result = self.ffn_x(x) *  self.ffn_y(x)
+        result = self.ffn_x(x) * self.ffn_y(x)
         result.labels = ['u']
         return result
 
+
 if __name__ == "__main__":
     parser = argparse.ArgumentParser(description="Run PINA")
     parser.add_argument("-s", "--save", action="store_true")
@@ -97,7 +101,8 @@ if __name__ == "__main__":
         print(model.ffn_x.b)
         print(model.ffn_x.c)
 
-        xi = torch.linspace(0, 1, 64).reshape(-1, 1).as_subclass(LabelTensor)
+        xi = torch.linspace(0, 1, 64).reshape(-1,
+                                              1).as_subclass(LabelTensor)
         xi.labels = ['x']
         yi = model.ffn_x(xi)
         y_truth = -torch.sin(xi*torch.pi)
diff --git a/examples/run_stokes.py b/examples/run_stokes.py
index cf3fd41..c0711a9 100644
--- a/examples/run_stokes.py
+++ b/examples/run_stokes.py
@@ -1,10 +1,9 @@
 import argparse
-import sys
 
 import numpy as np
 import torch
 from torch.nn import ReLU, Tanh, Softplus
-from pina import PINN, LabelTensor, Plotter
+from pina import PINN, Plotter
 from pina.model import FeedForward
 from pina.adaptive_functions import AdaptiveSin, AdaptiveCos, AdaptiveTanh
 from problems.stokes import Stokes
diff --git a/pina/adaptive_functions/adaptive_linear.py b/pina/adaptive_functions/adaptive_linear.py
index ffe24f6..f4b72e9 100644
--- a/pina/adaptive_functions/adaptive_linear.py
+++ b/pina/adaptive_functions/adaptive_linear.py
@@ -1,3 +1,4 @@
+""" Implementation of adaptive linear layer. """
 import torch
 from torch.nn.parameter import Parameter
 
diff --git a/pina/adaptive_functions/adaptive_relu.py b/pina/adaptive_functions/adaptive_relu.py
index 0e99ba9..14cf133 100644
--- a/pina/adaptive_functions/adaptive_relu.py
+++ b/pina/adaptive_functions/adaptive_relu.py
@@ -1,7 +1,7 @@
 import torch
 from torch.nn.parameter import Parameter
 
-class AdaptiveReLU(torch.nn.Module):
+class AdaptiveReLU(torch.nn.Module, Parameter):
     '''
     Implementation of soft exponential activation.
     Shape:
diff --git a/pina/dataset.py b/pina/dataset.py
index bba569a..8093cd5 100644
--- a/pina/dataset.py
+++ b/pina/dataset.py
@@ -1,4 +1,3 @@
-""" """
 from torch.utils.data import Dataset, DataLoader
 import functools
 
diff --git a/pina/geometry/cartesian.py b/pina/geometry/cartesian.py
index 5a0050b..6c8f810 100644
--- a/pina/geometry/cartesian.py
+++ b/pina/geometry/cartesian.py
@@ -82,7 +82,8 @@ class CartesianDomain(Location):
             pts = chebyshev_roots(n).mul(.5).add(.5).reshape(-1, 1)
         elif mode == 'grid':
             pts = torch.linspace(0, 1, n).reshape(-1, 1)
-        elif mode == 'lh' or mode == 'latin':
+        # elif mode == 'lh' or mode == 'latin':
+        elif mode in ['lh', 'latin']:
             pts = torch_lhs(n, dim)
 
         pts *= bounds[:, 1] - bounds[:, 0]
diff --git a/pina/model/layers/integral.py b/pina/model/layers/integral.py
index 3269134..cbbc048 100644
--- a/pina/model/layers/integral.py
+++ b/pina/model/layers/integral.py
@@ -1,3 +1,4 @@
+""" Integral class for continuous convolution """
 import torch
 
 
diff --git a/pina/model/network.py b/pina/model/network.py
index 76d94f6..ff9dce5 100644
--- a/pina/model/network.py
+++ b/pina/model/network.py
@@ -4,6 +4,8 @@ from ..utils import check_consistency
 
 
 class Network(torch.nn.Module):
+    """ Network class with standard forward method
+    and possibility to pass extra features."""
 
     def __init__(self, model, extra_features=None):
         super().__init__()
diff --git a/pina/plotter.py b/pina/plotter.py
index d92e780..fd22d06 100644
--- a/pina/plotter.py
+++ b/pina/plotter.py
@@ -1,6 +1,5 @@
 """ Module for plotting. """
 import matplotlib.pyplot as plt
-import numpy as np
 import torch
 
 from pina import LabelTensor
@@ -43,7 +42,8 @@ class Plotter:
         proj = '3d' if len(variables) == 3 else None
         ax = fig.add_subplot(projection=proj)
         for location in solver.problem.input_pts:
-            coords = solver.problem.input_pts[location].extract(variables).T.detach()
+            coords = solver.problem.input_pts[location].extract(
+                variables).T.detach()
             if coords.shape[0] == 1:  # 1D samples
                 ax.plot(coords[0], torch.zeros(coords[0].shape), '.',
                         label=location)