Create paper.md (#74)
* Create paper.md
* Create draft-pdf.yml
* Update paper.md

---------

Co-authored-by: Dario Coscia <93731561+dario-coscia@users.noreply.github.com>
.github/workflows/draft-pdf.yml (new file, vendored, 23 lines)
@@ -0,0 +1,23 @@
on: [push]

jobs:
  paper:
    runs-on: ubuntu-latest
    name: Paper Draft
    steps:
      - name: Checkout
        uses: actions/checkout@v3
      - name: Build draft PDF
        uses: openjournals/openjournals-draft-action@master
        with:
          journal: joss
          # This should be the path to the paper within your repo.
          paper-path: joss/paper.md
      - name: Upload
        uses: actions/upload-artifact@v1
        with:
          name: paper
          # This is the output path where Pandoc will write the compiled
          # PDF. Note, this should be the same directory as the input
          # paper.md
          path: joss/paper.pdf
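On every push, this workflow compiles joss/paper.md with the openjournals draft action and uploads the resulting joss/paper.pdf as a build artifact named paper.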
joss/API_color.pdf (new file, binary)
joss/API_color.png (new file, binary, 95 KiB; not shown)
joss/paper.bib (new file, 240 lines)
@@ -0,0 +1,240 @@
@article{deng2014deep,
  title={Deep learning: methods and applications},
  author={Deng, Li and Yu, Dong and others},
  journal={Foundations and Trends{\textregistered} in Signal Processing},
  volume={7},
  number={3--4},
  pages={197--387},
  year={2014},
  publisher={Now Publishers, Inc.}
}

@article{Wang_2005,
  title={Modeling and control of magnetorheological fluid dampers using neural networks},
  author={D H Wang and W H Liao},
  journal={Smart Materials and Structures},
  volume={14},
  number={1},
  pages={111},
  year={2004},
  month={dec},
  publisher={IOP Publishing},
  doi={10.1088/0964-1726/14/1/011},
  url={https://dx.doi.org/10.1088/0964-1726/14/1/011}
}

@article{RAISSI2019686,
  title={Physics-informed neural networks: A deep learning framework for solving forward and inverse problems involving nonlinear partial differential equations},
  author={M. Raissi and P. Perdikaris and G.E. Karniadakis},
  journal={Journal of Computational Physics},
  volume={378},
  pages={686--707},
  year={2019},
  issn={0021-9991},
  doi={10.1016/j.jcp.2018.10.045},
  url={https://www.sciencedirect.com/science/article/pii/S0021999118307125}
}

@misc{pinns,
  title={Scientific Machine Learning through Physics-Informed Neural Networks: Where we are and What's next},
  author={Cuomo, Salvatore and di Cola, Vincenzo Schiano and Giampaolo, Fabio and Rozza, Gianluigi and Raissi, Maziar and Piccialli, Francesco},
  publisher={arXiv},
  year={2022},
  doi={10.48550/ARXIV.2201.05624},
  url={https://arxiv.org/abs/2201.05624}
}

%% other PINN packages
@article{chen2020neurodiffeq,
  title={{NeuroDiffEq}: A {Python} package for solving differential equations with neural networks},
  author={Chen, Feiyu and Sondak, David and Protopapas, Pavlos and Mattheakis, Marios and Liu, Shuheng and Agarwal, Devansh and Di Giovanni, Marco},
  journal={Journal of Open Source Software},
  volume={5},
  number={46},
  pages={1931},
  year={2020}
}

@article{lu2021deepxde,
  title={{DeepXDE}: A deep learning library for solving differential equations},
  author={Lu, Lu and Meng, Xuhui and Mao, Zhiping and Karniadakis, George Em},
  journal={SIAM Review},
  volume={63},
  number={1},
  pages={208--228},
  year={2021},
  publisher={SIAM}
}

@article{mcclenny2021tensordiffeq,
  title={{TensorDiffEq}: Scalable Multi-GPU Forward and Inverse Solvers for Physics Informed Neural Networks},
  author={McClenny, Levi D and Haile, Mulugeta A and Braga-Neto, Ulisses M},
  journal={arXiv preprint arXiv:2103.16034},
  year={2021}
}

@article{peng2021idrlnet,
  title={{IDRLnet}: A physics-informed neural network library},
  author={Peng, Wei and Zhang, Jun and Zhou, Weien and Zhao, Xiaoyu and Yao, Wen and Chen, Xiaoqian},
  journal={arXiv preprint arXiv:2107.04320},
  year={2021}
}

@inproceedings{hennigh2021nvidia,
  title={{NVIDIA SimNet}™: An AI-accelerated multi-physics simulation framework},
  author={Hennigh, Oliver and Narasimhan, Susheela and Nabian, Mohammad Amin and Subramaniam, Akshay and Tangsali, Kaustubh and Fang, Zhiwei and Rietmann, Max and Byeon, Wonmin and Choudhry, Sanjay},
  booktitle={International Conference on Computational Science},
  pages={447--461},
  year={2021},
  organization={Springer}
}

@article{haghighat2021sciann,
  title={{SciANN}: A {Keras}/{TensorFlow} wrapper for scientific computations and physics-informed deep learning using artificial neural networks},
  author={Haghighat, Ehsan and Juanes, Ruben},
  journal={Computer Methods in Applied Mechanics and Engineering},
  volume={373},
  pages={113552},
  year={2021},
  publisher={Elsevier}
}

@article{koryagin2019pydens,
  title={{PyDEns}: A {Python} framework for solving differential equations with neural networks},
  author={Koryagin, Alexander and Khudorozkov, Roman and Tsimfer, Sergey},
  journal={arXiv preprint arXiv:1909.11544},
  year={2019}
}

@article{araz2021elvet,
  title={Elvet -- a neural network-based differential equation and variational problem solver},
  author={Araz, Jack Y and Criado, Juan Carlos and Spannowsky, Michael},
  journal={arXiv preprint arXiv:2103.14575},
  year={2021}
}

@article{MAO2020112789,
  title={Physics-informed neural networks for high-speed flows},
  author={Zhiping Mao and Ameya D. Jagtap and George Em Karniadakis},
  journal={Computer Methods in Applied Mechanics and Engineering},
  volume={360},
  pages={112789},
  year={2020},
  issn={0045-7825},
  doi={10.1016/j.cma.2019.112789},
  url={https://www.sciencedirect.com/science/article/pii/S0045782519306814}
}

@misc{Markidis,
  title={The Old and the New: Can Physics-Informed Deep-Learning Replace Traditional Linear Solvers?},
  author={Markidis, Stefano},
  publisher={arXiv},
  year={2021},
  doi={10.48550/ARXIV.2103.09655},
  url={https://arxiv.org/abs/2103.09655}
}

@article{Kharazmi_2021,
  title={hp-{VPINNs}: Variational physics-informed neural networks with domain decomposition},
  author={Ehsan Kharazmi and Zhongqiang Zhang and George E.M. Karniadakis},
  journal={Computer Methods in Applied Mechanics and Engineering},
  volume={374},
  pages={113547},
  year={2021},
  month={feb},
  publisher={Elsevier BV},
  doi={10.1016/j.cma.2020.113547},
  url={https://doi.org/10.1016/j.cma.2020.113547}
}

@article{YUCESAN2022108875,
  title={A hybrid physics-informed neural network for main bearing fatigue prognosis under grease quality variation},
  author={Yigit A. Yucesan and Felipe A.C. Viana},
  journal={Mechanical Systems and Signal Processing},
  volume={171},
  pages={108875},
  year={2022},
  issn={0888-3270},
  doi={10.1016/j.ymssp.2022.108875},
  url={https://www.sciencedirect.com/science/article/pii/S088832702200070X}
}

@misc{strazdemo,
  title={An extended physics informed neural network for preliminary analysis of parametric optimal control problems},
  author={Demo, Nicola and Strazzullo, Maria and Rozza, Gianluigi},
  publisher={arXiv},
  year={2021},
  doi={10.48550/ARXIV.2110.13530},
  url={https://arxiv.org/abs/2110.13530}
}

@misc{adam,
  title={Adam: A Method for Stochastic Optimization},
  author={Kingma, Diederik P. and Ba, Jimmy},
  publisher={arXiv},
  year={2014},
  doi={10.48550/ARXIV.1412.6980},
  url={https://arxiv.org/abs/1412.6980}
}

@misc{ccnn,
  title={A Continuous Convolutional Trainable Filter for Modelling Unstructured Data},
  author={Coscia, Dario and Meneghetti, Laura and Demo, Nicola and Stabile, Giovanni and Rozza, Gianluigi},
  publisher={arXiv},
  year={2022},
  doi={10.48550/ARXIV.2210.13416},
  url={https://arxiv.org/abs/2210.13416}
}
joss/paper.md (new file, 92 lines)
@@ -0,0 +1,92 @@
---
title: 'Physics-Informed Neural networks for Advanced modeling'
tags:
  - python
  - deep learning
  - physics-informed neural networks
  - scientific machine learning
  - differential equations
authors:
  - name: Dario Coscia
    orcid: 0000-0001-8833-6833
    equal-contrib: true
    affiliation: "1"
  - name: Anna Ivagnes
    orcid: 0000-0002-2369-4493
    equal-contrib: true
    affiliation: "1"
  - name: Nicola Demo
    orcid: 0000-0003-3107-9738
    equal-contrib: true
    affiliation: "1"
  - name: Gianluigi Rozza
    orcid: 0000-0002-0810-8812
    equal-contrib: true
    affiliation: "1"
affiliations:
  - name: SISSA, International School of Advanced Studies, Via Bonomea 265, Trieste, Italy
    index: 1
date: 15 March 2023
bibliography: paper.bib
---

# Introduction
Artificial Intelligence (AI) strategies are massively emerging in several fields of academia and industrial research [@deng2014deep; @Wang_2005], due to the growing availability of data as well as the great improvement in computational resources. In the area of applied mathematics and simulations, AI strategies are being used to solve problems where classical methods fail [@pinns].

However, the amount of data required to analyze complex systems is often insufficient to make AI predictions reliable and robust. Physics-informed neural networks (PINNs) have been formulated [@RAISSI2019686] to overcome the issue of missing data by incorporating physical knowledge into the neural network training. Thus, PINNs aim to approximate any differential equation by solving a minimization problem in an unsupervised learning setting, learning the unknown field so as to preserve the imposed constraints (boundary and physical residuals). Formally, we consider the general form of a differential equation, which typically presents the most challenging issues from a numerical point of view:
\begin{equation}
\begin{split}
\mathcal{F}(\pmb{u}(\pmb{z});\alpha)&=\pmb{f}(\pmb{z}) \quad \pmb{z} \in \Omega,\\
\mathcal{B}(\pmb{u}(\pmb{z}))&=\pmb{g}(\pmb{z}) \quad \pmb{z} \in \partial\Omega,
\end{split}
\end{equation}
where $\Omega\subset\mathbb{R}^d$ is the domain and $\partial\Omega$ its boundary. In particular, $\pmb{z}$ indicates the spatio-temporal coordinates vector, $\pmb{u}$ the unknown field, $\alpha$ the physical parameters, $\pmb{f}$ the forcing term, and $\mathcal{F}$ the differential operator. In addition, $\mathcal{B}$ identifies the operator imposing arbitrary initial or boundary conditions, and $\pmb{g}$ the boundary function. The PINN's objective is to find a solution to the problem, which is done by approximating the true solution $\pmb{u}$ with a neural network $\hat{\pmb{u}}_{\theta} : \Omega \rightarrow \mathbb{R}$, where $\theta$ denotes the network parameters. Such a model is trained to find the optimal parameters $\theta^*$ minimizing a loss function composed of the physical residual term $\mathcal{L}_{\mathcal{F}}$, the boundary term $\mathcal{L}_{\mathcal{B}}$ and, if real data are available, a data term $\mathcal{L}_{\textrm{data}}$:
\begin{equation}
\theta^* = \underset{\theta}{\mathrm{argmin}} \, \mathcal{L} =
\underset{\theta}{\mathrm{argmin}} \, (\mathcal{L}_{\mathcal{F}} + \mathcal{L}_{\mathcal{B}} + \mathcal{L}_{\text{data}}).
\end{equation}
The PINN framework is completely general and applicable to different types of ordinary differential equations (ODEs) and partial differential equations (PDEs). Nevertheless, the loss function strictly depends on the problem to be solved, since different operators or boundary conditions lead to different losses, which makes it difficult to write general, portable code for different problems. A minimal sketch of how such a composite loss is assembled is shown below.
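As a concrete illustration, the following sketch assembles the three loss terms for the one-dimensional Poisson problem $-u''(x) = \pi^2 \sin(\pi x)$ on $\Omega = (0, 1)$ with homogeneous Dirichlet boundary conditions. It is written in plain PyTorch rather than PINA's API; the network architecture, sampling sizes, and the helper `pde_residual` are illustrative choices, not prescriptions:

```python
import torch

# Illustrative network approximating u(x); any torch.nn.Module
# mapping (N, 1) -> (N, 1) tensors would do.
model = torch.nn.Sequential(
    torch.nn.Linear(1, 20), torch.nn.Tanh(), torch.nn.Linear(20, 1)
)

def pde_residual(x):
    """Residual of -u''(x) = f(x), with f(x) = pi^2 sin(pi x)."""
    x = x.requires_grad_(True)
    u = model(x)
    # First and second derivatives via the autograd module.
    du = torch.autograd.grad(u.sum(), x, create_graph=True)[0]
    d2u = torch.autograd.grad(du.sum(), x, create_graph=True)[0]
    f = torch.pi ** 2 * torch.sin(torch.pi * x)
    return -d2u - f

x_dom = torch.rand(100, 1)                  # collocation points in Omega
x_bnd = torch.tensor([[0.0], [1.0]])        # boundary of Omega

loss_F = pde_residual(x_dom).pow(2).mean()  # physical residual term
loss_B = model(x_bnd).pow(2).mean()         # boundary term (u = 0)
loss = loss_F + loss_B                      # plus a data term, if data exist

optimizer = torch.optim.Adam(model.parameters(), lr=1e-3)
optimizer.zero_grad()
loss.backward()
optimizer.step()
```

One such optimization step is repeated until the composite loss converges; every new operator or boundary condition requires rewriting these terms by hand, which is precisely the boilerplate PINA abstracts away.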
\textbf{PINA}, \emph{Physics-Informed Neural networks for Advanced modeling}, is a Python library built on PyTorch that provides a user-friendly API to formalize a large variety of physical problems and solve them easily using PINNs.

# Statement of need
PINA is an open-source Python library that provides an intuitive interface for the approximate resolution of ordinary differential equations (ODEs) and partial differential equations (PDEs) using a deep learning paradigm, in particular via PINNs.
The rise in popularity of PINNs in recent years, together with the evolution of open-source frameworks such as TensorFlow, Keras, and PyTorch, has led to the development of several libraries whose focus is the exploitation of PINNs to approximately solve ODEs and PDEs.
We mention here some PyTorch-based libraries, \verb+NeuroDiffEq+ [@chen2020neurodiffeq] and \verb+IDRLNet+ [@peng2021idrlnet], and some TensorFlow-based libraries, such as \verb+DeepXDE+ [@lu2021deepxde], \verb+TensorDiffEq+ [@mcclenny2021tensordiffeq], \verb+SciANN+ [@haghighat2021sciann] (which is both TensorFlow- and Keras-based), \verb+PyDEns+ [@koryagin2019pydens], \verb+Elvet+ [@araz2021elvet], and \verb+NVIDIA SimNet+ [@hennigh2021nvidia].
Among all these frameworks, PINA aims to stand out for its ease of use, allowing users to quickly formulate the problem at hand and solve it, resulting in an intuitive framework designed by researchers for researchers.
We have decided to build it on top of PyTorch in order to exploit the \verb+autograd+ module, as well as all the other features implemented in this framework. The final outcome is then a library with incremental complexity, suitable for new users performing their first investigations with PINNs, but also usable as a core framework for actively developing new features that improve the discussed methodology.

The high-level structure of the package is illustrated in Figure \ref{API_visual}; the approximate solution of a differential equation can be implemented using PINA in a few lines of code thanks to the intuitive and user-friendly interface.
Besides the user-friendly interface, PINA also offers several examples and tutorials aiming to guide new users toward an easy exploration of the software features. The online documentation is released at \url{https://mathlab.github.io/PINA/}, while the robustness of the package is continuously monitored by unit tests.

The API visualization in Figure \ref{API_visual} shows that a complete workflow in PINA consists of three main steps: the problem formulation, the model definition, i.e. the structure of the neural network used, and the PINN training, optionally followed by the data visualization.
![High-level structure of the PINA package.\label{API_visual}](API_color.png){ width=70% }

## Problem definition in PINA
The first step is the formalization of the problem.
A problem definition in the PINA framework inherits from one or more problem classes (at the moment the available classes are \verb+SpatialProblem+, \verb+TimeDependentProblem+, \verb+ParametricProblem+), depending on the nature of the problem treated.
The user has to include in the problem formulation the following components:
\begin{itemize}
\item the information about the domain, i.e. the spatial and temporal variables and the parameters of the problem (if any), with the corresponding ranges of variation;
\item the output variables, i.e. the unknowns of the problem;
\item the conditions that the neural network has to satisfy, i.e. the differential equations, the boundary and initial conditions.
\end{itemize}
We highlight that in PINA we abandoned the classical division between physical loss, boundary loss, and data loss: all these terms are encapsulated within the \verb+Condition+ class, in order to keep the framework as general as possible. The users can indeed define all the constraints the unknown field needs to satisfy, avoiding any forced structure in the formulation and allowing them to mix heterogeneous constraints, e.g. data values and differential boundary conditions. A schematic problem declaration in this style is sketched below.
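As an illustration, a Poisson problem might be declared along the following lines. This is a sketch modeled on the tutorials shipped with the library; the \verb+Span+ domain class, the \verb+nabla+ operator, and the exact \verb+Condition+ signature are assumptions that may differ between versions:

```python
import torch
from pina import Condition, Span
from pina.problem import SpatialProblem
from pina.operators import nabla  # assumed Laplacian helper

class Poisson(SpatialProblem):
    # Unknown field and spatial domain (here, the unit square).
    output_variables = ['u']
    spatial_domain = Span({'x': [0, 1], 'y': [0, 1]})

    def laplace_equation(input_, output_):
        # Residual of the Poisson equation on the interior.
        force = (torch.sin(input_.extract(['x']) * torch.pi) *
                 torch.sin(input_.extract(['y']) * torch.pi))
        return nabla(output_.extract(['u']), input_) - force

    def nil_dirichlet(input_, output_):
        # Homogeneous Dirichlet condition u = 0 on the boundary.
        return output_.extract(['u'])

    # Physics, boundary, and (potentially) data constraints are all
    # expressed uniformly as Conditions; left and right sides of the
    # square are omitted for brevity.
    conditions = {
        'gamma_top': Condition(location=Span({'x': [0, 1], 'y': 1}),
                               function=nil_dirichlet),
        'gamma_bot': Condition(location=Span({'x': [0, 1], 'y': 0}),
                               function=nil_dirichlet),
        'domain': Condition(location=Span({'x': [0, 1], 'y': [0, 1]}),
                            function=laplace_equation),
    }
```
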
## Model definition in PINA
The second fundamental step is the definition of the model of the neural network employed to find the approximate solution to the differential problem in question.
In PINA, the user can either use a custom \verb+torch+ network model and translate it into a PINA model (with the class \verb+Network+), or exploit one of the built-in models such as \verb+FeedForward+, \verb+MultiFeedForward+ and \verb+DeepONet+, defining their characteristics during instantiation, i.e. number of layers, number of neurons, activation functions. The list of built-in models will be extended in the next release of the library. A sketch of the remaining workflow follows.
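Continuing the hypothetical Poisson example above, a built-in model can be instantiated and trained as sketched here; the constructor arguments, the \verb+span_pts+ sampling call, and the \verb+Plotter+ utility are assumptions based on the tutorials of this release and may differ between versions:

```python
import torch
from pina import PINN, Plotter
from pina.model import FeedForward

problem = Poisson()

# Built-in fully connected network; sizes and activation are illustrative.
model = FeedForward(
    input_variables=problem.input_variables,
    output_variables=problem.output_variables,
    layers=[20, 20, 20],
    func=torch.nn.Tanh,
)

# Sample collocation points, train the PINN, then visualize the result.
pinn = PINN(problem, model, lr=0.003)
pinn.span_pts(200, 'grid', locations=['domain'])
pinn.span_pts(50, 'grid', locations=['gamma_top', 'gamma_bot'])
pinn.train(1000)

Plotter().plot(pinn)
```
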
# Acknowledgements
We thank our colleagues and research partners who contributed to the former and current development of the PINA library.
This work was partially funded by European Union Funding for Research and Innovation (Horizon 2020 Program), in the framework of the European Research Council Executive Agency: H2020 ERC CoG 2015 AROMA-CFD project 681447, “Advanced Reduced Order Methods with Applications in Computational Fluid Dynamics”, P.I. Professor Gianluigi Rozza.

# References
joss/pina_logo.png (new file, binary, 51 KiB; not shown)
joss/pinn_base.pdf (new file, binary)
joss/pinn_feat.pdf (new file, binary)
joss/pinn_learn.pdf (new file, binary)