@article{deng2014deep,
  title     = {Deep learning: methods and applications},
  author    = {Deng, Li and Yu, Dong and others},
  journal   = {Foundations and Trends{\textregistered} in Signal Processing},
  volume    = {7},
  number    = {3--4},
  pages     = {197--387},
  year      = {2014},
  publisher = {Now Publishers, Inc.},
}

@article{Wang_2005,
  author        = {Wang, D. H. and Liao, W. H.},
  title         = {Modeling and control of magnetorheological fluid dampers using neural networks},
  journal       = {Smart Materials and Structures},
  volume        = {14},
  number        = {1},
  pages         = {111},
  year          = {2004},
  month         = dec,
  doi           = {10.1088/0964-1726/14/1/011},
  url           = {https://dx.doi.org/10.1088/0964-1726/14/1/011},
  internal-note = {Key says 2005 (print issue date) but online publication was Dec 2004 -- verify before changing either.},
}

@article{RAISSI2019686,
  title    = {Physics-informed neural networks: A deep learning framework for solving forward and inverse problems involving nonlinear partial differential equations},
  author   = {Raissi, M. and Perdikaris, P. and Karniadakis, G. E.},
  journal  = {Journal of Computational Physics},
  volume   = {378},
  pages    = {686--707},
  year     = {2019},
  issn     = {0021-9991},
  doi      = {10.1016/j.jcp.2018.10.045},
  url      = {https://www.sciencedirect.com/science/article/pii/S0021999118307125},
  keywords = {Data-driven scientific computing, Machine learning, Predictive modeling, Runge--Kutta methods, Nonlinear dynamics},
  abstract = {We introduce physics-informed neural networks -- neural networks that are trained to solve supervised learning tasks while respecting any given laws of physics described by general nonlinear partial differential equations. In this work, we present our developments in the context of solving two main classes of problems: data-driven solution and data-driven discovery of partial differential equations. Depending on the nature and arrangement of the available data, we devise two distinct types of algorithms, namely continuous time and discrete time models. The first type of models forms a new family of data-efficient spatio-temporal function approximators, while the latter type allows the use of arbitrarily accurate implicit Runge--Kutta time stepping schemes with unlimited number of stages. The effectiveness of the proposed framework is demonstrated through a collection of classical problems in fluids, quantum mechanics, reaction--diffusion systems, and the propagation of nonlinear shallow-water waves.},
}

@misc{pinns,
  title         = {Scientific Machine Learning through Physics-Informed Neural Networks: Where we are and What's next},
  author        = {Cuomo, Salvatore and di Cola, Vincenzo Schiano and Giampaolo, Fabio and Rozza, Gianluigi and Raissi, Maziar and Piccialli, Francesco},
  year          = {2022},
  publisher     = {arXiv},
  doi           = {10.48550/ARXIV.2201.05624},
  url           = {https://arxiv.org/abs/2201.05624},
  eprint        = {2201.05624},
  archiveprefix = {arXiv},
  keywords      = {Machine Learning (cs.LG), Artificial Intelligence (cs.AI), Numerical Analysis (math.NA), Data Analysis, Statistics and Probability (physics.data-an), FOS: Computer and information sciences, FOS: Mathematics, FOS: Physical sciences},
  copyright     = {arXiv.org perpetual, non-exclusive license},
}

%% other PINN packages

@article{chen2020neurodiffeq,
  title   = {{NeuroDiffEq}: A {Python} package for solving differential equations with neural networks},
  author  = {Chen, Feiyu and Sondak, David and Protopapas, Pavlos and Mattheakis, Marios and Liu, Shuheng and Agarwal, Devansh and Di Giovanni, Marco},
  journal = {Journal of Open Source Software},
  volume  = {5},
  number  = {46},
  pages   = {1931},
  year    = {2020},
}

@article{lu2021deepxde,
  title     = {{DeepXDE}: A deep learning library for solving differential equations},
  author    = {Lu, Lu and Meng, Xuhui and Mao, Zhiping and Karniadakis, George Em},
  journal   = {SIAM Review},
  volume    = {63},
  number    = {1},
  pages     = {208--228},
  year      = {2021},
  publisher = {SIAM},
}

@article{mcclenny2021tensordiffeq,
  title   = {{TensorDiffEq}: Scalable Multi-{GPU} Forward and Inverse Solvers for Physics Informed Neural Networks},
  author  = {McClenny, Levi D. and Haile, Mulugeta A. and Braga-Neto, Ulisses M.},
  journal = {arXiv preprint arXiv:2103.16034},
  year    = {2021},
}

@article{peng2021idrlnet,
  title   = {{IDRLnet}: A physics-informed neural network library},
  author  = {Peng, Wei and Zhang, Jun and Zhou, Weien and Zhao, Xiaoyu and Yao, Wen and Chen, Xiaoqian},
  journal = {arXiv preprint arXiv:2107.04320},
  year    = {2021},
}

@inproceedings{hennigh2021nvidia,
  title        = {{NVIDIA} {SimNet}{\texttrademark}: An {AI}-accelerated multi-physics simulation framework},
  author       = {Hennigh, Oliver and Narasimhan, Susheela and Nabian, Mohammad Amin and Subramaniam, Akshay and Tangsali, Kaustubh and Fang, Zhiwei and Rietmann, Max and Byeon, Wonmin and Choudhry, Sanjay},
  booktitle    = {International Conference on Computational Science},
  pages        = {447--461},
  year         = {2021},
  organization = {Springer},
}

@article{haghighat2021sciann,
  title     = {{SciANN}: A {Keras}/{TensorFlow} wrapper for scientific computations and physics-informed deep learning using artificial neural networks},
  author    = {Haghighat, Ehsan and Juanes, Ruben},
  journal   = {Computer Methods in Applied Mechanics and Engineering},
  volume    = {373},
  pages     = {113552},
  year      = {2021},
  publisher = {Elsevier},
}

@article{koryagin2019pydens,
  title   = {{PyDEns}: A {Python} framework for solving differential equations with neural networks},
  author  = {Koryagin, Alexander and Khudorozkov, Roman and Tsimfer, Sergey},
  journal = {arXiv preprint arXiv:1909.11544},
  year    = {2019},
}

@article{araz2021elvet,
  title   = {{Elvet} -- a neural network-based differential equation and variational problem solver},
  author  = {Araz, Jack Y. and Criado, Juan Carlos and Spannowsky, Michael},
  journal = {arXiv preprint arXiv:2103.14575},
  year    = {2021},
}

@article{MAO2020112789,
  title    = {Physics-informed neural networks for high-speed flows},
  author   = {Mao, Zhiping and Jagtap, Ameya D. and Karniadakis, George Em},
  journal  = {Computer Methods in Applied Mechanics and Engineering},
  volume   = {360},
  pages    = {112789},
  year     = {2020},
  issn     = {0045-7825},
  doi      = {10.1016/j.cma.2019.112789},
  url      = {https://www.sciencedirect.com/science/article/pii/S0045782519306814},
  keywords = {Euler equations, Machine learning, Neural networks, Conservation laws, Riemann problem, Hidden fluid mechanics},
  abstract = {In this work we investigate the possibility of using physics-informed neural networks (PINNs) to approximate the Euler equations that model high-speed aerodynamic flows. In particular, we solve both the forward and inverse problems in one-dimensional and two-dimensional domains. For the forward problem, we utilize the Euler equations and the initial/boundary conditions to formulate the loss function, and solve the one-dimensional Euler equations with smooth solutions and with solutions that have a contact discontinuity as well as a two-dimensional oblique shock wave problem. We demonstrate that we can capture the solutions with only a few scattered points clustered randomly around the discontinuities. For the inverse problem, motivated by mimicking the Schlieren photography experimental technique used traditionally in high-speed aerodynamics, we use the data on density gradient $\nabla\rho(x,t)$, the pressure $p(x^*,t)$ at a specified point $x=x^*$ as well as the conservation laws to infer all states of interest (density, velocity and pressure fields). We present illustrative benchmark examples for both the problem with smooth solutions and Riemann problems (Sod and Lax problems) with PINNs, demonstrating that all inferred states are in good agreement with the reference solutions. Moreover, we show that the choice of the position of the point $x^*$ plays an important role in the learning process. In particular, for the problem with smooth solutions we can randomly choose the position of the point $x^*$ from the computational domain, while for the Sod or Lax problem, we have to choose the position of the point $x^*$ from the domain between the initial discontinuous point and the shock position of the final time. We also solve the inverse problem by combining the aforementioned data and the Euler equations in characteristic form, showing that the results obtained by using the Euler equations in characteristic form are better than that obtained by using the Euler equations in conservative form. Furthermore, we consider another type of inverse problem, specifically, we employ PINNs to learn the value of the parameter $\gamma$ in the equation of state for the parameterized two-dimensional oblique wave problem by using the given data of the density, velocity and the pressure, and we identify the parameter $\gamma$ accurately. Taken together, our results demonstrate that in the current form, where the conservation laws are imposed at random points, PINNs are not as accurate as traditional numerical methods for forward problems but they are superior for inverse problems that cannot even be solved with standard techniques.},
}

@misc{Markidis,
  title         = {The Old and the New: Can Physics-Informed Deep-Learning Replace Traditional Linear Solvers?},
  author        = {Markidis, Stefano},
  year          = {2021},
  publisher     = {arXiv},
  doi           = {10.48550/ARXIV.2103.09655},
  url           = {https://arxiv.org/abs/2103.09655},
  eprint        = {2103.09655},
  archiveprefix = {arXiv},
  keywords      = {Numerical Analysis (math.NA), Distributed, Parallel, and Cluster Computing (cs.DC), Computational Physics (physics.comp-ph), FOS: Mathematics, FOS: Computer and information sciences, FOS: Physical sciences},
  copyright     = {arXiv.org perpetual, non-exclusive license},
}

@article{Kharazmi_2021,
  title     = {hp-{VPINNs}: Variational physics-informed neural networks with domain decomposition},
  author    = {Kharazmi, Ehsan and Zhang, Zhongqiang and Karniadakis, George E. M.},
  journal   = {Computer Methods in Applied Mechanics and Engineering},
  volume    = {374},
  pages     = {113547},
  year      = {2021},
  month     = feb,
  publisher = {Elsevier {BV}},
  doi       = {10.1016/j.cma.2020.113547},
  url       = {https://doi.org/10.1016/j.cma.2020.113547},
}

@article{YUCESAN2022108875,
  title    = {A hybrid physics-informed neural network for main bearing fatigue prognosis under grease quality variation},
  author   = {Yucesan, Yigit A. and Viana, Felipe A. C.},
  journal  = {Mechanical Systems and Signal Processing},
  volume   = {171},
  pages    = {108875},
  year     = {2022},
  issn     = {0888-3270},
  doi      = {10.1016/j.ymssp.2022.108875},
  url      = {https://www.sciencedirect.com/science/article/pii/S088832702200070X},
  keywords = {Hybrid physics-informed neural network, Applied machine learning, Wind turbine bearing fatigue, Uncertainty quantification},
  abstract = {Fatigue life of a wind turbine main bearing is drastically affected by the state of the grease used as lubricant. Unfortunately monitoring the grease condition through predictive models can be a daunting task due to uncertainties associated with degradation mechanism and variations in grease batch quality. Eventually, discrepancies in the grease life predictions caused by variable grease quality may lead up to inaccurate bearing fatigue life predictions. The convoluted nature of the problem requires a novel solution approach; and in this contribution, we propose a new hybrid physics-informed neural network model. We construct a hybrid model for bearing fatigue damage accumulation embedded as a recurrent neural network cell, where reduced-order physics models used for bearing fatigue damage accumulation, and neural networks represent grease degradation mechanism that quantifies grease damage that ultimately accelerates bearing fatigue. We outline a two-step probabilistic approach to quantify the grease quality variation. In the first step, we make use of the hybrid model to learn the grease degradation when the quality is the median of the distribution. In the second step, we take the median predictor from the first step and track the quantiles of the quality distribution by examining grease samples of each wind turbine. We finally showcase our approach with a numerical experiment, where we test the effect of the random realizations of quality variation and the number of sampled turbines on the performance of the model. Results of the numerical experiment indicate that given enough samples from different wind turbines, our method can successfully learn the median grease degradation and uncertainty about it. With this predictive model, we are able to optimize the regreasing intervals on a turbine-by-turbine basis. The source codes and links to the data can be found in the following GitHub repository https://github.com/PML-UCF/pinn\_wind\_bearing.},
}

@misc{strazdemo,
  title         = {An extended physics informed neural network for preliminary analysis of parametric optimal control problems},
  author        = {Demo, Nicola and Strazzullo, Maria and Rozza, Gianluigi},
  year          = {2021},
  publisher     = {arXiv},
  doi           = {10.48550/ARXIV.2110.13530},
  url           = {https://arxiv.org/abs/2110.13530},
  eprint        = {2110.13530},
  archiveprefix = {arXiv},
  keywords      = {Machine Learning (cs.LG), Numerical Analysis (math.NA), FOS: Computer and information sciences, FOS: Mathematics},
  copyright     = {arXiv.org perpetual, non-exclusive license},
}

@misc{adam,
  title         = {Adam: A Method for Stochastic Optimization},
  author        = {Kingma, Diederik P. and Ba, Jimmy},
  year          = {2014},
  publisher     = {arXiv},
  doi           = {10.48550/ARXIV.1412.6980},
  url           = {https://arxiv.org/abs/1412.6980},
  eprint        = {1412.6980},
  archiveprefix = {arXiv},
  keywords      = {Machine Learning (cs.LG), FOS: Computer and information sciences},
  copyright     = {arXiv.org perpetual, non-exclusive license},
}

@misc{ccnn,
  title         = {A Continuous Convolutional Trainable Filter for Modelling Unstructured Data},
  author        = {Coscia, Dario and Meneghetti, Laura and Demo, Nicola and Stabile, Giovanni and Rozza, Gianluigi},
  year          = {2022},
  publisher     = {arXiv},
  doi           = {10.48550/ARXIV.2210.13416},
  url           = {https://arxiv.org/abs/2210.13416},
  eprint        = {2210.13416},
  archiveprefix = {arXiv},
  keywords      = {Machine Learning (cs.LG), Numerical Analysis (math.NA), FOS: Computer and information sciences, FOS: Mathematics},
  copyright     = {Creative Commons Attribution 4.0 International},
}