export tutorials changed in 62fe6f8 (#670)

Co-authored-by: dario-coscia <dario-coscia@users.noreply.github.com>
Author: github-actions[bot]
Date: 2025-10-16 15:59:25 +02:00
Committed by: GitHub
Parent: 62fe6f81b5
Commit: 6d7ce0e4e2
4 changed files with 10234 additions and 86 deletions


@@ -95,7 +95,8 @@
"metadata": {},
"outputs": [],
"source": [
"sigma, rho, beta = 10.0, 28.0, 8/3\n",
"sigma, rho, beta = 10.0, 28.0, 8 / 3\n",
"\n",
"\n",
"def lorenz(x, t):\n",
" dx = np.zeros(3)\n",
@@ -104,22 +105,23 @@
" dx[2] = x[0] * x[1] - beta * x[2]\n",
" return dx\n",
"\n",
"n_ic_s = 200 # number of initial conditions\n",
"T = 1000 # number of timesteps\n",
"dt = 0.001 # timestep\n",
"t = np.linspace(0, (T-1)*dt, T)\n",
"\n",
"n_ic_s = 200 # number of initial conditions\n",
"T = 1000 # number of timesteps\n",
"dt = 0.001 # timestep\n",
"t = np.linspace(0, (T - 1) * dt, T)\n",
"dim = 3\n",
"\n",
"x0s = (np.random.rand(n_ic_s, dim) - 0.5) * 30.0 # Random initial conditions\n",
"x0s = (np.random.rand(n_ic_s, dim) - 0.5) * 30.0 # Random initial conditions\n",
"\n",
"X = np.zeros((n_ic_s, T, dim))\n",
"for i in range(n_ic_s):\n",
" X[i] = odeint(lorenz, x0s[i], t) # integrated trajectories\n",
" X[i] = odeint(lorenz, x0s[i], t) # integrated trajectories\n",
"\n",
"\n",
"def plot_n_conditions(X, n_to_plot):\n",
" fig = plt.figure(figsize=(6, 5))\n",
" ax = fig.add_subplot(111, projection='3d')\n",
" ax = fig.add_subplot(111, projection=\"3d\")\n",
"\n",
" for i in range(n_to_plot):\n",
" ax.plot(X[i, :, 0], X[i, :, 1], X[i, :, 2], lw=1)\n",
@@ -131,6 +133,7 @@
" plt.tight_layout()\n",
" plt.show()\n",
"\n",
"\n",
"plot_n_conditions(X, n_ic_s)"
]
},
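The two hunks above only apply black formatting to the data-generation cell. For reference, a minimal self-contained sketch of the same step; the first two Lorenz equations fall outside this hunk, so they are filled in from the standard system:

import numpy as np
from scipy.integrate import odeint

sigma, rho, beta = 10.0, 28.0, 8 / 3


def lorenz(x, t):
    # Standard Lorenz right-hand side (dx[0] and dx[1] are not shown in the hunk).
    return np.array(
        [
            sigma * (x[1] - x[0]),
            x[0] * (rho - x[2]) - x[1],
            x[0] * x[1] - beta * x[2],
        ]
    )


n_ic_s, T, dt, dim = 200, 1000, 0.001, 3
t = np.linspace(0, (T - 1) * dt, T)
x0s = (np.random.rand(n_ic_s, dim) - 0.5) * 30.0  # random initial conditions
X = np.stack([odeint(lorenz, x0, t) for x0 in x0s])  # (n_ic_s, T, dim)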
@@ -178,7 +181,9 @@
"outputs": [],
"source": [
"dXdt = np.gradient(X, t, axis=1, edge_order=2)\n",
"X_torch = torch.tensor(X, dtype=torch.float32).reshape((-1, dim)) # X_torch has shape (B, dim)\n",
"X_torch = torch.tensor(X, dtype=torch.float32).reshape(\n",
" (-1, dim)\n",
") # X_torch has shape (B, dim)\n",
"dXdt_torch = torch.tensor(dXdt, dtype=torch.float32).reshape((-1, dim))"
]
},
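The cell above estimates the time derivatives with second-order finite differences and flattens the trajectories into a (B, dim) batch for training. A hypothetical sanity check, assuming X, t, and lorenz from the earlier cells: the finite-difference estimate should track the analytic right-hand side closely at this timestep.

# Compare numerical derivatives against the analytic Lorenz RHS.
dXdt = np.gradient(X, t, axis=1, edge_order=2)  # (n_ic_s, T, dim)
rhs = np.apply_along_axis(lorenz, 2, X, None)   # lorenz ignores its t argument
print("max abs deviation:", np.abs(dXdt - rhs).max())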
@@ -220,19 +225,21 @@
"outputs": [],
"source": [
"function_dict = {\n",
" \"1\": lambda u: torch.ones(u.shape[0], 1, device=u.device), # 1\n",
" \"x\": lambda u: u[\"x\"], # x\n",
" \"y\": lambda u: u[\"y\"], # y\n",
" \"z\": lambda u: u[\"z\"], # z\n",
" \"x^2\": lambda u: u[\"x\"].pow(2), # x^2\n",
" \"y^2\": lambda u: u[\"y\"].pow(2), # y^2\n",
" \"z^2\": lambda u: u[\"z\"].pow(2), # z^2\n",
" \"xy\": lambda u: u[\"x\"]*u[\"y\"], # xy\n",
" \"xz\": lambda u: u[\"x\"]*u[\"z\"], # xz\n",
" \"yz\": lambda u: u[\"y\"]*u[\"z\"], # yz\n",
" \"1\": lambda u: torch.ones(u.shape[0], 1, device=u.device), # 1\n",
" \"x\": lambda u: u[\"x\"], # x\n",
" \"y\": lambda u: u[\"y\"], # y\n",
" \"z\": lambda u: u[\"z\"], # z\n",
" \"x^2\": lambda u: u[\"x\"].pow(2), # x^2\n",
" \"y^2\": lambda u: u[\"y\"].pow(2), # y^2\n",
" \"z^2\": lambda u: u[\"z\"].pow(2), # z^2\n",
" \"xy\": lambda u: u[\"x\"] * u[\"y\"], # xy\n",
" \"xz\": lambda u: u[\"x\"] * u[\"z\"], # xz\n",
" \"yz\": lambda u: u[\"y\"] * u[\"z\"], # yz\n",
"}\n",
"\n",
"function_library = [_function for _function in function_dict.values()] # input of the model constructor"
"function_library = [\n",
" _function for _function in function_dict.values()\n",
"] # input of the model constructor"
]
},
{
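The dictionary defines the SINDy candidate library: ten scalar features (constant, linear, quadratic) built from the labeled state components. A hedged sketch of evaluating it on a random labeled batch; the import path is assumed, while the u["x"] label indexing matches the lambdas above:

import torch
from pina import LabelTensor  # import path assumed

u = LabelTensor(torch.randn(5, 3), ["x", "y", "z"])
theta = torch.cat([f(u) for f in function_library], dim=1)
print(theta.shape)  # candidate-feature matrix, here (5, 10)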
@@ -279,11 +286,11 @@
"metadata": {},
"outputs": [],
"source": [
"solver = SupervisedSolver(\n",
"solver = SupervisedSolver(\n",
" problem,\n",
" model=model,\n",
" optimizer=TorchOptimizer(torch.optim.Adam, lr=1e-3, weight_decay=_lambda),\n",
" use_lt=False\n",
" use_lt=False,\n",
")"
]
},
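Since the model is linear in its coefficients, training reduces to a ridge-regularized linear regression dX/dt ≈ Theta(X) Xi, with weight_decay supplying the L2 penalty on Xi. For intuition only, the same regression solved directly by unpenalized least squares (illustrative, not PINA's API):

# Build the library matrix by hand and solve for the coefficients.
x, y, z = X_torch[:, :1], X_torch[:, 1:2], X_torch[:, 2:3]
theta = torch.cat(
    [torch.ones_like(x), x, y, z, x**2, y**2, z**2, x * y, x * z, y * z], dim=1
)
Xi = torch.linalg.lstsq(theta, dXdt_torch).solution  # (10, 3)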
@@ -304,7 +311,7 @@
"source": [
"trainer = Trainer(\n",
" solver,\n",
" accelerator='cpu',\n",
" accelerator=\"cpu\",\n",
" max_epochs=150,\n",
" train_size=0.8,\n",
" val_size=0.1,\n",
@@ -348,12 +355,14 @@
" terms = []\n",
" for i in range(library_dim):\n",
" coefficient = Xi[i, j]\n",
" if abs(coefficient) > tau: # do not print coefficients that are going to be pruned\n",
" if (\n",
" abs(coefficient) > tau\n",
" ): # do not print coefficients that are going to be pruned\n",
" function_name = function_names[i]\n",
" terms.append(f\"{coefficient:+.2f} * {function_name} \")\n",
" \n",
"\n",
" equation = \" \".join(terms)\n",
" \n",
"\n",
" if not equation:\n",
" equation = \"0\"\n",
" if vars is not None:\n",
@@ -364,9 +373,9 @@
"\n",
"tau = 1e-1\n",
"\n",
"print_coefficients(model, list(function_dict.keys()), tau, vars=['x', 'y', 'z'])\n",
"print_coefficients(model, list(function_dict.keys()), tau, vars=[\"x\", \"y\", \"z\"])\n",
"\n",
"with torch.no_grad(): # prune coefficients\n",
"with torch.no_grad(): # prune coefficients\n",
" mask = torch.abs(model.coefficients) >= tau\n",
" model.coefficients.data *= mask"
]
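The mask implements the thresholding half of sequentially thresholded least squares: every coefficient with magnitude below tau is zeroed. An illustrative check of the resulting sparsity; the true Lorenz system keeps 7 of the 30 coefficients (2 + 3 + 2 terms across the three equations):

with torch.no_grad():
    active = int((model.coefficients.abs() >= tau).sum())
print(f"{active} of {model.coefficients.numel()} coefficients survive pruning")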
@@ -397,13 +406,16 @@
"metadata": {},
"outputs": [],
"source": [
"def SINDy_equations(x, t): # we need a numpy array for odeint\n",
"def SINDy_equations(x, t): # we need a numpy array for odeint\n",
" with torch.no_grad():\n",
" x_torch = torch.tensor(x, dtype=torch.float32).unsqueeze(0) # shape (1, dim)\n",
" x_torch = torch.tensor(x, dtype=torch.float32).unsqueeze(\n",
" 0\n",
" ) # shape (1, dim)\n",
" x_torch = LabelTensor(x_torch, [\"x\", \"y\", \"z\"])\n",
" dx = model(x_torch).squeeze(0)\n",
" return dx.numpy()\n",
"\n",
"\n",
"n_ic_s_test = 50\n",
"x0s = (np.random.rand(n_ic_s_test, dim) - 0.5) * 30.0\n",
"\n",