export tutorials changed in dd88513 (#559)

Co-authored-by: dario-coscia <dario-coscia@users.noreply.github.com>
Author: github-actions[bot]
Date: 2025-04-23 18:48:47 +02:00
Committed by: Dario Coscia
parent 228f807d92
commit d10c525e74
53 changed files with 65690 additions and 2320 deletions

@@ -97,12 +97,13 @@
"source": [
"## routine needed to run the notebook on Google Colab\n",
"try:\n",
" import google.colab\n",
" IN_COLAB = True\n",
" import google.colab\n",
"\n",
" IN_COLAB = True\n",
"except:\n",
" IN_COLAB = False\n",
" IN_COLAB = False\n",
"if IN_COLAB:\n",
" !pip install \"pina-mathlab[tutorial]\"\n",
" !pip install \"pina-mathlab[tutorial]\"\n",
"\n",
"import warnings\n",
"import torch\n",
@@ -140,16 +141,18 @@
"outputs": [],
"source": [
"# (a) Data generation and plot\n",
"domain = CartesianDomain({'x' : [-3, 3]})\n",
"domain = CartesianDomain({\"x\": [-3, 3]})\n",
"x = domain.sample(n=20, mode=\"random\")\n",
"y = LabelTensor(x.pow(3) + 3*torch.randn_like(x), 'y')\n",
"y = LabelTensor(x.pow(3) + 3 * torch.randn_like(x), \"y\")\n",
"\n",
"\n",
"# (b) PINA Problem formulation\n",
"class BayesianProblem(AbstractProblem):\n",
"\n",
" output_variables = ['y']\n",
" input_variables = ['x']\n",
" conditions = {'data': Condition(input_points=x, output_points=y)}\n",
" output_variables = [\"y\"]\n",
" input_variables = [\"x\"]\n",
" conditions = {\"data\": Condition(input_points=x, output_points=y)}\n",
"\n",
"\n",
"problem = BayesianProblem()\n",
"\n",
@@ -309,32 +312,35 @@
}
],
"source": [
"from pina.solver import SupervisedSolver \n",
"from pina.solver import SupervisedSolver\n",
"from pina.trainer import Trainer\n",
"\n",
"\n",
"# define problem & data (step 1)\n",
"class BayesianModel(torch.nn.Module):\n",
" def __init__(self):\n",
" super().__init__()\n",
" self.layers = torch.nn.Sequential(\n",
" torch.nn.Linear(1, 100),\n",
" torch.nn.ReLU(),\n",
" torch.nn.Dropout(0.5),\n",
" torch.nn.Linear(100, 1)\n",
" )\n",
" torch.nn.Linear(1, 100),\n",
" torch.nn.ReLU(),\n",
" torch.nn.Dropout(0.5),\n",
" torch.nn.Linear(100, 1),\n",
" )\n",
"\n",
" def forward(self, x):\n",
" return self.layers(x)\n",
"\n",
"\n",
"problem = BayesianProblem()\n",
"\n",
"# model design (step 2)\n",
"model = BayesianModel()\n",
"model = BayesianModel()\n",
"\n",
"# solver selection (step 3)\n",
"solver = SupervisedSolver(problem, model)\n",
"solver = SupervisedSolver(problem, model)\n",
"\n",
"# training (step 4)\n",
"trainer = Trainer(solver=solver, max_epochs=2000, accelerator='cpu')\n",
"trainer = Trainer(solver=solver, max_epochs=2000, accelerator=\"cpu\")\n",
"trainer.train()"
]
},
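The training cell in the hunk above, decoded to plain Python. Everything here is taken from the reformatted lines, with the comments expanded slightly; the `BayesianProblem` class comes from the previous cell:

```python
import torch

from pina.solver import SupervisedSolver
from pina.trainer import Trainer


# Model design (step 2): a small fully connected network with dropout, whose
# stochastic forward passes are used later for the uncertainty estimates.
class BayesianModel(torch.nn.Module):
    def __init__(self):
        super().__init__()
        self.layers = torch.nn.Sequential(
            torch.nn.Linear(1, 100),
            torch.nn.ReLU(),
            torch.nn.Dropout(0.5),
            torch.nn.Linear(100, 1),
        )

    def forward(self, x):
        return self.layers(x)


# Problem (step 1), model (step 2), solver (step 3), training (step 4).
problem = BayesianProblem()
model = BayesianModel()
solver = SupervisedSolver(problem, model)
trainer = Trainer(solver=solver, max_epochs=2000, accelerator="cpu")
trainer.train()
```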
@@ -374,17 +380,23 @@
}
],
"source": [
"x_test = LabelTensor(torch.linspace(-4, 4, 100).reshape(-1, 1), 'x')\n",
"x_test = LabelTensor(torch.linspace(-4, 4, 100).reshape(-1, 1), \"x\")\n",
"y_test = torch.stack([solver(x_test) for _ in range(1000)], dim=0)\n",
"y_mean, y_std = y_test.mean(0).detach(), y_test.std(0).detach()\n",
"# plot\n",
"x_test = x_test.flatten()\n",
"y_mean = y_mean.flatten()\n",
"y_std = y_std.flatten()\n",
"plt.plot(x_test, y_mean, label=r'$\\mu_{\\theta}$')\n",
"plt.fill_between(x_test, y_mean-3*y_std, y_mean+3*y_std, alpha=0.3, label=r'3$\\sigma_{\\theta}$')\n",
"plt.plot(x_test, x_test.pow(3), label='true')\n",
"plt.scatter(x, y, label='train data')\n",
"y_std = y_std.flatten()\n",
"plt.plot(x_test, y_mean, label=r\"$\\mu_{\\theta}$\")\n",
"plt.fill_between(\n",
" x_test,\n",
" y_mean - 3 * y_std,\n",
" y_mean + 3 * y_std,\n",
" alpha=0.3,\n",
" label=r\"3$\\sigma_{\\theta}$\",\n",
")\n",
"plt.plot(x_test, x_test.pow(3), label=\"true\")\n",
"plt.scatter(x, y, label=\"train data\")\n",
"plt.legend()\n",
"plt.show()"
]
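The core of the inference cell above is the Monte Carlo loop: repeated forward passes through the dropout network give a predictive mean and standard deviation per test point, which the plotting code then draws as a mean curve with a 3-sigma band, the true cubic, and the training data. A minimal sketch of that loop, assuming `solver` and `LabelTensor` from the cells above:

```python
import torch

# 1000 stochastic forward passes on a test grid; the passes differ because the
# dropout layer remains stochastic at prediction time (Monte Carlo dropout).
x_test = LabelTensor(torch.linspace(-4, 4, 100).reshape(-1, 1), "x")
samples = torch.stack([solver(x_test) for _ in range(1000)], dim=0)
y_mean = samples.mean(dim=0).detach()  # plotted as mu_theta
y_std = samples.std(dim=0).detach()    # y_mean +/- 3 * y_std gives the shaded band
```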
@@ -598,10 +610,10 @@
}
],
"source": [
"print('Points are not automatically sampled, you can see this by:')\n",
"print(\"Points are not automatically sampled, you can see this by:\")\n",
"print(f\" {poisson_problem.are_all_domains_discretised=}\\n\")\n",
"print('But you can easily sample by running .discretise_domain:')\n",
"poisson_problem.discretise_domain(n=1000, domains=['interior'])\n",
"print(\"But you can easily sample by running .discretise_domain:\")\n",
"poisson_problem.discretise_domain(n=1000, domains=[\"interior\"])\n",
"poisson_problem.discretise_domain(n=100, domains=[\"border\"])\n",
"print(f\" {poisson_problem.are_all_domains_discretised=}\")"
]
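Decoded, the discretisation cell above does the following; `poisson_problem` is the problem instance built earlier in the tutorial and is not shown in this diff:

```python
# No collocation points exist until discretise_domain is called explicitly.
print("Points are not automatically sampled, you can see this by:")
print(f"  {poisson_problem.are_all_domains_discretised=}\n")

# Sample 1000 points in the interior and 100 on the boundary of the domain.
print("But you can easily sample by running .discretise_domain:")
poisson_problem.discretise_domain(n=1000, domains=["interior"])
poisson_problem.discretise_domain(n=100, domains=["border"])
print(f"  {poisson_problem.are_all_domains_discretised=}")
```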
@@ -632,10 +644,10 @@
"from pina.model import FeedForward\n",
"\n",
"model = FeedForward(\n",
" func = torch.nn.Tanh,\n",
" layers=[120]*2,\n",
" func=torch.nn.Tanh,\n",
" layers=[120] * 2,\n",
" output_dimensions=len(poisson_problem.output_variables),\n",
" input_dimensions=len(poisson_problem.input_variables)\n",
" input_dimensions=len(poisson_problem.input_variables),\n",
")"
]
},
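Finally, the reformatted model cell builds a fully connected network whose input and output sizes are read off the problem definition; `[120] * 2` expands to two hidden layers of 120 neurons each. Decoded below, again assuming `poisson_problem` from the earlier cells:

```python
import torch

from pina.model import FeedForward

# Two hidden layers of 120 neurons with tanh activation; input and output
# dimensions are taken from the Poisson problem so the network matches its variables.
model = FeedForward(
    func=torch.nn.Tanh,
    layers=[120] * 2,
    output_dimensions=len(poisson_problem.output_variables),
    input_dimensions=len(poisson_problem.input_variables),
)
```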