Compare commits
3cc1d230e4 ... 92104a6b06

6 Commits:
- 92104a6b06
- 68a7def5e6
- db50f5ed69
- 0a034225ef
- 4fdf817d75
- a9d56a3ed9
@@ -15,7 +15,7 @@ def import_class(class_path: str):
    return cls


def _plot_mesh(pos_, y_, y_pred_, y_true_, batch, i, batch_idx):
def _plot_mesh(pos_, y_, y_pred_, y_true_, batch, cells, i, batch_idx):
    # print(pos_.shape, y_.shape, y_pred_.shape, y_true_.shape)
    for j in [0]:
        idx = (batch == j).nonzero(as_tuple=True)[0]
@@ -25,11 +25,12 @@ def _plot_mesh(pos_, y_, y_pred_, y_true_, batch, i, batch_idx):
        # print(pos.shape, y.shape, y_pred.shape)
        y_true = y_true_[idx].detach().cpu()
        y_true = torch.clamp(y_true, min=0)
        folder = f"{j:02d}_images"
        folder = f"{batch_idx:02d}_images"
        if os.path.exists(folder) is False:
            os.makedirs(folder)
        tria = Triangulation(pos[:, 0], pos[:, 1])
        plt.figure(figsize=(18, 6))
        triangles = torch.vstack([cells[:, [0, 1, 2]], cells[:, [0, 2, 3]]])
        tria = Triangulation(pos[:, 0], pos[:, 1], triangles=triangles)
        plt.figure(figsize=(24, 6))
        # plt.subplot(1, 4, 1)
        # plt.tricontourf(tria, y.squeeze().numpy(), levels=100)
        # plt.colorbar()
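The new `triangles` line evidently splits four-node (quad) cells along the v0–v2 diagonal into the two triangles that matplotlib's `Triangulation` expects. A toy sketch of that `vstack` trick on a single unit quad (illustrative data, not the project's mesh):

```python
import torch
from matplotlib.tri import Triangulation

# One quad with corners listed counter-clockwise: (v0, v1, v2, v3).
cells = torch.tensor([[0, 1, 2, 3]])
pos = torch.tensor([[0.0, 0.0], [1.0, 0.0], [1.0, 1.0], [0.0, 1.0]])

# Split along the v0-v2 diagonal: (v0, v1, v2) and (v0, v2, v3).
triangles = torch.vstack([cells[:, [0, 1, 2]], cells[:, [0, 2, 3]]])
tria = Triangulation(pos[:, 0], pos[:, 1], triangles=triangles)
print(tria.triangles)  # [[0 1 2], [0 2 3]]
```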
@@ -37,61 +38,79 @@ def _plot_mesh(pos_, y_, y_pred_, y_true_, batch, i, batch_idx):
        # plt.tripcolor(tria, y_pred.squeeze().numpy()
        # plt.savefig("test_scatter_step_before.png", dpi=72)
        # x = z
        plt.subplot(1, 3, 1)
        # plt.tricontourf(tria, y_pred.squeeze().numpy(), levels=100)
        plt.scatter(
            pos[:, 0],
            pos[:, 1],
            c=y_pred.squeeze().numpy(),
            s=20,
            cmap="viridis",
        )
        plt.subplot(1, 4, 1)
        plt.tricontourf(tria, y_pred.squeeze().numpy(), levels=100)
        # plt.scatter(pos[:, 0], pos[:, 1], c=y_pred.squeeze().numpy(), s=20, cmap="viridis",)
        plt.colorbar()
        plt.title("Step t Predicted")
        plt.subplot(1, 3, 2)
        # plt.tricontourf(tria, y_true.squeeze().numpy(), levels=100)
        plt.scatter(
            pos[:, 0],
            pos[:, 1],
            c=y_true.squeeze().numpy(),
            s=20,
            cmap="viridis",
        )
        plt.title(f"Prediction at timestep {i:03d}")
        plt.subplot(1, 4, 2)
        plt.tricontourf(tria, y_true.squeeze().numpy(), levels=100)
        # plt.scatter(pos[:, 0], pos[:, 1], c=y_true.squeeze().numpy(), s=20, cmap="viridis")
        plt.colorbar()
        plt.title("t True")
        plt.subplot(1, 3, 3)
        per_element_relative_error = torch.abs(y_pred - y_true) / torch.clamp(
            torch.abs(y_true), min=1e-6
        plt.title("Ground Truth Steady State")
        plt.subplot(1, 4, 3)
        per_element_relative_error = torch.abs(y_pred - y_true) / (
            y_true + 1e-6
        )
        # plt.tricontourf(tria, per_element_relative_error.squeeze(), levels=100)
        plt.scatter(
            pos[:, 0],
            pos[:, 1],
            c=per_element_relative_error.squeeze().numpy(),
            s=20,
            cmap="viridis",
        per_element_relative_error = torch.clamp(
            per_element_relative_error, max=1.0, min=0.0
        )
        plt.tricontourf(
            tria,
            per_element_relative_error.squeeze(),
            levels=100,
            vmin=0,
            vmax=1.0,
        )
        # plt.scatter(pos[:, 0], pos[:, 1], c=per_element_relative_error.squeeze().numpy(), s=20, cmap="viridis", vmin=0, vmax=1.0)
        plt.colorbar()
        plt.title("Relative Error")
        plt.subplot(1, 4, 4)
        absolute_error = torch.abs(y_pred - y_true)
        plt.tricontourf(tria, absolute_error.squeeze(), levels=100)
        # plt.scatter(pos[:, 0], pos[:, 1], c=absolute_error.squeeze().numpy(), s=20, cmap="viridis")
        plt.colorbar()
        plt.title("Absolute Error")
        plt.suptitle("GNO", fontsize=16)
        name = f"{folder}/{j:04d}_graph_iter_{i:04d}.png"
        plt.savefig(name, dpi=72)
        plt.close()

def _plot_losses(test_losses, batch_idx):
    folder = f"{batch_idx:02d}_images"
    plt.figure()
def _plot_losses(relative_errors, test_losses, relative_update, batch_idx):
    # folder = f"{batch_idx:02d}_images"
    plt.figure(figsize=(18, 6))
    plt.subplot(1, 3, 1)
    for i, losses in enumerate(test_losses):
        plt.plot(losses)
        if i == 3:
            break
    plt.yscale("log")
    plt.xlabel("Iteration")
    plt.ylabel("Relative Error")
    plt.ylabel("Test Loss")
    plt.title("Test Loss over Iterations")
    plt.grid(True)
    file_name = f"{folder}/test_loss.png"
    plt.subplot(1, 3, 2)
    for i, losses in enumerate(relative_errors):
        plt.plot(losses)
        if i == 3:
            break
    plt.yscale("log")
    plt.xlabel("Iteration")
    plt.ylabel("Relative Error")
    plt.title("Relative error over Iterations")
    plt.grid(True)
    plt.subplot(1, 3, 3)
    for i, updates in enumerate(relative_update):
        plt.plot(updates)
        if i == 3:
            break
    plt.yscale("log")
    plt.xlabel("Iteration")
    plt.ylabel("Relative Update")
    plt.title("Relative update over Iterations")
    plt.grid(True)
    file_name = f"test_errors.png"
    plt.savefig(file_name, dpi=300)
    plt.close()

@@ -112,6 +131,8 @@ class GraphSolver(LightningModule):
        self.loss = loss if loss is not None else torch.nn.MSELoss()
        self.unrolling_steps = unrolling_steps
        self.test_losses = []
        self.test_relative_errors = []
        self.test_relative_updates = []

    def _compute_loss(self, x, y):
        return self.loss(x, y)
@@ -166,6 +187,7 @@ class GraphSolver(LightningModule):
        )
        losses = []
        for i in range(self.unrolling_steps):
            # print(f"Training step {i+1}/{self.unrolling_steps}")
            out = self._compute_model_steps(
                x,
                edge_index,
@@ -216,20 +238,20 @@ class GraphSolver(LightningModule):
                batch.boundary_values,
                conductivity,
            )
            if (
                batch_idx == 0
                and self.current_epoch % 10 == 0
                and self.current_epoch > 0
            ):
                _plot_mesh(
                    batch.pos,
                    x,
                    out,
                    y[:, i, :],
                    batch.batch,
                    i,
                    self.current_epoch,
                )
            # if (
            #     batch_idx == 0
            #     and self.current_epoch % 10 == 0
            #     and self.current_epoch > 0
            # ):
            #     _plot_mesh(
            #         batch.pos,
            #         x,
            #         out,
            #         y[:, i, :],
            #         batch.batch,
            #         i,
            #         self.current_epoch,
            #     )
            x = out
            losses.append(self.loss(out, y[:, i, :]))

@@ -237,11 +259,11 @@ class GraphSolver(LightningModule):
        self._log_loss(loss, batch, "val")
        return loss

    def _check_convergence(self, y_new, y_old, tol=1e-3):
        l2_norm = torch.norm(y_new, p=2) - torch.norm(y_old, p=2)
    def _check_convergence(self, y_new, y_old, tol=1e-4):
        l2_norm = torch.norm(y_new - y_old, p=2)
        y_old_norm = torch.norm(y_old, p=2)
        rel_error = l2_norm / (y_old_norm)
        return rel_error.item() < tol
        return rel_error.item() < tol, rel_error.item()
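The rewritten `_check_convergence` replaces a difference of norms, which is zero for any two fields of equal magnitude, with the relative update ‖y_new − y_old‖₂ / ‖y_old‖₂, and now returns the value alongside the flag. A standalone sketch of the new criterion (the `eps` guard is an addition here, not part of the diff):

```python
import torch

def relative_update(y_new: torch.Tensor, y_old: torch.Tensor, eps: float = 1e-12) -> float:
    # ||y_new - y_old|| / ||y_old|| is small only when the fields actually coincide;
    # the old ||y_new|| - ||y_old|| vanishes for ANY two fields of equal norm.
    return (torch.norm(y_new - y_old, p=2) / (torch.norm(y_old, p=2) + eps)).item()

y_old = torch.tensor([1.0, 0.0])
y_new = torch.tensor([0.0, 1.0])      # same norm, completely different field
print(relative_update(y_new, y_old))  # ~1.414; the old criterion would report 0.0
```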
    def test_step(self, batch: Batch, batch_idx):
        x, y, edge_index, edge_attr, conductivity = self._preprocess_batch(
@@ -251,9 +273,14 @@ class GraphSolver(LightningModule):
        losses = []
        all_losses = []
        norms = []
        s = []
        relative_updates = []
        sequence_length = y.size(1)
        y = y[:, -1, :].unsqueeze(1)
        for i in range(100):
        _plot_mesh(
            batch.pos, x, x, y[:, -1, :], batch.batch, batch.cells, 0, batch_idx
        )
        for i in range(200):
            out = self._compute_model_steps(
                # torch.cat([x,pos], dim=-1),
                x,
@@ -265,23 +292,27 @@ class GraphSolver(LightningModule):
                conductivity,
            )
            norms.append(torch.norm(out - x, p=2).item())
            converged = self._check_convergence(out, x)
            if batch_idx == 0:
            converged, relative_update = self._check_convergence(out, x)
            relative_updates.append(relative_update)
            if batch_idx <= 4:
                print(f"Plotting iteration {i}, norm diff: {norms[-1]}")
                _plot_mesh(
                    batch.pos,
                    x,
                    out,
                    y[:, -1, :],
                    batch.batch,
                    i,
                    self.current_epoch,
                    batch.cells,
                    i + 1,
                    batch_idx,
                )
            x = out
            loss = self.loss(out, y[:, -1, :])
            relative_error = torch.norm(out - y[:, -1, :], p=2) / torch.norm(
                y[:, -1, :], p=2
            relative_error = torch.abs(out - y[:, -1, :]) / (
                torch.abs(y[:, -1, :]) + 1e-6
            )
            all_losses.append(relative_error.item())
            mean_relative_error = relative_error.mean()
            all_losses.append(mean_relative_error.item())
            losses.append(loss)
            if converged:
                print(
@@ -289,13 +320,20 @@ class GraphSolver(LightningModule):
                )
                break
        loss = torch.stack(losses).mean()
        self.test_losses.append(all_losses)
        self.test_losses.append(losses)
        self.test_relative_errors.append(all_losses)
        self.test_relative_updates.append(relative_updates)
        self._log_loss(loss, batch, "test")
        return loss

    def on_test_end(self):
        if len(self.test_losses) > 0:
            _plot_losses(self.test_losses, batch_idx=0)
            _plot_losses(
                self.test_relative_errors,
                self.test_losses,
                self.test_relative_updates,
                batch_idx=0,
            )

    def configure_optimizers(self):
        optimizer = torch.optim.AdamW(self.parameters(), lr=1e-3)

@@ -82,7 +82,9 @@ class GraphDataModule(LightningDataModule):
            conductivity = torch.tensor(
                snapshot["conductivity"], dtype=torch.float32
            )
            temperature = torch.tensor(snapshot["temperature"], dtype=torch.float32)
            temperature = torch.tensor(
                snapshot["temperature"], dtype=torch.float32
            )[:50]

            pos = torch.tensor(geometry["points"], dtype=torch.float32)[:, :2]

@@ -1,18 +1,19 @@
import torch
from tqdm import tqdm
from lightning import LightningDataModule
from datasets import load_dataset
from datasets import load_dataset, concatenate_datasets
from torch_geometric.data import Data
from torch_geometric.loader import DataLoader
from torch_geometric.utils import to_undirected
from .mesh_data import MeshData
from typing import List, Union


class GraphDataModule(LightningDataModule):
    def __init__(
        self,
        hf_repo: str,
        split_name: str,
        split_name: Union[str, List[str]],
        n_elements: int = None,
        train_size: float = 0.2,
        val_size: float = 0.1,
@@ -22,6 +23,8 @@ class GraphDataModule(LightningDataModule):
        build_radial_graph: bool = False,
        radius: float = None,
        unrolling_steps: int = 1,
        aggregate_timesteps: int = 1,
        min_normalized_diff: float = 1e-3,
    ):
        super().__init__()
        self.hf_repo = hf_repo
@@ -34,6 +37,9 @@ class GraphDataModule(LightningDataModule):
            None,
        )
        self.unrolling_steps = unrolling_steps
        self.aggregate_timesteps = aggregate_timesteps
        self.min_normalized_diff = min_normalized_diff

        self.geometry_dict = {}
        self.train_size = train_size
        self.val_size = val_size
@@ -44,8 +50,30 @@ class GraphDataModule(LightningDataModule):
        self.radius = radius

    def prepare_data(self):
        dataset = load_dataset(self.hf_repo, name="snapshots")[self.split_name]
        geometry = load_dataset(self.hf_repo, name="geometry")[self.split_name]
        if isinstance(self.split_name, list):
            dataset_list = []
            geometry_list = []
            for split in self.split_name:
                dataset_list.append(
                    load_dataset(self.hf_repo, name="snapshots")[split]
                )
                geometry_list.append(
                    load_dataset(self.hf_repo, name="geometry")[split]
                )

            dataset = concatenate_datasets(dataset_list)
            geometry = concatenate_datasets(geometry_list)
            idx = torch.randperm(len(dataset))
            dataset = dataset.select(idx.tolist())
            geometry = geometry.select(idx.tolist())
        else:
            dataset = load_dataset(self.hf_repo, name="snapshots")[
                self.split_name
            ]
            geometry = load_dataset(self.hf_repo, name="geometry")[
                self.split_name
            ]

        if self.n_elements is not None:
            dataset = dataset.select(range(self.n_elements))
            geometry = geometry.select(range(self.n_elements))
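The single permutation reused for both `select` calls is what keeps row i of the snapshots split aligned with row i of the geometry split after shuffling. A toy sketch with hypothetical stand-in datasets:

```python
from datasets import Dataset, concatenate_datasets
import torch

# Hypothetical stand-ins for the "snapshots" splits being merged.
a = Dataset.from_dict({"id": [0, 1]})
b = Dataset.from_dict({"id": [2, 3]})
merged = concatenate_datasets([a, b])

idx = torch.randperm(len(merged))     # one permutation...
merged = merged.select(idx.tolist())  # ...reused for every paired dataset,
# so a row in "snapshots" keeps pointing at the same row in "geometry".
```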
@@ -86,10 +114,16 @@ class GraphDataModule(LightningDataModule):
                    dim=0,
                )
            )
            print(temperatures.shape)

            if not test:
                for t in range(1, temperatures.size(0)):
                    diff = temperatures[t, :] - temperatures[t - 1, :]
                    norm_diff = torch.norm(diff, p=2) / torch.norm(
                        temperatures[t - 1], p=2
                    )
                    if norm_diff < self.min_normalized_diff:
                        temperatures = temperatures[: t + 1, :]
                        break
            pos = torch.tensor(geometry["points"], dtype=torch.float32)[:, :2]

            if self.build_radial_graph:
                raise NotImplementedError(
                    "Radial graph building not implemented yet."
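The added loop truncates each (non-test) trajectory once it has effectively reached steady state: when the relative step-to-step change falls below `min_normalized_diff`, the remaining frames carry no new signal. A standalone sketch of the same rule (illustrative names, not the datamodule's API):

```python
import torch

def truncate_at_steady_state(temps: torch.Tensor, min_normalized_diff: float = 1e-4) -> torch.Tensor:
    # temps: (T, N) trajectory; cut after the first step whose relative change
    # ||T_t - T_{t-1}|| / ||T_{t-1}|| falls below the threshold.
    for t in range(1, temps.size(0)):
        rel = torch.norm(temps[t] - temps[t - 1]) / torch.norm(temps[t - 1])
        if rel < min_normalized_diff:
            return temps[: t + 1]
    return temps

traj = torch.cat([torch.linspace(1.0, 2.0, 5).unsqueeze(1).expand(5, 3),
                  torch.full((4, 3), 2.0)])
print(truncate_at_steady_state(traj).shape)  # torch.Size([6, 3]): constant tail dropped
```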
@@ -103,9 +137,7 @@ class GraphDataModule(LightningDataModule):
            boundary_mask = torch.tensor(
                geometry["constraints_mask"], dtype=torch.int64
            )
            boundary_values = torch.tensor(
                geometry["constraints_values"], dtype=torch.float32
            )
            boundary_values = temperatures[0, boundary_mask]

            edge_attr = torch.norm(pos[edge_index[0]] - pos[edge_index[1]], dim=1)
            if self.remove_boundary_edges:
@@ -118,6 +150,9 @@ class GraphDataModule(LightningDataModule):
            data = []

            if test:
                cells = geometry.get("cells", None)
                if cells is not None:
                    cells = torch.tensor(cells, dtype=torch.int64)
            data.append(
                MeshData(
                    x=temperatures[0, :].unsqueeze(-1),
@@ -128,6 +163,7 @@ class GraphDataModule(LightningDataModule):
                    edge_attr=edge_attr,
                    boundary_mask=boundary_mask,
                    boundary_values=boundary_values,
                    cells=cells,
                )
            )
            return data
@@ -203,7 +239,7 @@ class GraphDataModule(LightningDataModule):
            batch_size=self.batch_size,
            shuffle=True,
            num_workers=8,
            pin_memory=True,
            pin_memory=False,
        )

    def val_dataloader(self):
@@ -216,7 +252,7 @@ class GraphDataModule(LightningDataModule):
            batch_size=128,
            shuffle=False,
            num_workers=8,
            pin_memory=True,
            pin_memory=False,
        )

    def test_dataloader(self):
@@ -226,5 +262,5 @@ class GraphDataModule(LightningDataModule):
            batch_size=1,
            shuffle=False,
            num_workers=8,
            pin_memory=True,
            pin_memory=False,
        )

@@ -4,6 +4,27 @@ from torch_geometric.nn import MessagePassing
from torch.nn.utils import spectral_norm


class LogPhysEncoder(nn.Module):
    """
    Processes 1/dx in log-space to handle multiple scales of geometry
    (from micro-meshes to macro-meshes) without numerical instability.
    """

    def __init__(self, hidden_dim):
        super().__init__()
        self.mlp = nn.Sequential(
            spectral_norm(nn.Linear(1, hidden_dim)),
            nn.GELU(),
            spectral_norm(nn.Linear(hidden_dim, 1)),
            nn.Softplus(),  # Physical conductance must be positive
        )

    def forward(self, inv_dx):
        # We use log(1/dx) to linearize the scale of different geometries
        log_inv_dx = torch.log(inv_dx + 1e-9)
        return self.mlp(log_inv_dx)

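A usage sketch for `LogPhysEncoder` with toy values (the one-scalar-per-edge `1/dx` shape is an assumption here): spacings spanning four orders of magnitude collapse into a modest, roughly linear log-range, which is what the inner MLP actually sees.

```python
import torch

# Edge spacings spanning four orders of magnitude...
dx = torch.tensor([[1e-4], [1e-2], [1.0]])
inv_dx = 1.0 / dx
# ...become a bounded, roughly linear range in log-space:
print(torch.log(inv_dx + 1e-9).squeeze())  # ~[9.2, 4.6, 0.0]

# encoder = LogPhysEncoder(hidden_dim=16)   # class defined in the diff above
# conductance = encoder(inv_dx)             # positive, thanks to the Softplus head
```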
class DiffusionLayer(MessagePassing):
    """
    Models: T_new = T_old + dt * Divergence(Flux)
@@ -22,12 +43,7 @@ class DiffusionLayer(MessagePassing):
            spectral_norm(nn.Linear(channels, channels, bias=False)),
        )

        self.phys_encoder = nn.Sequential(
            spectral_norm(nn.Linear(1, 8, bias=True)),
            nn.Tanh(),
            spectral_norm(nn.Linear(8, 1, bias=True)),
            nn.Softplus(),
        )
        self.phys_encoder = LogPhysEncoder(hidden_dim=channels)

        self.alpha_param = nn.Parameter(torch.tensor(1e-2))

@@ -123,3 +139,4 @@ class DiffusionNet(nn.Module):
        # 6. Final Update (Explicit Euler Step)
        # T_new = T_old + Correction
        return delta_x + x_input * self.dt
        # return delta_x

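For reference, a minimal standalone sketch of the explicit-Euler update the docstring describes, with a plain per-edge flux in place of the learned one (all names here are illustrative, not the module's API):

```python
import torch

def euler_diffusion_step(T: torch.Tensor, edge_index: torch.Tensor,
                         conductance: torch.Tensor, dt: float) -> torch.Tensor:
    # T_new = T_old + dt * Divergence(Flux), with flux k_ij * (T_j - T_i) per edge.
    src, dst = edge_index
    flux = conductance * (T[src] - T[dst])
    div = torch.zeros_like(T).index_add_(0, dst, flux)
    return T + dt * div

T = torch.tensor([1.0, 0.0])
edge_index = torch.tensor([[0, 1], [1, 0]])  # one undirected edge, both directions
# Heat flows from the hot node into the cold one:
print(euler_diffusion_step(T, edge_index, torch.ones(2), dt=0.1))  # tensor([0.9000, 0.1000])
```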
@@ -0,0 +1,72 @@
# lightning.pytorch==2.5.5
seed_everything: 1999
trainer:
  accelerator: gpu
  strategy: auto
  devices: 1
  num_nodes: 1
  precision: null
  logger:
    - class_path: lightning.pytorch.loggers.WandbLogger
      init_args:
        save_dir: logs.autoregressive.wandb
        project: "thermal-conduction-unsteady-10.steps"
        name: "16_layer_16_hidden.adaptive_refined"
  callbacks:
    # - class_path: lightning.pytorch.callbacks.ModelCheckpoint
    #   init_args:
    #     dirpath: logs.autoregressive.wandb/16_refined.10_steps/checkpoints
    #     monitor: val/loss
    #     mode: min
    #     save_top_k: 1
    #     filename: best-checkpoint
    # - class_path: lightning.pytorch.callbacks.EarlyStopping
    #   init_args:
    #     monitor: val/loss
    #     mode: min
    #     patience: 30
    #     verbose: false
    - class_path: ThermalSolver.switch_dataloader_callback.SwitchDataLoaderCallback
      init_args:
        increase_unrolling_steps_by: 4
        patience: 5
        last_patience: 15
        max_unrolling_steps: 10
        ckpt_path: logs.autoregressive.wandb/10_steps/basic.adaptive_refined/16_layer_16_hidden/
  max_epochs: 1000
  min_epochs: null
  max_steps: -1
  min_steps: null
  overfit_batches: 0.0
  log_every_n_steps: 0
  accumulate_grad_batches: 1
  default_root_dir: null
  gradient_clip_val: 1.0

model:
  class_path: ThermalSolver.autoregressive_module.GraphSolver
  init_args:
    model_class_path: ThermalSolver.model.diffusion_net.DiffusionNet
    model_init_args:
      input_dim: 1
      hidden_dim: 16
      output_dim: 1
      n_layers: 16
    unrolling_steps: 2

data:
  class_path: ThermalSolver.graph_datamodule_unsteady.GraphDataModule
  init_args:
    hf_repo: "SISSAmathLab/thermal-conduction-unsteady"
    split_name: "3_stripes.basic.1_adaptive_refined"
    n_elements: 100
    batch_size: 24
    train_size: 0.7
    val_size: 0.2
    test_size: 0.1
    build_radial_graph: false
    remove_boundary_edges: true
    unrolling_steps: 2
    min_normalized_diff: 1e-4
optimizer: null
lr_scheduler: null
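A note on the SwitchDataLoaderCallback block above, inferred from its init_args alone (the callback's implementation is not part of this diff): training apparently starts from the model's `unrolling_steps: 2` and, whenever validation stalls for `patience` epochs, the unrolling horizon grows by `increase_unrolling_steps_by` until `max_unrolling_steps` is reached, with `last_patience` governing the final stage; `ckpt_path` points at the directory holding per-stage best checkpoints (compare the `6_unrolling_best_checkpoint.ckpt` file resumed from in a later config).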
@@ -0,0 +1,63 @@
# lightning.pytorch==2.5.5
seed_everything: 1999
trainer:
  accelerator: gpu
  strategy: auto
  devices: 1
  num_nodes: 1
  precision: null
  logger:
    - class_path: lightning.pytorch.loggers.WandbLogger
      init_args:
        save_dir: logs.autoregressive.wandb
        project: "thermal-conduction-unsteady-10.steps"
        name: "16_layer_16_hidden.adaptive_refined.combined"
  callbacks:
    - class_path: ThermalSolver.switch_dataloader_callback.SwitchDataLoaderCallback
      init_args:
        increase_unrolling_steps_by: 4
        patience: 5
        last_patience: 10
        max_unrolling_steps: 10
        ckpt_path: logs.autoregressive.wandb/10_steps/basic.adaptive_refined.combined/16_layer_16_hidden/
  max_epochs: 1000
  min_epochs: null
  max_steps: -1
  min_steps: null
  overfit_batches: 0.0
  log_every_n_steps: 0
  accumulate_grad_batches: 1
  default_root_dir: null
  gradient_clip_val: 1.0

model:
  class_path: ThermalSolver.autoregressive_module.GraphSolver
  init_args:
    model_class_path: ThermalSolver.model.diffusion_net.DiffusionNet
    model_init_args:
      input_dim: 1
      hidden_dim: 16
      output_dim: 1
      n_layers: 16
    unrolling_steps: 2

data:
  class_path: ThermalSolver.graph_datamodule_unsteady.GraphDataModule
  init_args:
    hf_repo: "SISSAmathLab/thermal-conduction-unsteady"
    split_name:
      - "4_stripes.basic.1_adaptive_refined"
      - "3_stripes.basic.1_adaptive_refined"
      - "2_stripes.basic.1_adaptive_refined"
    n_elements: 100
    batch_size: 24
    train_size: 0.7
    val_size: 0.2
    test_size: 0.1
    build_radial_graph: false
    remove_boundary_edges: true
    unrolling_steps: 2
    min_normalized_diff: 1e-4

optimizer: null
lr_scheduler: null
experiments/10_steps/config_16_layer_16_hidden_refined.yaml (new file, 72 lines)
@@ -0,0 +1,72 @@
# lightning.pytorch==2.5.5
seed_everything: 1999
trainer:
  accelerator: gpu
  strategy: auto
  devices: 1
  num_nodes: 1
  precision: null
  logger:
    - class_path: lightning.pytorch.loggers.WandbLogger
      init_args:
        save_dir: logs.autoregressive.wandb
        project: "thermal-conduction-unsteady-10.steps"
        name: "16_layer_16_hidden.refined"
  callbacks:
    # - class_path: lightning.pytorch.callbacks.ModelCheckpoint
    #   init_args:
    #     dirpath: logs.autoregressive.wandb/16_refined.10_steps/checkpoints
    #     monitor: val/loss
    #     mode: min
    #     save_top_k: 1
    #     filename: best-checkpoint
    # - class_path: lightning.pytorch.callbacks.EarlyStopping
    #   init_args:
    #     monitor: val/loss
    #     mode: min
    #     patience: 30
    #     verbose: false
    - class_path: ThermalSolver.switch_dataloader_callback.SwitchDataLoaderCallback
      init_args:
        increase_unrolling_steps_by: 4
        patience: 5
        last_patience: 15
        max_unrolling_steps: 10
        ckpt_path: logs.autoregressive.wandb/10_steps/basic.refined/16_layer_16_hidden/
  max_epochs: 1000
  min_epochs: null
  max_steps: -1
  min_steps: null
  overfit_batches: 0.0
  log_every_n_steps: 0
  accumulate_grad_batches: 1
  default_root_dir: null
  gradient_clip_val: 1.0

model:
  class_path: ThermalSolver.autoregressive_module.GraphSolver
  init_args:
    model_class_path: ThermalSolver.model.diffusion_net.DiffusionNet
    model_init_args:
      input_dim: 1
      hidden_dim: 16
      output_dim: 1
      n_layers: 16
    unrolling_steps: 2

data:
  class_path: ThermalSolver.graph_datamodule_unsteady.GraphDataModule
  init_args:
    hf_repo: "SISSAmathLab/thermal-conduction-unsteady"
    split_name: "3_stripes.basic.refined"
    n_elements: 50
    batch_size: 24
    train_size: 0.7
    val_size: 0.2
    test_size: 0.1
    build_radial_graph: false
    remove_boundary_edges: true
    unrolling_steps: 2
    min_normalized_diff: 1e-4
optimizer: null
lr_scheduler: null
@@ -0,0 +1,77 @@
# lightning.pytorch==2.5.5
seed_everything: 1999
trainer:
  accelerator: gpu
  strategy: auto
  devices: 1
  num_nodes: 1
  precision: null
  logger:
    - class_path: lightning.pytorch.loggers.WandbLogger
      init_args:
        save_dir: logs.autoregressive.wandb
        project: "thermal-conduction-unsteady-10.steps"
        name: "16_layer_16_hidden.refined.combined"
  callbacks:
    # - class_path: lightning.pytorch.callbacks.ModelCheckpoint
    #   init_args:
    #     dirpath: logs.autoregressive.wandb/16_refined.10_steps/checkpoints
    #     monitor: val/loss
    #     mode: min
    #     save_top_k: 1
    #     filename: best-checkpoint
    # - class_path: lightning.pytorch.callbacks.EarlyStopping
    #   init_args:
    #     monitor: val/loss
    #     mode: min
    #     patience: 30
    #     verbose: false
    - class_path: ThermalSolver.switch_dataloader_callback.SwitchDataLoaderCallback
      init_args:
        increase_unrolling_steps_by: 4
        patience: 5
        last_patience: 10
        max_unrolling_steps: 10
        ckpt_path: logs.autoregressive.wandb/10_steps/basic.refined.combined/16_layer_16_hidden/
  max_epochs: 1000
  min_epochs: null
  max_steps: -1
  min_steps: null
  overfit_batches: 0.0
  log_every_n_steps: 0
  accumulate_grad_batches: 1
  default_root_dir: null
  gradient_clip_val: 1.0

model:
  class_path: ThermalSolver.autoregressive_module.GraphSolver
  init_args:
    model_class_path: ThermalSolver.model.diffusion_net.DiffusionNet
    model_init_args:
      input_dim: 1
      hidden_dim: 16
      output_dim: 1
      n_layers: 16
    unrolling_steps: 10

data:
  class_path: ThermalSolver.graph_datamodule_unsteady.GraphDataModule
  init_args:
    hf_repo: "SISSAmathLab/thermal-conduction-unsteady"
    split_name:
      - "4_stripes.basic.refined"
      - "3_stripes.basic.refined"
      - "2_stripes.basic.refined"
    n_elements: 75
    batch_size: 24
    train_size: 0.7
    val_size: 0.2
    test_size: 0.1
    build_radial_graph: false
    remove_boundary_edges: true
    unrolling_steps: 10
    min_normalized_diff: 1e-4

optimizer: null
lr_scheduler: null
ckpt_path: logs.autoregressive.wandb/10_steps/basic.refined.combined/16_layer_16_hidden/6_unrolling_best_checkpoint.ckpt
@@ -0,0 +1,72 @@
# lightning.pytorch==2.5.5
seed_everything: 1999
trainer:
  accelerator: gpu
  strategy: auto
  devices: 1
  num_nodes: 1
  precision: null
  logger:
    - class_path: lightning.pytorch.loggers.WandbLogger
      init_args:
        save_dir: logs.autoregressive.wandb
        project: "thermal-conduction-unsteady-10.steps"
        name: "16_layer_16_hidden.refined.star"
  callbacks:
    # - class_path: lightning.pytorch.callbacks.ModelCheckpoint
    #   init_args:
    #     dirpath: logs.autoregressive.wandb/16_refined.10_steps/checkpoints
    #     monitor: val/loss
    #     mode: min
    #     save_top_k: 1
    #     filename: best-checkpoint
    # - class_path: lightning.pytorch.callbacks.EarlyStopping
    #   init_args:
    #     monitor: val/loss
    #     mode: min
    #     patience: 30
    #     verbose: false
    - class_path: ThermalSolver.switch_dataloader_callback.SwitchDataLoaderCallback
      init_args:
        increase_unrolling_steps_by: 4
        patience: 5
        last_patience: 15
        max_unrolling_steps: 10
        ckpt_path: logs.autoregressive.wandb/10_steps/basic.refined.star/16_layer_16_hidden/
  max_epochs: 1000
  min_epochs: null
  max_steps: -1
  min_steps: null
  overfit_batches: 0.0
  log_every_n_steps: 0
  accumulate_grad_batches: 1
  default_root_dir: null
  gradient_clip_val: 1.0

model:
  class_path: ThermalSolver.autoregressive_module.GraphSolver
  init_args:
    model_class_path: ThermalSolver.model.diffusion_net.DiffusionNet
    model_init_args:
      input_dim: 1
      hidden_dim: 16
      output_dim: 1
      n_layers: 16
    unrolling_steps: 2

data:
  class_path: ThermalSolver.graph_datamodule_unsteady.GraphDataModule
  init_args:
    hf_repo: "SISSAmathLab/thermal-conduction-unsteady"
    split_name: "3_stripes.star"
    n_elements: 100
    batch_size: 24
    train_size: 0.7
    val_size: 0.2
    test_size: 0.1
    build_radial_graph: false
    remove_boundary_edges: true
    unrolling_steps: 2
    min_normalized_diff: 1e-4
optimizer: null
lr_scheduler: null
experiments/10_steps/config_16_layer_16_hidden_star.yaml (new file, 72 lines)
@@ -0,0 +1,72 @@
# lightning.pytorch==2.5.5
seed_everything: 1999
trainer:
  accelerator: gpu
  strategy: auto
  devices: 1
  num_nodes: 1
  precision: null
  logger:
    - class_path: lightning.pytorch.loggers.WandbLogger
      init_args:
        save_dir: logs.autoregressive.wandb
        project: "thermal-conduction-unsteady-10.steps"
        name: "16_layer_16_hidden.star"
  callbacks:
    # - class_path: lightning.pytorch.callbacks.ModelCheckpoint
    #   init_args:
    #     dirpath: logs.autoregressive.wandb/16_refined.10_steps/checkpoints
    #     monitor: val/loss
    #     mode: min
    #     save_top_k: 1
    #     filename: best-checkpoint
    # - class_path: lightning.pytorch.callbacks.EarlyStopping
    #   init_args:
    #     monitor: val/loss
    #     mode: min
    #     patience: 30
    #     verbose: false
    - class_path: ThermalSolver.switch_dataloader_callback.SwitchDataLoaderCallback
      init_args:
        increase_unrolling_steps_by: 4
        patience: 5
        last_patience: 15
        max_unrolling_steps: 10
        ckpt_path: logs.autoregressive.wandb/10_steps/basic.star/16_layer_16_hidden/
  max_epochs: 1000
  min_epochs: null
  max_steps: -1
  min_steps: null
  overfit_batches: 0.0
  log_every_n_steps: 0
  accumulate_grad_batches: 1
  default_root_dir: null
  gradient_clip_val: 1.0

model:
  class_path: ThermalSolver.autoregressive_module.GraphSolver
  init_args:
    model_class_path: ThermalSolver.model.diffusion_net.DiffusionNet
    model_init_args:
      input_dim: 1
      hidden_dim: 16
      output_dim: 1
      n_layers: 16
    unrolling_steps: 2

data:
  class_path: ThermalSolver.graph_datamodule_unsteady.GraphDataModule
  init_args:
    hf_repo: "SISSAmathLab/thermal-conduction-unsteady"
    split_name: "3_stripes.star.refined"
    n_elements: 100
    batch_size: 24
    train_size: 0.7
    val_size: 0.2
    test_size: 0.1
    build_radial_graph: false
    remove_boundary_edges: true
    unrolling_steps: 2
    min_normalized_diff: 1e-4
optimizer: null
lr_scheduler: null
@@ -0,0 +1,75 @@
# lightning.pytorch==2.5.5
seed_everything: 1999
trainer:
  accelerator: gpu
  strategy: auto
  devices: 1
  num_nodes: 1
  precision: null
  logger:
    - class_path: lightning.pytorch.loggers.WandbLogger
      init_args:
        save_dir: logs.autoregressive.wandb
        project: "thermal-conduction-unsteady-10.steps"
        name: "16_layer_16_hidden.star.combined"
  callbacks:
    # - class_path: lightning.pytorch.callbacks.ModelCheckpoint
    #   init_args:
    #     dirpath: logs.autoregressive.wandb/16_refined.10_steps/checkpoints
    #     monitor: val/loss
    #     mode: min
    #     save_top_k: 1
    #     filename: best-checkpoint
    # - class_path: lightning.pytorch.callbacks.EarlyStopping
    #   init_args:
    #     monitor: val/loss
    #     mode: min
    #     patience: 30
    #     verbose: false
    - class_path: ThermalSolver.switch_dataloader_callback.SwitchDataLoaderCallback
      init_args:
        increase_unrolling_steps_by: 4
        patience: 5
        last_patience: 15
        max_unrolling_steps: 10
        ckpt_path: logs.autoregressive.wandb/10_steps/basic.star.combined/16_layer_16_hidden/
  max_epochs: 1000
  min_epochs: null
  max_steps: -1
  min_steps: null
  overfit_batches: 0.0
  log_every_n_steps: 0
  accumulate_grad_batches: 1
  default_root_dir: null
  gradient_clip_val: 1.0

model:
  class_path: ThermalSolver.autoregressive_module.GraphSolver
  init_args:
    model_class_path: ThermalSolver.model.diffusion_net.DiffusionNet
    model_init_args:
      input_dim: 1
      hidden_dim: 16
      output_dim: 1
      n_layers: 16
    unrolling_steps: 2

data:
  class_path: ThermalSolver.graph_datamodule_unsteady.GraphDataModule
  init_args:
    hf_repo: "SISSAmathLab/thermal-conduction-unsteady"
    split_name:
      - "4_stripes.star"
      - "3_stripes.star"
      - "2_stripes.star"
    n_elements: 100
    batch_size: 24
    train_size: 0.7
    val_size: 0.2
    test_size: 0.1
    build_radial_graph: false
    remove_boundary_edges: true
    unrolling_steps: 2
    min_normalized_diff: 1e-4

optimizer: null
lr_scheduler: null
experiments/10_steps/config_8_layer_16_hidden_star.yaml (new file, 72 lines)
@@ -0,0 +1,72 @@
# lightning.pytorch==2.5.5
seed_everything: 1999
trainer:
  accelerator: gpu
  strategy: auto
  devices: 1
  num_nodes: 1
  precision: null
  logger:
    - class_path: lightning.pytorch.loggers.WandbLogger
      init_args:
        save_dir: logs.autoregressive.wandb
        project: "thermal-conduction-unsteady-10.steps"
        name: "8_layer_16_hidden.star"
  callbacks:
    # - class_path: lightning.pytorch.callbacks.ModelCheckpoint
    #   init_args:
    #     dirpath: logs.autoregressive.wandb/16_refined.10_steps/checkpoints
    #     monitor: val/loss
    #     mode: min
    #     save_top_k: 1
    #     filename: best-checkpoint
    # - class_path: lightning.pytorch.callbacks.EarlyStopping
    #   init_args:
    #     monitor: val/loss
    #     mode: min
    #     patience: 30
    #     verbose: false
    - class_path: ThermalSolver.switch_dataloader_callback.SwitchDataLoaderCallback
      init_args:
        increase_unrolling_steps_by: 4
        patience: 5
        last_patience: 15
        max_unrolling_steps: 10
        ckpt_path: logs.autoregressive.wandb/10_steps/basic.star/8_layer_16_hidden/
  max_epochs: 1000
  min_epochs: null
  max_steps: -1
  min_steps: null
  overfit_batches: 0.0
  log_every_n_steps: 0
  accumulate_grad_batches: 1
  default_root_dir: null
  gradient_clip_val: 1.0

model:
  class_path: ThermalSolver.autoregressive_module.GraphSolver
  init_args:
    model_class_path: ThermalSolver.model.diffusion_net.DiffusionNet
    model_init_args:
      input_dim: 1
      hidden_dim: 16
      output_dim: 1
      n_layers: 8
    unrolling_steps: 2

data:
  class_path: ThermalSolver.graph_datamodule_unsteady.GraphDataModule
  init_args:
    hf_repo: "SISSAmathLab/thermal-conduction-unsteady"
    split_name: "3_stripes.star.refined"
    n_elements: 100
    batch_size: 24
    train_size: 0.7
    val_size: 0.2
    test_size: 0.1
    build_radial_graph: false
    remove_boundary_edges: true
    unrolling_steps: 2
    min_normalized_diff: 1e-4
optimizer: null
lr_scheduler: null
@@ -0,0 +1,76 @@
# lightning.pytorch==2.5.5
seed_everything: 1999
trainer:
  accelerator: cpu
  strategy: auto
  devices: 1
  num_nodes: 1
  precision: null
  # logger:
  #   - class_path: lightning.pytorch.loggers.WandbLogger
  #     init_args:
  #       save_dir: logs.autoregressive.wandb
  #       project: "thermal-conduction-unsteady-10.steps"
  #       name: "16_layer_16_hidden.adaptive_refined.combined"
  # callbacks:
  #   - class_path: lightning.pytorch.callbacks.ModelCheckpoint
  #     init_args:
  #       dirpath: logs.autoregressive.wandb/16_refined.10_steps/checkpoints
  #       monitor: val/loss
  #       mode: min
  #       save_top_k: 1
  #       filename: best-checkpoint
  #   - class_path: lightning.pytorch.callbacks.EarlyStopping
  #     init_args:
  #       monitor: val/loss
  #       mode: min
  #       patience: 30
  #       verbose: false
  #   - class_path: ThermalSolver.switch_dataloader_callback.SwitchDataLoaderCallback
  #     init_args:
  #       increase_unrolling_steps_by: 4
  #       patience: 15
  #       last_patience: 20
  #       max_unrolling_steps: 10
  #       ckpt_path: logs.autoregressive.wandb/10_steps/basic.adaptive_refined.combined/16_layer_16_hidden/
  max_epochs: 1000
  min_epochs: null
  max_steps: -1
  min_steps: null
  overfit_batches: 0.0
  log_every_n_steps: 0
  accumulate_grad_batches: 1
  default_root_dir: null
  gradient_clip_val: 1.0

model:
  class_path: ThermalSolver.autoregressive_module.GraphSolver
  init_args:
    model_class_path: ThermalSolver.model.diffusion_net.DiffusionNet
    model_init_args:
      input_dim: 1
      hidden_dim: 16
      output_dim: 1
      n_layers: 16
    unrolling_steps: 2

data:
  class_path: ThermalSolver.graph_datamodule_unsteady.GraphDataModule
  init_args:
    hf_repo: "SISSAmathLab/thermal-conduction-unsteady"
    split_name:
      # - "2_stripes.basic.refined"
      # - "3_stripes.basic.refined"
      # - "4_stripes.basic.1_adaptive_refined"
      - "3_stripes.star"
    n_elements: 50
    batch_size: 32
    train_size: 0.7
    val_size: 0.2
    test_size: 0.1
    build_radial_graph: false
    remove_boundary_edges: true
    unrolling_steps: 2
optimizer: null
lr_scheduler: null
ckpt_path: /home/folivo/storage/thermal-conduction-ml/logs.autoregressive.wandb/10_steps/basic.star.combined/16_layer_16_hidden/10_unrolling_best_checkpoint.ckpt