# thermal-conduction-ml/experiments/config_gno.yaml
# lightning.pytorch==2.5.5
seed_everything: 1999
trainer:
  accelerator: gpu
  strategy: auto
  devices: 1
  num_nodes: 1
  precision: null
  logger:
    - class_path: lightning.pytorch.loggers.TensorBoardLogger
      init_args:
        save_dir: logs
        name: "test"
        version: null
  callbacks:
    - class_path: lightning.pytorch.callbacks.ModelCheckpoint
      init_args:
        monitor: val/loss
        mode: min
        save_top_k: 1
        filename: best-checkpoint
    - class_path: lightning.pytorch.callbacks.EarlyStopping
      init_args:
        monitor: val/loss
        mode: min
        patience: 25
        verbose: false
  max_epochs: 1000
  min_epochs: null
  max_steps: -1
  min_steps: null
  overfit_batches: 0.0
  log_every_n_steps: null
  # inference_mode: true
  default_root_dir: null
  # accumulate_grad_batches: 2
  # gradient_clip_val: 1.0
model:
  class_path: ThermalSolver.graph_module.GraphSolver
  init_args:
    model_class_path: ThermalSolver.model.learnable_finite_difference.CorrectionNet
    curriculum_learning: true
    start_iters: 5
    increase_every: 10
    increase_rate: 2
    max_iters: 2000
    accumulation_iters: 320
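    # Curriculum fields above, as read from this config (the GraphSolver
    # implementation is not shown here): training presumably starts the
    # solver at start_iters = 5 iterations and raises that by increase_rate
    # every increase_every epochs (or steps), capped at max_iters = 2000.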
data:
  class_path: ThermalSolver.graph_datamodule.GraphDataModule
  init_args:
    hf_repo: "SISSAmathLab/thermal-conduction"
    split_name: "1000_40x30"
    batch_size: 32
    train_size: 0.8
    test_size: 0.1
    build_radial_graph: false
    radius: 0.6
    remove_boundary_edges: false
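    # Field names suggest (the GraphDataModule implementation is not shown
    # here) that build_radial_graph: true would connect nodes lying within
    # the given radius of each other, and that the 0.1 fraction left over by
    # train_size + test_size is used for validation.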
optimizer: null
lr_scheduler: null
# ckpt_path: logs/test/version_0/checkpoints/best-checkpoint.ckpt
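
# Usage sketch, assuming a standard LightningCLI entry point (the repo's
# actual launch script is not shown here). The class_path / init_args layout
# above is what LightningCLI expects, so a minimal entry point would be:
#
#     from lightning.pytorch.cli import LightningCLI
#     LightningCLI()  # model/data classes are resolved from class_path entries
#
# and a training run would be started with something like:
#
#     python <entry_point>.py fit --config experiments/config_gno.yaml
#
# Re-enabling the ckpt_path line above would resume from (or evaluate with)
# the saved best checkpoint.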