import torch
import torch.nn as nn
from torch_geometric.nn import MessagePassing


class FiniteDifferenceStep(MessagePassing):
    """
    One relaxation step of a graph finite-difference scheme.

    Each node receives a degree-normalised, edge-weighted average of its
    neighbours' states, blended with its own current state through the
    mixing weight ``root_weight``.
    """

    def __init__(self, aggr: str = "add", root_weight: float = 1.0):
        super().__init__(aggr=aggr)
        assert aggr == "add", "Weighted sums require 'add' aggregation."
        # Learnable scaling of the neighbour contributions.
        self.p = nn.Parameter(torch.tensor(0.8))
        # Fixed (non-learnable) mixing weight; stored as a buffer so that
        # torch.clamp in update() receives a tensor and the value follows
        # the module across devices.
        self.register_buffer("a", torch.tensor(float(root_weight)))

    def forward(self, x, edge_index, edge_attr, deg):
        """Run one propagation step and return the updated node states."""
        out = self.propagate(edge_index, x=x, edge_attr=edge_attr, deg=deg)
        return out

    def message(self, x_j, edge_attr):
        """Scale each neighbour's state by the clamped weight ``p`` and the edge weight."""
        p = torch.clamp(self.p, 0.0, 1.0)
        return p * edge_attr.view(-1, 1) * x_j

    def aggregate(self, inputs, index, deg):
        """Sum incoming messages per node and normalise by the weighted degree."""
        # Pass dim_size explicitly so nodes without incoming edges still get a row.
        out = super().aggregate(inputs, index, dim_size=deg.size(0))
        deg = deg + 1e-7
        return out / deg.view(-1, 1)

    def update(self, aggr_out, x):
        """Blend the aggregated neighbourhood value with the current node state."""
        a = torch.clamp(self.a, 0.0, 1.0)
        return a * aggr_out + (1 - a) * x


class GraphFiniteDifference(nn.Module):
    """
    Iterative graph finite-difference solver.

    Repeatedly applies :class:`FiniteDifferenceStep` until the residual
    between consecutive iterates drops below a relative threshold or
    ``max_iters`` is reached.
    """

    def __init__(self, max_iters: int = 5000, threshold: float = 1e-4):
        """
        Args:
            max_iters: maximum number of relaxation iterations.
            threshold: relative convergence tolerance on the residual norm.
        """
        super().__init__()
        self.max_iters = max_iters
        self.threshold = threshold
        self.fd_step = FiniteDifferenceStep(aggr="add", root_weight=1.0)

    @staticmethod
    def _compute_deg(edge_index, edge_attr, num_nodes):
        """Weighted in-degree of each node, with a small epsilon for stability."""
        deg = torch.zeros(num_nodes, device=edge_index.device)
        deg = deg.scatter_add(0, edge_index[1], edge_attr)
        return deg + 1e-7

    @staticmethod
    def _compute_c_ij(c, edge_index):
        """Per-edge coefficient: the average of the two endpoint coefficients."""
        return (0.5 * (c[edge_index[0]] + c[edge_index[1]])).squeeze()

    def forward(
        self,
        x,
        edge_index,
        edge_attr,
        c,
        boundary_mask,
        boundary_values,
        **kwargs,
    ):
        """
        Iterate the finite-difference step on ``x``, re-imposing the Dirichlet
        values ``boundary_values`` on the nodes selected by ``boundary_mask``
        at every iteration. Returns the final node states and the number of
        iterations performed.
        """
        # Edge weight: inverse of the last edge-attribute column (e.g. a distance).
        edge_attr = 1 / edge_attr[:, -1]
        c_ij = self._compute_c_ij(c, edge_index)
        edge_attr = edge_attr * c_ij
        deg = self._compute_deg(edge_index, edge_attr, x.size(0))

        # Compute the convergence threshold with x detached from the graph.
        conv_thres = self.threshold * torch.norm(x.detach())

        for i in range(self.max_iters):
            out = self.fd_step(x, edge_index, edge_attr, deg)
            # Re-impose the Dirichlet boundary values.
            out[boundary_mask] = boundary_values.unsqueeze(-1)

            # Convergence check without gradient tracking.
            with torch.no_grad():
                residual_norm = torch.norm(out - x)
                if residual_norm < conv_thres:
                    break

            # --- Key optimisation ---
            # Detach 'out' from the graph before the next iteration to avoid
            # backpropagation through time and save memory.
            x = out.detach()

        # The final 'out' keeps the gradients of the LAST fd_step call,
        # which is what lets the model learn.
        return out, i + 1
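

# ---------------------------------------------------------------------------
# Minimal usage sketch (illustration only, not part of the module's API).
# The graph, edge attributes and boundary data below are made-up placeholders;
# the only assumptions taken from the code above are the expected shapes:
#   x: [N, 1] node states, edge_attr: [E, F] with a distance-like quantity in
#   the last column, c: [N, 1] per-node coefficients, boundary_values: [M]
#   for the M nodes selected by boundary_mask.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    torch.manual_seed(0)

    # Tiny 4-node path graph with edges in both directions.
    edge_index = torch.tensor(
        [[0, 1, 1, 2, 2, 3], [1, 0, 2, 1, 3, 2]], dtype=torch.long
    )
    num_nodes = 4
    num_edges = edge_index.size(1)

    x = torch.rand(num_nodes, 1)          # initial node states
    edge_attr = torch.ones(num_edges, 1)  # last column used as the distance
    c = torch.ones(num_nodes, 1)          # per-node coefficient
    boundary_mask = torch.tensor([True, False, False, True])
    boundary_values = torch.tensor([0.0, 1.0])  # Dirichlet values at nodes 0 and 3

    model = GraphFiniteDifference(max_iters=1000, threshold=1e-5)
    out, n_iters = model(x, edge_index, edge_attr, c, boundary_mask, boundary_values)
    print(f"converged in {n_iters} iterations:", out.squeeze().tolist())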