add LogPhysEncoder
This commit is contained in:
@@ -4,6 +4,27 @@ from torch_geometric.nn import MessagePassing
|
||||
from torch.nn.utils import spectral_norm
|
||||
|
||||
|
||||
class LogPhysEncoder(nn.Module):
    """Encode inverse grid spacing (1/dx) through a small positive-output MLP.

    The input is mapped into log-space first, which linearizes geometry
    scales spanning many orders of magnitude (micro-meshes up to
    macro-meshes) and avoids numerical instability from raw 1/dx values.
    """

    def __init__(self, hidden_dim):
        super().__init__()
        # Spectrally-normalized 1 -> hidden_dim -> 1 MLP; the final
        # Softplus guarantees a strictly positive output, as required
        # for a physical conductance.
        stages = [
            spectral_norm(nn.Linear(1, hidden_dim)),
            nn.GELU(),
            spectral_norm(nn.Linear(hidden_dim, 1)),
            nn.Softplus(),
        ]
        self.mlp = nn.Sequential(*stages)

    def forward(self, inv_dx):
        """Return the encoded conductance for inverse spacing `inv_dx`.

        A small epsilon keeps the logarithm finite when inv_dx is zero.
        """
        return self.mlp(torch.log(inv_dx + 1e-9))
|
||||
|
||||
|
||||
class DiffusionLayer(MessagePassing):
|
||||
"""
|
||||
Modella: T_new = T_old + dt * Divergenza(Flusso)
|
||||
@@ -22,12 +43,7 @@ class DiffusionLayer(MessagePassing):
|
||||
spectral_norm(nn.Linear(channels, channels, bias=False)),
|
||||
)
|
||||
|
||||
self.phys_encoder = nn.Sequential(
|
||||
spectral_norm(nn.Linear(1, 8, bias=True)),
|
||||
nn.Tanh(),
|
||||
spectral_norm(nn.Linear(8, 1, bias=True)),
|
||||
nn.Softplus(),
|
||||
)
|
||||
self.phys_encoder = LogPhysEncoder(hidden_dim=channels)
|
||||
|
||||
self.alpha_param = nn.Parameter(torch.tensor(1e-2))
|
||||
|
||||
|
||||
Reference in New Issue
Block a user