new data format
@@ -16,48 +16,79 @@ def import_class(class_path: str):
def _plot_mesh(pos_, y_, y_pred_, y_true_, batch, i, batch_idx):
for j in [0, 10, 20, 30]:
# print(pos_.shape, y_.shape, y_pred_.shape, y_true_.shape)
for j in [0]:
idx = (batch == j).nonzero(as_tuple=True)[0]
y = y_[idx].detach().cpu()
y_pred = y_pred_[idx].detach().cpu()
pos = pos_[idx].detach().cpu()
# print(pos.shape, y.shape, y_pred.shape)
y_true = y_true_[idx].detach().cpu()
y_true = torch.clamp(y_true, min=0)
folder = f"{j:02d}_images"
if os.path.exists(folder) is False:
os.makedirs(folder)
pos = pos.detach().cpu()
tria = Triangulation(pos[:, 0], pos[:, 1])
plt.figure(figsize=(24, 5))
plt.subplot(1, 4, 1)
plt.tricontourf(tria, y.squeeze().numpy(), levels=100)
plt.colorbar()
plt.title("Step t-1")
plt.subplot(1, 4, 2)
plt.tricontourf(tria, y_pred.squeeze().numpy(), levels=100)
plt.figure(figsize=(18, 6))
# plt.subplot(1, 4, 1)
# plt.tricontourf(tria, y.squeeze().numpy(), levels=100)
# plt.colorbar()
# plt.title("Step t-1")
# plt.tripcolor(tria, y_pred.squeeze().numpy()
# plt.savefig("test_scatter_step_before.png", dpi=72)
# x = z
plt.subplot(1, 3, 1)
# plt.tricontourf(tria, y_pred.squeeze().numpy(), levels=100)
plt.scatter(
pos[:, 0],
pos[:, 1],
c=y_pred.squeeze().numpy(),
s=20,
cmap="viridis",
)
plt.colorbar()
plt.title("Step t Predicted")
plt.subplot(1, 4, 3)
plt.tricontourf(tria, y_true.squeeze().numpy(), levels=100)
plt.subplot(1, 3, 2)
# plt.tricontourf(tria, y_true.squeeze().numpy(), levels=100)
plt.scatter(
pos[:, 0],
pos[:, 1],
c=y_true.squeeze().numpy(),
s=20,
cmap="viridis",
)
plt.colorbar()
plt.title("t True")
plt.subplot(1, 4, 4)
plt.tricontourf(tria, (y_true - y_pred).squeeze().numpy(), levels=100)
plt.subplot(1, 3, 3)
per_element_relative_error = torch.abs(y_pred - y_true) / torch.clamp(
torch.abs(y_true), min=1e-6
)
# plt.tricontourf(tria, per_element_relative_error.squeeze(), levels=100)
plt.scatter(
pos[:, 0],
pos[:, 1],
c=per_element_relative_error.squeeze().numpy(),
s=20,
cmap="viridis",
)
plt.colorbar()
plt.title("Error")
plt.title("Relative Error")
plt.suptitle("GNO", fontsize=16)
name = f"{folder}/{j:04d}_graph_iter_{i:04d}.png"
plt.savefig(name, dpi=72)
plt.close()


def _plot_losses(losses, batch_idx):
def _plot_losses(test_losses, batch_idx):
folder = f"{batch_idx:02d}_images"
plt.figure()
plt.plot(losses)
for i, losses in enumerate(test_losses):
plt.plot(losses)
if i == 3:
break
plt.yscale("log")
plt.xlabel("Iteration")
plt.ylabel("Loss")
plt.ylabel("Relative Error")
plt.title("Test Loss over Iterations")
plt.grid(True)
file_name = f"{folder}/test_loss.png"
@@ -80,6 +111,7 @@ class GraphSolver(LightningModule):
# print(f"Param: {param[0]}")
self.loss = loss if loss is not None else torch.nn.MSELoss()
self.unrolling_steps = unrolling_steps
self.test_losses = []

def _compute_loss(self, x, y):
return self.loss(x, y)
@@ -149,7 +181,7 @@ class GraphSolver(LightningModule):
self._log_loss(loss, batch, "train")
for i, layer in enumerate(self.model.layers):
self.log(
f"alpha_{i}",
f"{i:03d}_alpha",
layer.alpha,
prog_bar=True,
on_epoch=True,
@@ -205,10 +237,10 @@ class GraphSolver(LightningModule):
self._log_loss(loss, batch, "val")
return loss

def _check_convergence(self, y_pred, y_true, tol=1e-3):
l2_norm = torch.norm(y_pred - y_true, p=2)
y_true_norm = torch.norm(y_true, p=2)
rel_error = l2_norm / (y_true_norm + 1e-8)
def _check_convergence(self, y_new, y_old, tol=1e-3):
l2_norm = torch.norm(y_new, p=2) - torch.norm(y_old, p=2)
y_old_norm = torch.norm(y_old, p=2)
rel_error = l2_norm / (y_old_norm)
return rel_error.item() < tol
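For comparison, the removed _check_convergence measures the relative L2 distance between its two arguments, while the replacement compares the difference of their norms against the norm of y_old. A minimal standalone sketch of the residual-style criterion, written as a free function (the helper name is illustrative; the 1e-8 guard follows the removed lines, not this commit):

import torch

def check_convergence_residual(y_new: torch.Tensor, y_old: torch.Tensor, tol: float = 1e-3) -> bool:
    # Relative size of the update between two iterates:
    # ||y_new - y_old||_2 / (||y_old||_2 + eps)
    rel_error = torch.norm(y_new - y_old, p=2) / (torch.norm(y_old, p=2) + 1e-8)
    return rel_error.item() < tol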

def test_step(self, batch: Batch, batch_idx):
@@ -219,7 +251,9 @@ class GraphSolver(LightningModule):
losses = []
all_losses = []
norms = []
for i in range(self.unrolling_steps):
sequence_length = y.size(1)
y = y[:, -1, :].unsqueeze(1)
for i in range(100):
out = self._compute_model_steps(
# torch.cat([x,pos], dim=-1),
x,
@@ -231,34 +265,38 @@ class GraphSolver(LightningModule):
conductivity,
)
norms.append(torch.norm(out - x, p=2).item())
converged = self._check_convergence(out, x)
if batch_idx == 0:
_plot_mesh(
batch.pos,
x,
out,
y[:, -1, :],
batch.batch,
i,
self.current_epoch,
)
x = out
loss = self.loss(out, y[:, i, :])
all_losses.append(loss.item())
loss = self.loss(out, y[:, -1, :])
relative_error = torch.norm(out - y[:, -1, :], p=2) / torch.norm(
y[:, -1, :], p=2
)
all_losses.append(relative_error.item())
losses.append(loss)
# if (
# batch_idx == 0
# and self.current_epoch % 10 == 0
# and self.current_epoch > 0
# ):
# _plot_mesh(
# batch.pos,
# x,
# out,
# y[:, i, :],
# batch.batch,
# i,
# self.current_epoch,
# )
if converged:
print(
f"Test step converged at iteration {i} for batch {batch_idx}"
)
break
loss = torch.stack(losses).mean()
# if (
# batch_idx == 0
# and self.current_epoch % 10 == 0
# and self.current_epoch > 0
# ):
_plot_losses(norms, self.current_epoch)
self.test_losses.append(all_losses)
self._log_loss(loss, batch, "test")
return loss

def on_test_end(self):
if len(self.test_losses) > 0:
_plot_losses(self.test_losses, batch_idx=0)

def configure_optimizers(self):
optimizer = torch.optim.AdamW(self.parameters(), lr=1e-3)
return optimizer
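As a usage note, the test path above (test_step per batch, then on_test_end writing the accumulated curves through _plot_losses) is normally driven by Lightning's test loop. A minimal sketch, assuming a trained GraphSolver instance named model and a datamodule with a test dataloader (both names are placeholders, not from this commit):

import pytorch_lightning as pl

trainer = pl.Trainer(accelerator="auto", devices=1, logger=False)
# Runs test_step for every test batch, then calls on_test_end,
# which plots the collected relative-error curves.
trainer.test(model, datamodule=datamodule)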