* solvers -> solver
* adaptive_functions -> adaptive_function
* callbacks -> callback
* operators -> operator
* pinns -> physics_informed_solver
* layers -> block
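
For example, a typical import changes as follows (illustrative sketch: the old path is inferred from the plural module name, the new path is the one used by the updated tests):

    from pina.solvers import PINN   # before this commit
    from pina.solver import PINN    # after this commit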
Dario Coscia
2025-02-19 11:35:43 +01:00
committed by Nicola Demo
parent 810d215ca0
commit df673cad4e
90 changed files with 155 additions and 151 deletions


@@ -0,0 +1,156 @@
import torch
import pytest
from pina import LabelTensor, Condition
from pina.problem import SpatialProblem
from pina.solver import CausalPINN
from pina.trainer import Trainer
from pina.model import FeedForward
from pina.problem.zoo import (
DiffusionReactionProblem,
InverseDiffusionReactionProblem
)
from pina.condition import (
InputOutputPointsCondition,
InputPointsEquationCondition,
DomainEquationCondition
)
from torch._dynamo.eval_frame import OptimizedModule
class DummySpatialProblem(SpatialProblem):
'''
A mock spatial problem for testing purposes.
'''
output_variables = ['u']
conditions = {}
spatial_domain = None
# define problems and model
problem = DiffusionReactionProblem()
problem.discretise_domain(50)
inverse_problem = InverseDiffusionReactionProblem()
inverse_problem.discretise_domain(50)
model = FeedForward(
len(problem.input_variables),
len(problem.output_variables)
)
# add input-output condition to test supervised learning
input_pts = torch.rand(50, len(problem.input_variables))
input_pts = LabelTensor(input_pts, problem.input_variables)
output_pts = torch.rand(50, len(problem.output_variables))
output_pts = LabelTensor(output_pts, problem.output_variables)
problem.conditions['data'] = Condition(
input_points=input_pts,
output_points=output_pts
)
@pytest.mark.parametrize("problem", [problem, inverse_problem])
@pytest.mark.parametrize("eps", [100, 100.1])
def test_constructor(problem, eps):
with pytest.raises(ValueError):
CausalPINN(model=model, problem=DummySpatialProblem())
solver = CausalPINN(model=model, problem=problem, eps=eps)
assert solver.accepted_conditions_types == (
InputOutputPointsCondition,
InputPointsEquationCondition,
DomainEquationCondition
)
@pytest.mark.parametrize("problem", [problem, inverse_problem])
@pytest.mark.parametrize("batch_size", [None, 1, 5, 20])
@pytest.mark.parametrize("compile", [True, False])
def test_solver_train(problem, batch_size, compile):
solver = CausalPINN(model=model, problem=problem)
trainer = Trainer(solver=solver,
max_epochs=2,
accelerator='cpu',
batch_size=batch_size,
train_size=1.,
val_size=0.,
test_size=0.,
compile=compile)
trainer.train()
if trainer.compile:
assert (isinstance(solver.model, OptimizedModule))
@pytest.mark.parametrize("problem", [problem, inverse_problem])
@pytest.mark.parametrize("batch_size", [None, 1, 5, 20])
@pytest.mark.parametrize("compile", [True, False])
def test_solver_validation(problem, batch_size, compile):
solver = CausalPINN(model=model, problem=problem)
trainer = Trainer(solver=solver,
max_epochs=2,
accelerator='cpu',
batch_size=batch_size,
train_size=0.9,
val_size=0.1,
test_size=0.,
compile=compile)
trainer.train()
if trainer.compile:
assert (isinstance(solver.model, OptimizedModule))
@pytest.mark.parametrize("problem", [problem, inverse_problem])
@pytest.mark.parametrize("batch_size", [None, 1, 5, 20])
@pytest.mark.parametrize("compile", [True, False])
def test_solver_test(problem, batch_size, compile):
solver = CausalPINN(model=model, problem=problem)
trainer = Trainer(solver=solver,
max_epochs=2,
accelerator='cpu',
batch_size=batch_size,
train_size=0.7,
val_size=0.2,
test_size=0.1,
compile=compile)
trainer.test()
if trainer.compile:
assert (isinstance(solver.model, OptimizedModule))
@pytest.mark.parametrize("problem", [problem, inverse_problem])
def test_train_load_restore(problem):
dir = "tests/test_solver/tmp"
solver = CausalPINN(model=model, problem=problem)
trainer = Trainer(solver=solver,
max_epochs=5,
accelerator='cpu',
batch_size=None,
train_size=0.7,
val_size=0.2,
test_size=0.1,
default_root_dir=dir)
trainer.train()
# restore
new_trainer = Trainer(solver=solver, max_epochs=5, accelerator='cpu')
new_trainer.train(
ckpt_path=f'{dir}/lightning_logs/version_0/checkpoints/' +
'epoch=4-step=5.ckpt')
# loading
new_solver = CausalPINN.load_from_checkpoint(
f'{dir}/lightning_logs/version_0/checkpoints/epoch=4-step=5.ckpt',
problem=problem, model=model)
test_pts = LabelTensor(torch.rand(20, 2), problem.input_variables)
assert new_solver.forward(test_pts).shape == (20, 1)
assert new_solver.forward(test_pts).shape == (
solver.forward(test_pts).shape
)
torch.testing.assert_close(
new_solver.forward(test_pts),
solver.forward(test_pts))
# rm directories
import shutil
shutil.rmtree('tests/test_solver/tmp')


@@ -0,0 +1,145 @@
import torch
import pytest
from pina import LabelTensor, Condition
from pina.solver import CompetitivePINN as CompPINN
from pina.trainer import Trainer
from pina.model import FeedForward
from pina.problem.zoo import (
Poisson2DSquareProblem as Poisson,
InversePoisson2DSquareProblem as InversePoisson
)
from pina.condition import (
InputOutputPointsCondition,
InputPointsEquationCondition,
DomainEquationCondition
)
from torch._dynamo.eval_frame import OptimizedModule
# define problems and model
problem = Poisson()
problem.discretise_domain(50)
inverse_problem = InversePoisson()
inverse_problem.discretise_domain(50)
model = FeedForward(
len(problem.input_variables),
len(problem.output_variables)
)
# add input-output condition to test supervised learning
input_pts = torch.rand(50, len(problem.input_variables))
input_pts = LabelTensor(input_pts, problem.input_variables)
output_pts = torch.rand(50, len(problem.output_variables))
output_pts = LabelTensor(output_pts, problem.output_variables)
problem.conditions['data'] = Condition(
input_points=input_pts,
output_points=output_pts
)
@pytest.mark.parametrize("problem", [problem, inverse_problem])
@pytest.mark.parametrize("discr", [None, model])
def test_constructor(problem, discr):
solver = CompPINN(problem=problem, model=model)
solver = CompPINN(problem=problem, model=model, discriminator=discr)
assert solver.accepted_conditions_types == (
InputOutputPointsCondition,
InputPointsEquationCondition,
DomainEquationCondition
)
@pytest.mark.parametrize("problem", [problem, inverse_problem])
@pytest.mark.parametrize("batch_size", [None, 1, 5, 20])
@pytest.mark.parametrize("compile", [True, False])
def test_solver_train(problem, batch_size, compile):
solver = CompPINN(problem=problem, model=model)
trainer = Trainer(solver=solver,
max_epochs=2,
accelerator='cpu',
batch_size=batch_size,
train_size=1.,
val_size=0.,
test_size=0.,
compile=compile)
trainer.train()
if trainer.compile:
assert (all([isinstance(model, OptimizedModule)
for model in solver.models]))
@pytest.mark.parametrize("problem", [problem, inverse_problem])
@pytest.mark.parametrize("batch_size", [None, 1, 5, 20])
@pytest.mark.parametrize("compile", [True, False])
def test_solver_validation(problem, batch_size, compile):
solver = CompPINN(problem=problem, model=model)
trainer = Trainer(solver=solver,
max_epochs=2,
accelerator='cpu',
batch_size=batch_size,
train_size=0.9,
val_size=0.1,
test_size=0.,
compile=compile)
trainer.train()
if trainer.compile:
assert (all([isinstance(model, OptimizedModule)
for model in solver.models]))
@pytest.mark.parametrize("problem", [problem, inverse_problem])
@pytest.mark.parametrize("batch_size", [None, 1, 5, 20])
@pytest.mark.parametrize("compile", [True, False])
def test_solver_test(problem, batch_size, compile):
solver = CompPINN(problem=problem, model=model)
trainer = Trainer(solver=solver,
max_epochs=2,
accelerator='cpu',
batch_size=batch_size,
train_size=0.7,
val_size=0.2,
test_size=0.1,
compile=compile)
trainer.test()
if trainer.compile:
assert (all([isinstance(model, OptimizedModule)
for model in solver.models]))
@pytest.mark.parametrize("problem", [problem, inverse_problem])
def test_train_load_restore(problem):
dir = "tests/test_solver/tmp"
solver = CompPINN(problem=problem, model=model)
trainer = Trainer(solver=solver,
max_epochs=5,
accelerator='cpu',
batch_size=None,
train_size=0.7,
val_size=0.2,
test_size=0.1,
default_root_dir=dir)
trainer.train()
# restore
new_trainer = Trainer(solver=solver, max_epochs=5, accelerator='cpu')
new_trainer.train(
ckpt_path=f'{dir}/lightning_logs/version_0/checkpoints/' +
'epoch=4-step=5.ckpt')
# loading
new_solver = CompPINN.load_from_checkpoint(
f'{dir}/lightning_logs/version_0/checkpoints/epoch=4-step=5.ckpt',
problem=problem, model=model)
test_pts = LabelTensor(torch.rand(20, 2), problem.input_variables)
assert new_solver.forward(test_pts).shape == (20, 1)
assert new_solver.forward(test_pts).shape == (
solver.forward(test_pts).shape
)
torch.testing.assert_close(
new_solver.forward(test_pts),
solver.forward(test_pts))
# rm directories
import shutil
shutil.rmtree('tests/test_solver/tmp')


@@ -0,0 +1,177 @@
import torch
import torch.nn as nn
import pytest
from pina import Condition, LabelTensor
from pina.solver import GAROM
from pina.condition import InputOutputPointsCondition
from pina.problem import AbstractProblem
from pina.model import FeedForward
from pina.trainer import Trainer
from torch._dynamo.eval_frame import OptimizedModule
class TensorProblem(AbstractProblem):
input_variables = ['u_0', 'u_1']
output_variables = ['u']
conditions = {
'data': Condition(
output_points=torch.randn(50, 2),
input_points=torch.randn(50, 1))
}
# Simple Generator Network
class Generator(nn.Module):
def __init__(self,
input_dimension=2,
parameters_dimension=1,
noise_dimension=2,
activation=torch.nn.SiLU):
super().__init__()
self._noise_dimension = noise_dimension
self._activation = activation
self.model = FeedForward(6*noise_dimension, input_dimension)
self.condition = FeedForward(parameters_dimension, 5 * noise_dimension)
def forward(self, param):
# uniform sampling in [-1, 1]
z = 2 * torch.rand(size=(param.shape[0], self._noise_dimension),
device=param.device,
dtype=param.dtype,
requires_grad=True) - 1
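        # concatenate the noise with the encoded parameters and map the result
        # to the data dimension (6 * noise_dimension -> input_dimension)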
return self.model(torch.cat((z, self.condition(param)), dim=-1))
# Simple Discriminator Network
class Discriminator(nn.Module):
def __init__(self,
input_dimension=2,
parameter_dimension=1,
hidden_dimension=2,
activation=torch.nn.ReLU):
super().__init__()
self._activation = activation
self.encoding = FeedForward(input_dimension, hidden_dimension)
self.decoding = FeedForward(2*hidden_dimension, input_dimension)
self.condition = FeedForward(parameter_dimension, hidden_dimension)
def forward(self, data):
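        # encode the input snapshot, concatenate it with the encoded
        # conditioning parameters, and decode back to the input dimension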
x, condition = data
encoding = self.encoding(x)
conditioning = torch.cat((encoding, self.condition(condition)), dim=-1)
decoding = self.decoding(conditioning)
return decoding
def test_constructor():
GAROM(problem=TensorProblem(),
generator=Generator(),
discriminator=Discriminator())
    assert GAROM.accepted_conditions_types == InputOutputPointsCondition
@pytest.mark.parametrize("batch_size", [None, 1, 5, 20])
@pytest.mark.parametrize("compile", [True, False])
def test_solver_train(batch_size, compile):
solver = GAROM(problem=TensorProblem(),
generator=Generator(),
discriminator=Discriminator())
trainer = Trainer(solver=solver,
max_epochs=2,
accelerator='cpu',
batch_size=batch_size,
train_size=1.,
test_size=0.,
val_size=0.,
compile=compile)
trainer.train()
if trainer.compile:
assert (all([isinstance(model, OptimizedModule)
for model in solver.models]))
@pytest.mark.parametrize("batch_size", [None, 1, 5, 20])
@pytest.mark.parametrize("compile", [True, False])
def test_solver_validation(batch_size, compile):
solver = GAROM(problem=TensorProblem(),
generator=Generator(),
discriminator=Discriminator())
trainer = Trainer(solver=solver,
max_epochs=2,
accelerator='cpu',
batch_size=batch_size,
train_size=0.9,
val_size=0.1,
test_size=0.,
compile=compile)
trainer.train()
if trainer.compile:
assert (all([isinstance(model, OptimizedModule)
for model in solver.models]))
@pytest.mark.parametrize("batch_size", [None, 1, 5, 20])
@pytest.mark.parametrize("compile", [True, False])
def test_solver_test(batch_size, compile):
solver = GAROM(problem=TensorProblem(),
generator=Generator(),
discriminator=Discriminator(),
)
trainer = Trainer(solver=solver,
max_epochs=2,
accelerator='cpu',
batch_size=batch_size,
train_size=0.8,
val_size=0.1,
test_size=0.1,
compile=compile)
trainer.test()
if trainer.compile:
assert (all([isinstance(model, OptimizedModule)
for model in solver.models]))
def test_train_load_restore():
dir = "tests/test_solver/tmp/"
problem = TensorProblem()
solver = GAROM(problem=TensorProblem(),
generator=Generator(),
discriminator=Discriminator(),
)
trainer = Trainer(solver=solver,
max_epochs=5,
accelerator='cpu',
batch_size=None,
train_size=0.9,
test_size=0.1,
val_size=0.,
default_root_dir=dir)
trainer.train()
# restore
new_trainer = Trainer(solver=solver, max_epochs=5, accelerator='cpu')
new_trainer.train(
ckpt_path=f'{dir}/lightning_logs/version_0/checkpoints/' +
'epoch=4-step=5.ckpt')
# loading
new_solver = GAROM.load_from_checkpoint(
f'{dir}/lightning_logs/version_0/checkpoints/epoch=4-step=5.ckpt',
problem=TensorProblem(), generator=Generator(), discriminator=Discriminator())
test_pts = torch.rand(20, 1)
assert new_solver.forward(test_pts).shape == (20, 2)
assert new_solver.forward(test_pts).shape == solver.forward(test_pts).shape
# rm directories
import shutil
shutil.rmtree('tests/test_solver/tmp')


@@ -0,0 +1,155 @@
import pytest
import torch
from pina import LabelTensor, Condition
from pina.problem import TimeDependentProblem
from pina.solver import GradientPINN
from pina.model import FeedForward
from pina.trainer import Trainer
from pina.problem.zoo import (
Poisson2DSquareProblem as Poisson,
InversePoisson2DSquareProblem as InversePoisson
)
from pina.condition import (
InputOutputPointsCondition,
InputPointsEquationCondition,
DomainEquationCondition
)
from torch._dynamo.eval_frame import OptimizedModule
class DummyTimeProblem(TimeDependentProblem):
"""
A mock time-dependent problem for testing purposes.
"""
output_variables = ['u']
temporal_domain = None
conditions = {}
# define problems and model
problem = Poisson()
problem.discretise_domain(50)
inverse_problem = InversePoisson()
inverse_problem.discretise_domain(50)
model = FeedForward(
len(problem.input_variables),
len(problem.output_variables)
)
# add input-output condition to test supervised learning
input_pts = torch.rand(50, len(problem.input_variables))
input_pts = LabelTensor(input_pts, problem.input_variables)
output_pts = torch.rand(50, len(problem.output_variables))
output_pts = LabelTensor(output_pts, problem.output_variables)
problem.conditions['data'] = Condition(
input_points=input_pts,
output_points=output_pts
)
@pytest.mark.parametrize("problem", [problem, inverse_problem])
def test_constructor(problem):
with pytest.raises(ValueError):
GradientPINN(model=model, problem=DummyTimeProblem())
solver = GradientPINN(model=model, problem=problem)
assert solver.accepted_conditions_types == (
InputOutputPointsCondition,
InputPointsEquationCondition,
DomainEquationCondition
)
@pytest.mark.parametrize("problem", [problem, inverse_problem])
@pytest.mark.parametrize("batch_size", [None, 1, 5, 20])
@pytest.mark.parametrize("compile", [True, False])
def test_solver_train(problem, batch_size, compile):
solver = GradientPINN(model=model, problem=problem)
trainer = Trainer(solver=solver,
max_epochs=2,
accelerator='cpu',
batch_size=batch_size,
train_size=1.,
val_size=0.,
test_size=0.,
compile=compile)
trainer.train()
if trainer.compile:
assert (isinstance(solver.model, OptimizedModule))
@pytest.mark.parametrize("problem", [problem, inverse_problem])
@pytest.mark.parametrize("batch_size", [None, 1, 5, 20])
@pytest.mark.parametrize("compile", [True, False])
def test_solver_validation(problem, batch_size, compile):
solver = GradientPINN(model=model, problem=problem)
trainer = Trainer(solver=solver,
max_epochs=2,
accelerator='cpu',
batch_size=batch_size,
train_size=0.9,
val_size=0.1,
test_size=0.,
compile=compile)
trainer.train()
if trainer.compile:
assert (isinstance(solver.model, OptimizedModule))
@pytest.mark.parametrize("problem", [problem, inverse_problem])
@pytest.mark.parametrize("batch_size", [None, 1, 5, 20])
@pytest.mark.parametrize("compile", [True, False])
def test_solver_test(problem, batch_size, compile):
solver = GradientPINN(model=model, problem=problem)
trainer = Trainer(solver=solver,
max_epochs=2,
accelerator='cpu',
batch_size=batch_size,
train_size=0.7,
val_size=0.2,
test_size=0.1,
compile=compile)
trainer.test()
if trainer.compile:
assert (isinstance(solver.model, OptimizedModule))
@pytest.mark.parametrize("problem", [problem, inverse_problem])
def test_train_load_restore(problem):
dir = "tests/test_solver/tmp"
solver = GradientPINN(model=model, problem=problem)
trainer = Trainer(solver=solver,
max_epochs=5,
accelerator='cpu',
batch_size=None,
train_size=0.7,
val_size=0.2,
test_size=0.1,
default_root_dir=dir)
trainer.train()
# restore
new_trainer = Trainer(solver=solver, max_epochs=5, accelerator='cpu')
new_trainer.train(
ckpt_path=f'{dir}/lightning_logs/version_0/checkpoints/' +
'epoch=4-step=5.ckpt')
# loading
new_solver = GradientPINN.load_from_checkpoint(
f'{dir}/lightning_logs/version_0/checkpoints/epoch=4-step=5.ckpt',
problem=problem, model=model)
test_pts = LabelTensor(torch.rand(20, 2), problem.input_variables)
assert new_solver.forward(test_pts).shape == (20, 1)
assert new_solver.forward(test_pts).shape == (
solver.forward(test_pts).shape
)
torch.testing.assert_close(
new_solver.forward(test_pts),
solver.forward(test_pts))
# rm directories
import shutil
shutil.rmtree('tests/test_solver/tmp')


@@ -0,0 +1,134 @@
import pytest
import torch
from pina import LabelTensor, Condition
from pina.model import FeedForward
from pina.trainer import Trainer
from pina.solver import PINN
from pina.condition import (
InputOutputPointsCondition,
InputPointsEquationCondition,
DomainEquationCondition
)
from pina.problem.zoo import (
Poisson2DSquareProblem as Poisson,
InversePoisson2DSquareProblem as InversePoisson
)
from torch._dynamo.eval_frame import OptimizedModule
# define problems and model
problem = Poisson()
problem.discretise_domain(50)
inverse_problem = InversePoisson()
inverse_problem.discretise_domain(50)
model = FeedForward(
len(problem.input_variables),
len(problem.output_variables)
)
# add input-output condition to test supervised learning
input_pts = torch.rand(50, len(problem.input_variables))
input_pts = LabelTensor(input_pts, problem.input_variables)
output_pts = torch.rand(50, len(problem.output_variables))
output_pts = LabelTensor(output_pts, problem.output_variables)
problem.conditions['data'] = Condition(
input_points=input_pts,
output_points=output_pts
)
@pytest.mark.parametrize("problem", [problem, inverse_problem])
def test_constructor(problem):
solver = PINN(problem=problem, model=model)
assert solver.accepted_conditions_types == (
InputOutputPointsCondition,
InputPointsEquationCondition,
DomainEquationCondition
)
@pytest.mark.parametrize("problem", [problem, inverse_problem])
@pytest.mark.parametrize("batch_size", [None, 1, 5, 20])
@pytest.mark.parametrize("compile", [True, False])
def test_solver_train(problem, batch_size, compile):
solver = PINN(model=model, problem=problem)
trainer = Trainer(solver=solver,
max_epochs=2,
accelerator='cpu',
batch_size=batch_size,
train_size=1.,
val_size=0.,
test_size=0.,
compile=compile)
trainer.train()
@pytest.mark.parametrize("problem", [problem, inverse_problem])
@pytest.mark.parametrize("batch_size", [None, 1, 5, 20])
@pytest.mark.parametrize("compile", [True, False])
def test_solver_validation(problem, batch_size, compile):
solver = PINN(model=model, problem=problem)
trainer = Trainer(solver=solver,
max_epochs=2,
accelerator='cpu',
batch_size=batch_size,
train_size=0.9,
val_size=0.1,
test_size=0.,
compile=compile)
trainer.train()
if trainer.compile:
        assert (isinstance(solver.model, OptimizedModule))
@pytest.mark.parametrize("problem", [problem, inverse_problem])
@pytest.mark.parametrize("batch_size", [None, 1, 5, 20])
@pytest.mark.parametrize("compile", [True, False])
def test_solver_test(problem, batch_size, compile):
solver = PINN(model=model, problem=problem)
trainer = Trainer(solver=solver,
max_epochs=2,
accelerator='cpu',
batch_size=batch_size,
train_size=0.7,
val_size=0.2,
test_size=0.1,
compile=compile)
trainer.test()
@pytest.mark.parametrize("problem", [problem, inverse_problem])
def test_train_load_restore(problem):
dir = "tests/test_solver/tmp"
solver = PINN(model=model, problem=problem)
trainer = Trainer(solver=solver,
max_epochs=5,
accelerator='cpu',
batch_size=None,
train_size=0.7,
val_size=0.2,
test_size=0.1,
default_root_dir=dir)
trainer.train()
# restore
new_trainer = Trainer(solver=solver, max_epochs=5, accelerator='cpu')
new_trainer.train(
ckpt_path=f'{dir}/lightning_logs/version_0/checkpoints/' +
'epoch=4-step=5.ckpt')
# loading
new_solver = PINN.load_from_checkpoint(
f'{dir}/lightning_logs/version_0/checkpoints/epoch=4-step=5.ckpt',
problem=problem, model=model)
test_pts = LabelTensor(torch.rand(20, 2), problem.input_variables)
assert new_solver.forward(test_pts).shape == (20, 1)
assert new_solver.forward(test_pts).shape == solver.forward(test_pts).shape
torch.testing.assert_close(
new_solver.forward(test_pts),
solver.forward(test_pts))
# rm directories
import shutil
shutil.rmtree('tests/test_solver/tmp')


@@ -0,0 +1,157 @@
import pytest
import torch
from pina import LabelTensor, Condition
from pina.model import FeedForward
from pina.trainer import Trainer
from pina.solver import RBAPINN
from pina.condition import (
InputOutputPointsCondition,
InputPointsEquationCondition,
DomainEquationCondition
)
from pina.problem.zoo import (
Poisson2DSquareProblem as Poisson,
InversePoisson2DSquareProblem as InversePoisson
)
from torch._dynamo.eval_frame import OptimizedModule
# define problems and model
problem = Poisson()
problem.discretise_domain(50)
inverse_problem = InversePoisson()
inverse_problem.discretise_domain(50)
model = FeedForward(
len(problem.input_variables),
len(problem.output_variables)
)
# add input-output condition to test supervised learning
input_pts = torch.rand(50, len(problem.input_variables))
input_pts = LabelTensor(input_pts, problem.input_variables)
output_pts = torch.rand(50, len(problem.output_variables))
output_pts = LabelTensor(output_pts, problem.output_variables)
problem.conditions['data'] = Condition(
input_points=input_pts,
output_points=output_pts
)
@pytest.mark.parametrize("problem", [problem, inverse_problem])
@pytest.mark.parametrize("eta", [1, 0.001])
@pytest.mark.parametrize("gamma", [0.5, 0.9])
def test_constructor(problem, eta, gamma):
with pytest.raises(AssertionError):
solver = RBAPINN(model=model, problem=problem, gamma=1.5)
solver = RBAPINN(model=model, problem=problem, eta=eta, gamma=gamma)
assert solver.accepted_conditions_types == (
InputOutputPointsCondition,
InputPointsEquationCondition,
DomainEquationCondition
)
@pytest.mark.parametrize("problem", [problem, inverse_problem])
def test_wrong_batch(problem):
with pytest.raises(NotImplementedError):
solver = RBAPINN(model=model, problem=problem)
trainer = Trainer(solver=solver,
max_epochs=2,
accelerator='cpu',
batch_size=10,
train_size=1.,
val_size=0.,
test_size=0.)
trainer.train()
@pytest.mark.parametrize("problem", [problem, inverse_problem])
@pytest.mark.parametrize("compile", [True, False])
def test_solver_train(problem, compile):
solver = RBAPINN(model=model, problem=problem)
trainer = Trainer(solver=solver,
max_epochs=2,
accelerator='cpu',
batch_size=None,
train_size=1.,
val_size=0.,
test_size=0.,
compile=compile)
trainer.train()
if trainer.compile:
assert (isinstance(solver.model, OptimizedModule))
@pytest.mark.parametrize("problem", [problem, inverse_problem])
@pytest.mark.parametrize("compile", [True, False])
def test_solver_validation(problem, compile):
solver = RBAPINN(model=model, problem=problem)
trainer = Trainer(solver=solver,
max_epochs=2,
accelerator='cpu',
batch_size=None,
train_size=0.9,
val_size=0.1,
test_size=0.,
compile=compile)
trainer.train()
if trainer.compile:
assert (isinstance(solver.model, OptimizedModule))
@pytest.mark.parametrize("problem", [problem, inverse_problem])
@pytest.mark.parametrize("compile", [True, False])
def test_solver_test(problem, compile):
solver = RBAPINN(model=model, problem=problem)
trainer = Trainer(solver=solver,
max_epochs=2,
accelerator='cpu',
batch_size=None,
train_size=0.7,
val_size=0.2,
test_size=0.1,
compile=compile)
trainer.test()
if trainer.compile:
assert (isinstance(solver.model, OptimizedModule))
@pytest.mark.parametrize("problem", [problem, inverse_problem])
def test_train_load_restore(problem):
dir = "tests/test_solver/tmp"
solver = RBAPINN(model=model, problem=problem)
trainer = Trainer(solver=solver,
max_epochs=5,
accelerator='cpu',
batch_size=None,
train_size=0.7,
val_size=0.2,
test_size=0.1,
default_root_dir=dir)
trainer.train()
# restore
new_trainer = Trainer(solver=solver, max_epochs=5, accelerator='cpu')
new_trainer.train(
ckpt_path=f'{dir}/lightning_logs/version_0/checkpoints/' +
'epoch=4-step=5.ckpt')
# loading
new_solver = RBAPINN.load_from_checkpoint(
f'{dir}/lightning_logs/version_0/checkpoints/epoch=4-step=5.ckpt',
problem=problem, model=model)
test_pts = LabelTensor(torch.rand(20, 2), problem.input_variables)
assert new_solver.forward(test_pts).shape == (20, 1)
assert new_solver.forward(test_pts).shape == (
solver.forward(test_pts).shape
)
torch.testing.assert_close(
new_solver.forward(test_pts),
solver.forward(test_pts))
# rm directories
import shutil
shutil.rmtree('tests/test_solver/tmp')


@@ -0,0 +1,187 @@
import torch
import pytest
from pina import Condition, LabelTensor
from pina.problem import AbstractProblem
from pina.condition import InputOutputPointsCondition
from pina.solver import ReducedOrderModelSolver
from pina.trainer import Trainer
from pina.model import FeedForward
from pina.problem.zoo import Poisson2DSquareProblem
from torch._dynamo.eval_frame import OptimizedModule
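# two equivalent data-driven problems: one storing points as LabelTensor
# (with named variables) and one using plain torch tensors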
class LabelTensorProblem(AbstractProblem):
input_variables = ['u_0', 'u_1']
output_variables = ['u']
conditions = {
'data': Condition(
input_points=LabelTensor(torch.randn(20, 2), ['u_0', 'u_1']),
output_points=LabelTensor(torch.randn(20, 1), ['u'])),
}
class TensorProblem(AbstractProblem):
input_variables = ['u_0', 'u_1']
output_variables = ['u']
conditions = {
'data': Condition(
input_points=torch.randn(20, 2),
output_points=torch.randn(20, 1))
}
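# minimal autoencoder exposing both the encode and decode networks expected by
# ReducedOrderModelSolver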
class AE(torch.nn.Module):
def __init__(self, input_dimensions, rank):
super().__init__()
self.encode = FeedForward(
input_dimensions, rank, layers=[input_dimensions//4])
self.decode = FeedForward(
rank, input_dimensions, layers=[input_dimensions//4])
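# incomplete autoencoders (each omits one of the two networks), used to check
# that the constructor rejects them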
class AE_missing_encode(torch.nn.Module):
def __init__(self, input_dimensions, rank):
super().__init__()
self.encode = FeedForward(
input_dimensions, rank, layers=[input_dimensions//4])
class AE_missing_decode(torch.nn.Module):
def __init__(self, input_dimensions, rank):
super().__init__()
self.decode = FeedForward(
rank, input_dimensions, layers=[input_dimensions//4])
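# networks used in the tests below: an interpolation network mapping the two
# input variables to the reduced rank, and an autoencoder for the reduction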
rank = 10
model = AE(2, 1)
interpolation_net = FeedForward(2, rank)
reduction_net = AE(1, rank)
def test_constructor():
problem = TensorProblem()
ReducedOrderModelSolver(problem=problem,
interpolation_network=interpolation_net,
reduction_network=reduction_net)
ReducedOrderModelSolver(problem=LabelTensorProblem(),
reduction_network=reduction_net,
interpolation_network=interpolation_net)
assert ReducedOrderModelSolver.accepted_conditions_types == InputOutputPointsCondition
    # each incomplete autoencoder must be rejected by the constructor
    with pytest.raises(SyntaxError):
        ReducedOrderModelSolver(problem=problem,
                                reduction_network=AE_missing_encode(
                                    len(problem.output_variables), rank),
                                interpolation_network=interpolation_net)
    with pytest.raises(SyntaxError):
        ReducedOrderModelSolver(problem=problem,
                                reduction_network=AE_missing_decode(
                                    len(problem.output_variables), rank),
                                interpolation_network=interpolation_net)
with pytest.raises(ValueError):
ReducedOrderModelSolver(problem=Poisson2DSquareProblem(),
reduction_network=reduction_net,
interpolation_network=interpolation_net)
@pytest.mark.parametrize("batch_size", [None, 1, 5, 20])
@pytest.mark.parametrize("use_lt", [True, False])
@pytest.mark.parametrize("compile", [True, False])
def test_solver_train(use_lt, batch_size, compile):
problem = LabelTensorProblem() if use_lt else TensorProblem()
solver = ReducedOrderModelSolver(problem=problem,
reduction_network=reduction_net,
interpolation_network=interpolation_net, use_lt=use_lt)
trainer = Trainer(solver=solver,
max_epochs=2,
accelerator='cpu',
batch_size=batch_size,
train_size=1.,
test_size=0.,
val_size=0.,
compile=compile)
trainer.train()
if trainer.compile:
for v in solver.model.values():
assert (isinstance(v, OptimizedModule))
@pytest.mark.parametrize("use_lt", [True, False])
@pytest.mark.parametrize("compile", [True, False])
def test_solver_validation(use_lt, compile):
problem = LabelTensorProblem() if use_lt else TensorProblem()
solver = ReducedOrderModelSolver(problem=problem,
reduction_network=reduction_net,
interpolation_network=interpolation_net, use_lt=use_lt)
trainer = Trainer(solver=solver,
max_epochs=2,
accelerator='cpu',
batch_size=None,
train_size=0.9,
val_size=0.1,
test_size=0.,
compile=compile)
trainer.train()
if trainer.compile:
for v in solver.model.values():
assert (isinstance(v, OptimizedModule))
@pytest.mark.parametrize("use_lt", [True, False])
@pytest.mark.parametrize("compile", [True, False])
def test_solver_test(use_lt, compile):
problem = LabelTensorProblem() if use_lt else TensorProblem()
solver = ReducedOrderModelSolver(problem=problem,
reduction_network=reduction_net,
interpolation_network=interpolation_net, use_lt=use_lt)
trainer = Trainer(solver=solver,
max_epochs=2,
accelerator='cpu',
batch_size=None,
train_size=0.8,
val_size=0.1,
test_size=0.1,
compile=compile)
trainer.train()
if trainer.compile:
for v in solver.model.values():
assert (isinstance(v, OptimizedModule))
def test_train_load_restore():
dir = "tests/test_solver/tmp/"
problem = LabelTensorProblem()
solver = ReducedOrderModelSolver(problem=problem,
reduction_network=reduction_net,
interpolation_network=interpolation_net)
trainer = Trainer(solver=solver,
max_epochs=5,
accelerator='cpu',
batch_size=None,
train_size=0.9,
test_size=0.1,
val_size=0.,
default_root_dir=dir)
trainer.train()
# restore
ntrainer = Trainer(solver=solver,
max_epochs=5,
accelerator='cpu',)
ntrainer.train(
ckpt_path=f'{dir}/lightning_logs/version_0/checkpoints/epoch=4-step=5.ckpt')
# loading
new_solver = ReducedOrderModelSolver.load_from_checkpoint(
f'{dir}/lightning_logs/version_0/checkpoints/epoch=4-step=5.ckpt',
problem=problem,
reduction_network=reduction_net,
interpolation_network=interpolation_net)
test_pts = LabelTensor(torch.rand(20, 2), problem.input_variables)
assert new_solver.forward(test_pts).shape == (20, 1)
assert new_solver.forward(test_pts).shape == solver.forward(test_pts).shape
torch.testing.assert_close(
new_solver.forward(test_pts),
solver.forward(test_pts))
# rm directories
import shutil
shutil.rmtree('tests/test_solver/tmp')


@@ -0,0 +1,159 @@
import torch
import pytest
from pina import LabelTensor, Condition
from pina.solver import SelfAdaptivePINN as SAPINN
from pina.trainer import Trainer
from pina.model import FeedForward
from pina.problem.zoo import (
Poisson2DSquareProblem as Poisson,
InversePoisson2DSquareProblem as InversePoisson
)
from pina.condition import (
InputOutputPointsCondition,
InputPointsEquationCondition,
DomainEquationCondition
)
from torch._dynamo.eval_frame import OptimizedModule
# define problems and model
problem = Poisson()
problem.discretise_domain(50)
inverse_problem = InversePoisson()
inverse_problem.discretise_domain(50)
model = FeedForward(
len(problem.input_variables),
len(problem.output_variables)
)
# add input-output condition to test supervised learning
input_pts = torch.rand(50, len(problem.input_variables))
input_pts = LabelTensor(input_pts, problem.input_variables)
output_pts = torch.rand(50, len(problem.output_variables))
output_pts = LabelTensor(output_pts, problem.output_variables)
problem.conditions['data'] = Condition(
input_points=input_pts,
output_points=output_pts
)
@pytest.mark.parametrize("problem", [problem, inverse_problem])
@pytest.mark.parametrize("weight_fn", [torch.nn.Sigmoid(), torch.nn.Tanh()])
def test_constructor(problem, weight_fn):
with pytest.raises(ValueError):
SAPINN(model=model, problem=problem, weight_function=1)
solver = SAPINN(problem=problem, model=model, weight_function=weight_fn)
assert solver.accepted_conditions_types == (
InputOutputPointsCondition,
InputPointsEquationCondition,
DomainEquationCondition
)
@pytest.mark.parametrize("problem", [problem, inverse_problem])
def test_wrong_batch(problem):
with pytest.raises(NotImplementedError):
solver = SAPINN(model=model, problem=problem)
trainer = Trainer(solver=solver,
max_epochs=2,
accelerator='cpu',
batch_size=10,
train_size=1.,
val_size=0.,
test_size=0.)
trainer.train()
@pytest.mark.parametrize("problem", [problem, inverse_problem])
@pytest.mark.parametrize("compile", [True, False])
def test_solver_train(problem, compile):
solver = SAPINN(model=model, problem=problem)
trainer = Trainer(solver=solver,
max_epochs=2,
accelerator='cpu',
batch_size=None,
train_size=1.,
val_size=0.,
test_size=0.,
compile=compile)
trainer.train()
if trainer.compile:
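        # solver.models may contain torch.nn.ModuleDict entries (presumably
        # the self-adaptive weights) alongside the compiled model, hence both
        # types are accepted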
assert (all([isinstance(model, (OptimizedModule, torch.nn.ModuleDict))
for model in solver.models]))
@pytest.mark.parametrize("problem", [problem, inverse_problem])
@pytest.mark.parametrize("compile", [True, False])
def test_solver_validation(problem, compile):
solver = SAPINN(model=model, problem=problem)
trainer = Trainer(solver=solver,
max_epochs=2,
accelerator='cpu',
batch_size=None,
train_size=0.9,
val_size=0.1,
test_size=0.,
compile=compile)
trainer.train()
if trainer.compile:
assert (all([isinstance(model, (OptimizedModule, torch.nn.ModuleDict))
for model in solver.models]))
@pytest.mark.parametrize("problem", [problem, inverse_problem])
@pytest.mark.parametrize("compile", [True, False])
def test_solver_test(problem, compile):
solver = SAPINN(model=model, problem=problem)
trainer = Trainer(solver=solver,
max_epochs=2,
accelerator='cpu',
batch_size=None,
train_size=0.7,
val_size=0.2,
test_size=0.1,
compile=compile)
trainer.test()
if trainer.compile:
assert (all([isinstance(model, (OptimizedModule, torch.nn.ModuleDict))
for model in solver.models]))
@pytest.mark.parametrize("problem", [problem, inverse_problem])
def test_train_load_restore(problem):
dir = "tests/test_solver/tmp"
solver = SAPINN(model=model, problem=problem)
trainer = Trainer(solver=solver,
max_epochs=5,
accelerator='cpu',
batch_size=None,
train_size=0.7,
val_size=0.2,
test_size=0.1,
default_root_dir=dir)
trainer.train()
# restore
new_trainer = Trainer(solver=solver, max_epochs=5, accelerator='cpu')
new_trainer.train(
ckpt_path=f'{dir}/lightning_logs/version_0/checkpoints/' +
'epoch=4-step=5.ckpt')
# loading
new_solver = SAPINN.load_from_checkpoint(
f'{dir}/lightning_logs/version_0/checkpoints/epoch=4-step=5.ckpt',
problem=problem, model=model)
test_pts = LabelTensor(torch.rand(20, 2), problem.input_variables)
assert new_solver.forward(test_pts).shape == (20, 1)
assert new_solver.forward(test_pts).shape == (
solver.forward(test_pts).shape
)
torch.testing.assert_close(
new_solver.forward(test_pts),
solver.forward(test_pts))
# rm directories
import shutil
shutil.rmtree('tests/test_solver/tmp')


@@ -0,0 +1,133 @@
import torch
import pytest
from pina import Condition, LabelTensor
from pina.condition import InputOutputPointsCondition
from pina.problem import AbstractProblem
from pina.solver import SupervisedSolver
from pina.model import FeedForward
from pina.trainer import Trainer
from torch._dynamo.eval_frame import OptimizedModule
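# two equivalent data problems: one with LabelTensor points (named variables)
# and one with plain torch tensors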
class LabelTensorProblem(AbstractProblem):
input_variables = ['u_0', 'u_1']
output_variables = ['u']
conditions = {
'data': Condition(
input_points=LabelTensor(torch.randn(20, 2), ['u_0', 'u_1']),
output_points=LabelTensor(torch.randn(20, 1), ['u'])),
}
class TensorProblem(AbstractProblem):
input_variables = ['u_0', 'u_1']
output_variables = ['u']
conditions = {
'data': Condition(
input_points=torch.randn(20, 2),
output_points=torch.randn(20, 1))
}
model = FeedForward(2, 1)
def test_constructor():
SupervisedSolver(problem=TensorProblem(), model=model)
SupervisedSolver(problem=LabelTensorProblem(), model=model)
    assert SupervisedSolver.accepted_conditions_types == InputOutputPointsCondition
@pytest.mark.parametrize("batch_size", [None, 1, 5, 20])
@pytest.mark.parametrize("use_lt", [True, False])
@pytest.mark.parametrize("compile", [True, False])
def test_solver_train(use_lt, batch_size, compile):
problem = LabelTensorProblem() if use_lt else TensorProblem()
solver = SupervisedSolver(problem=problem, model=model, use_lt=use_lt)
trainer = Trainer(solver=solver,
max_epochs=2,
accelerator='cpu',
batch_size=batch_size,
train_size=1.,
test_size=0.,
val_size=0.,
compile=compile)
trainer.train()
if trainer.compile:
assert (isinstance(solver.model, OptimizedModule))
@pytest.mark.parametrize("use_lt", [True, False])
@pytest.mark.parametrize("compile", [True, False])
def test_solver_validation(use_lt, compile):
problem = LabelTensorProblem() if use_lt else TensorProblem()
solver = SupervisedSolver(problem=problem, model=model, use_lt=use_lt)
trainer = Trainer(solver=solver,
max_epochs=2,
accelerator='cpu',
batch_size=None,
train_size=0.9,
val_size=0.1,
test_size=0.,
compile=compile)
trainer.train()
if trainer.compile:
assert (isinstance(solver.model, OptimizedModule))
@pytest.mark.parametrize("use_lt", [True, False])
@pytest.mark.parametrize("compile", [True, False])
def test_solver_test(use_lt, compile):
problem = LabelTensorProblem() if use_lt else TensorProblem()
solver = SupervisedSolver(problem=problem, model=model, use_lt=use_lt)
trainer = Trainer(solver=solver,
max_epochs=2,
accelerator='cpu',
batch_size=None,
train_size=0.8,
val_size=0.1,
test_size=0.1,
compile=compile)
trainer.test()
if trainer.compile:
assert (isinstance(solver.model, OptimizedModule))
def test_train_load_restore():
dir = "tests/test_solver/tmp/"
problem = LabelTensorProblem()
solver = SupervisedSolver(problem=problem, model=model)
trainer = Trainer(solver=solver,
max_epochs=5,
accelerator='cpu',
batch_size=None,
train_size=0.9,
test_size=0.1,
val_size=0.,
default_root_dir=dir)
trainer.train()
# restore
new_trainer = Trainer(solver=solver, max_epochs=5, accelerator='cpu')
new_trainer.train(
ckpt_path=f'{dir}/lightning_logs/version_0/checkpoints/' +
'epoch=4-step=5.ckpt')
# loading
new_solver = SupervisedSolver.load_from_checkpoint(
f'{dir}/lightning_logs/version_0/checkpoints/epoch=4-step=5.ckpt',
problem=problem, model=model)
test_pts = LabelTensor(torch.rand(20, 2), problem.input_variables)
assert new_solver.forward(test_pts).shape == (20, 1)
assert new_solver.forward(test_pts).shape == solver.forward(test_pts).shape
torch.testing.assert_close(
new_solver.forward(test_pts),
solver.forward(test_pts))
# rm directories
import shutil
shutil.rmtree('tests/test_solver/tmp')