Correct codacy warnings

committed by Nicola Demo
parent c9304fb9bb
commit 1bc1b3a580
@@ -1,12 +1,6 @@
 __all__ = [
-    "PINN",
-    "Trainer",
-    "LabelTensor",
-    "Plotter",
-    "Condition",
-    "SamplePointDataset",
-    "PinaDataModule",
-    "PinaDataLoader"
+    "PINN", "Trainer", "LabelTensor", "Plotter", "Condition",
+    "SamplePointDataset", "PinaDataModule", "PinaDataLoader"
 ]

 from .meta import *
@@ -17,4 +11,4 @@ from .plotter import Plotter
 from .condition.condition import Condition
 from .data import SamplePointDataset
 from .data import PinaDataModule
-from .data import PinaDataLoader
+from .data import PinaDataLoader
@@ -2,13 +2,8 @@
 Import data classes
 """
 __all__ = [
-    'PinaDataLoader',
-    'SupervisedDataset',
-    'SamplePointDataset',
-    'UnsupervisedDataset',
-    'Batch',
-    'PinaDataModule',
-    'BaseDataset'
+    'PinaDataLoader', 'SupervisedDataset', 'SamplePointDataset',
+    'UnsupervisedDataset', 'Batch', 'PinaDataModule', 'BaseDataset'
 ]

 from .pina_dataloader import PinaDataLoader
@@ -22,10 +22,12 @@ class BaseDataset(Dataset):
             dataset will be loaded.
         """
         if cls is BaseDataset:
-            raise TypeError('BaseDataset cannot be instantiated directly. Use a subclass.')
+            raise TypeError(
+                'BaseDataset cannot be instantiated directly. Use a subclass.')
         if not hasattr(cls, '__slots__'):
-            raise TypeError('Something is wrong, __slots__ must be defined in subclasses.')
-        return super().__new__(cls)
+            raise TypeError(
+                'Something is wrong, __slots__ must be defined in subclasses.')
+        return super(BaseDataset, cls).__new__(cls)

     def __init__(self, problem, device):
         """"
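The guard above means every concrete dataset must declare __slots__ before it can be instantiated. A rough, self-contained sketch of that contract (ToyDataset and its slot name are illustrative, not part of PINA):

    from torch.utils.data import Dataset


    class BaseDataset(Dataset):
        # simplified stand-in for pina's BaseDataset, for illustration only

        def __new__(cls, *args, **kwargs):
            if cls is BaseDataset:
                raise TypeError(
                    'BaseDataset cannot be instantiated directly. Use a subclass.')
            if not hasattr(cls, '__slots__'):
                raise TypeError(
                    'Something is wrong, __slots__ must be defined in subclasses.')
            return super(BaseDataset, cls).__new__(cls)


    class ToyDataset(BaseDataset):
        __slots__ = ['input_points']  # declaring __slots__ satisfies the guard


    ToyDataset()      # fine
    # BaseDataset() would raise TypeError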
@@ -79,7 +81,8 @@ class BaseDataset(Dataset):

     def __getattribute__(self, item):
         attribute = super().__getattribute__(item)
-        if isinstance(attribute, LabelTensor) and attribute.dtype == torch.float32:
+        if isinstance(attribute,
+                      LabelTensor) and attribute.dtype == torch.float32:
             attribute = attribute.to(device=self.device).requires_grad_()
         return attribute

@@ -101,7 +104,8 @@ class BaseDataset(Dataset):
         if all(isinstance(x, int) for x in idx):
             to_return_list = []
             for i in self.__slots__:
-                to_return_list.append(getattr(self, i)[[idx]].to(self.device))
+                to_return_list.append(
+                    getattr(self, i)[[idx]].to(self.device))
             return to_return_list

         raise ValueError(f'Invalid index {idx}')
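Each attribute read in the __getattribute__ hunk funnels float32 LabelTensors through the same two-step transformation; in bare torch terms (the 'cpu' device here is just a placeholder):

    import torch

    attribute = torch.rand(5, 2)                          # float32 by default
    attribute = attribute.to(device='cpu').requires_grad_()
    assert attribute.requires_grad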
@@ -5,6 +5,7 @@ from .pina_subset import PinaSubset

+
 class Batch:

     def __init__(self, dataset_dict, idx_dict):

         for k, v in dataset_dict.items():
@@ -29,5 +30,6 @@ class Batch:
     def __getattr__(self, item):
         if not item in dir(self):
             raise AttributeError(f'Batch instance has no attribute {item}')
-        return PinaSubset(getattr(self, item).dataset,
-                          getattr(self, item).indices[self.coordinates_dict[item]])
+        return PinaSubset(
+            getattr(self, item).dataset,
+            getattr(self, item).indices[self.coordinates_dict[item]])
@@ -50,7 +50,8 @@ class PinaDataLoader:
                 temp_dict[k] = slice(i * v, (i + 1) * v)
             else:
                 temp_dict[k] = slice(i * v, len(self.dataset_dict[k]))
-            self.batches.append(Batch(idx_dict=temp_dict, dataset_dict=self.dataset_dict))
+            self.batches.append(
+                Batch(idx_dict=temp_dict, dataset_dict=self.dataset_dict))

     def __iter__(self):
         """
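The reflowed loop builds one slice per dataset for every batch index i, where v is that dataset's per-batch size and the last batch may be short. A standalone sketch of the same bookkeeping (the dataset names and sizes are made up, not PINA's):

    import math

    dataset_dict = {'supervised': list(range(23)), 'physics': list(range(10))}
    batch_sizes = {'supervised': 5, 'physics': 2}

    n_batches = max(
        math.ceil(len(d) / batch_sizes[k]) for k, d in dataset_dict.items())
    batches = []
    for i in range(n_batches):
        temp_dict = {}
        for k, v in batch_sizes.items():
            if (i + 1) * v <= len(dataset_dict[k]):
                temp_dict[k] = slice(i * v, (i + 1) * v)
            else:
                temp_dict[k] = slice(i * v, len(dataset_dict[k]))
        batches.append(temp_dict)

    # the final batch of 'supervised' holds the 3 leftover items
    assert dataset_dict['supervised'][batches[4]['supervised']] == [20, 21, 22]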
@@ -5,6 +5,7 @@ import torch
 from ..utils import check_consistency
 from .optimizer_interface import Optimizer

+
 class TorchOptimizer(Optimizer):

     def __init__(self, optimizer_class, **kwargs):
@@ -14,6 +15,5 @@ class TorchOptimizer(Optimizer):
         self.kwargs = kwargs

     def hook(self, parameters):
-        self.optimizer_instance = self.optimizer_class(
-            parameters, **self.kwargs
-        )
+        self.optimizer_instance = self.optimizer_class(parameters,
+                                                       **self.kwargs)
@@ -5,13 +5,13 @@ try:
     from torch.optim.lr_scheduler import LRScheduler  # torch >= 2.0
 except ImportError:
     from torch.optim.lr_scheduler import (
-        _LRScheduler as LRScheduler,
-    )  # torch < 2.0
+        _LRScheduler as LRScheduler, )  # torch < 2.0

 from ..utils import check_consistency
 from .optimizer_interface import Optimizer
 from .scheduler_interface import Scheduler

+
 class TorchScheduler(Scheduler):

     def __init__(self, scheduler_class, **kwargs):
@@ -23,5 +23,4 @@ class TorchScheduler(Scheduler):
     def hook(self, optimizer):
         check_consistency(optimizer, Optimizer)
         self.scheduler_instance = self.scheduler_class(
-            optimizer.optimizer_instance, **self.kwargs
-        )
+            optimizer.optimizer_instance, **self.kwargs)
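Both hook methods follow the same deferred-construction pattern: the wrapper stores the torch class plus kwargs, and builds the real object only once parameters (or an optimizer) exist. A sketch of the intended call sequence, using the import path exercised by the test file further down:

    import torch
    from pina import TorchOptimizer

    opt = TorchOptimizer(torch.optim.Adam, lr=1e-3)   # nothing built yet
    opt.hook(torch.nn.Linear(10, 10).parameters())    # Adam instantiated here
    assert isinstance(opt.optimizer_instance, torch.optim.Adam)

A TorchScheduler is then hooked onto the wrapper itself rather than the raw torch optimizer, as configure_optimizers in the solver below shows.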
@@ -17,15 +17,13 @@ class SolverInterface(pytorch_lightning.LightningModule, metaclass=ABCMeta):
     LightningModule methods.
     """

-    def __init__(
-        self,
-        models,
-        problem,
-        optimizers,
-        schedulers,
-        extra_features,
-        use_lt=True
-    ):
+    def __init__(self,
+                 models,
+                 problem,
+                 optimizers,
+                 schedulers,
+                 extra_features,
+                 use_lt=True):
         """
         :param model: A torch neural network model instance.
         :type model: torch.nn.Module
@@ -55,10 +53,11 @@ class SolverInterface(pytorch_lightning.LightningModule, metaclass=ABCMeta):
         if use_lt is True:
             for idx in range(len(models)):
                 models[idx] = Network(
-                    model = models[idx],
+                    model=models[idx],
                     input_variables=problem.input_variables,
                     output_variables=problem.output_variables,
-                    extra_features=extra_features, )
+                    extra_features=extra_features,
+                )

         #Check scheduler consistency + encapsulation
         if not isinstance(schedulers, list):
@@ -79,11 +78,9 @@ class SolverInterface(pytorch_lightning.LightningModule, metaclass=ABCMeta):

         # check length consistency optimizers
         if len_model != len_optimizer:
-            raise ValueError(
-                "You must define one optimizer for each model."
-                f"Got {len_model} models, and {len_optimizer}"
-                " optimizers."
-            )
+            raise ValueError("You must define one optimizer for each model."
+                             f"Got {len_model} models, and {len_optimizer}"
+                             " optimizers.")

         # extra features handling

@@ -92,7 +89,6 @@ class SolverInterface(pytorch_lightning.LightningModule, metaclass=ABCMeta):
         self._pina_schedulers = schedulers
         self._pina_problem = problem

-
     @abstractmethod
     def forward(self, *args, **kwargs):
         pass
@@ -142,5 +138,8 @@ class SolverInterface(pytorch_lightning.LightningModule, metaclass=ABCMeta):
         TODO
         """
         for _, condition in problem.conditions.items():
-            if not set(self.accepted_condition_types).issubset(condition.condition_type):
-                raise ValueError(f'{self.__name__} support only dose not support condition {condition.condition_type}')
+            if not set(self.accepted_condition_types).issubset(
+                    condition.condition_type):
+                raise ValueError(
+                    f'{self.__name__} support only dose not support condition {condition.condition_type}'
+                )
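The reflowed guard keeps the original logic: a solver is compatible with a condition only when every type the solver accepts appears among the condition's types. The check is plain set containment, for example:

    accepted_condition_types = {'supervised'}
    assert accepted_condition_types.issubset({'supervised', 'physics'})
    assert not accepted_condition_types.issubset({'physics'})  # ValueError path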
@@ -40,15 +40,13 @@ class SupervisedSolver(SolverInterface):
     accepted_condition_types = ['supervised']
     __name__ = 'SupervisedSolver'

-    def __init__(
-        self,
-        problem,
-        model,
-        loss=None,
-        optimizer=None,
-        scheduler=None,
-        extra_features=None
-    ):
+    def __init__(self,
+                 problem,
+                 model,
+                 loss=None,
+                 optimizer=None,
+                 scheduler=None,
+                 extra_features=None):
         """
         :param AbstractProblem problem: The formualation of the problem.
         :param torch.nn.Module model: The neural network model to use.
@@ -68,16 +66,13 @@ class SupervisedSolver(SolverInterface):
             optimizer = TorchOptimizer(torch.optim.Adam, lr=0.001)

         if scheduler is None:
-            scheduler = TorchScheduler(
-                torch.optim.lr_scheduler.ConstantLR)
+            scheduler = TorchScheduler(torch.optim.lr_scheduler.ConstantLR)

-        super().__init__(
-            models=model,
-            problem=problem,
-            optimizers=optimizer,
-            schedulers=scheduler,
-            extra_features=extra_features
-        )
+        super().__init__(models=model,
+                         problem=problem,
+                         optimizers=optimizer,
+                         schedulers=scheduler,
+                         extra_features=extra_features)

         # check consistency
         check_consistency(loss, (LossInterface, _Loss), subclass=False)
@@ -107,10 +102,8 @@ class SupervisedSolver(SolverInterface):
         """
         self._optimizer.hook(self._model.parameters())
         self._scheduler.hook(self._optimizer)
-        return (
-            [self._optimizer.optimizer_instance],
-            [self._scheduler.scheduler_instance]
-        )
+        return ([self._optimizer.optimizer_instance],
+                [self._scheduler.scheduler_instance])

     def training_step(self, batch, batch_idx):
         """Solver training step.
@@ -136,8 +129,7 @@ class SupervisedSolver(SolverInterface):
             # for data driven mode
             if not hasattr(condition, "output_points"):
                 raise NotImplementedError(
-                    f"{type(self).__name__} works only in data-driven mode."
-                )
+                    f"{type(self).__name__} works only in data-driven mode.")

             output_pts = out[condition_idx == condition_id]
             input_pts = pts[condition_idx == condition_id]
@@ -145,9 +137,7 @@ class SupervisedSolver(SolverInterface):
             input_pts.labels = pts.labels
             output_pts.labels = out.labels

-            loss = (
-                self.loss_data(input_pts=input_pts, output_pts=output_pts)
-            )
+            loss = (self.loss_data(input_pts=input_pts, output_pts=output_pts))
             loss = loss.as_subclass(torch.Tensor)

             self.log("mean_loss", float(loss), prog_bar=True, logger=True)
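The two-list return value reformatted above is PyTorch Lightning's configure_optimizers convention: a list of optimizers paired with a list of LR schedulers. Outside PINA's wrappers, the same shape looks like this minimal sketch of a method inside any LightningModule:

    import torch

    def configure_optimizers(self):
        optimizer = torch.optim.Adam(self.parameters(), lr=1e-3)
        scheduler = torch.optim.lr_scheduler.ConstantLR(optimizer)
        # ([optimizer, ...], [scheduler, ...]) is one of Lightning's accepted formats
        return [optimizer], [scheduler]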
@@ -29,34 +29,49 @@ class Poisson(SpatialProblem):
     spatial_domain = CartesianDomain({'x': [0, 1], 'y': [0, 1]})

     conditions = {
-        'gamma1': Condition(
-            domain=CartesianDomain({'x': [0, 1], 'y': 1}),
-            equation=FixedValue(0.0)),
-        'gamma2': Condition(
-            domain=CartesianDomain({'x': [0, 1], 'y': 0}),
-            equation=FixedValue(0.0)),
-        'gamma3': Condition(
-            domain=CartesianDomain({'x': 1, 'y': [0, 1]}),
-            equation=FixedValue(0.0)),
-        'gamma4': Condition(
-            domain=CartesianDomain({'x': 0, 'y': [0, 1]}),
-            equation=FixedValue(0.0)),
-        'D': Condition(
-            input_points=LabelTensor(torch.rand(size=(100, 2)), ['x', 'y']),
-            equation=my_laplace),
-        'data': Condition(
-            input_points=in_,
-            output_points=out_),
-        'data2': Condition(
-            input_points=in2_,
-            output_points=out2_),
-        'unsupervised': Condition(
+        'gamma1':
+        Condition(domain=CartesianDomain({
+            'x': [0, 1],
+            'y': 1
+        }),
+                  equation=FixedValue(0.0)),
+        'gamma2':
+        Condition(domain=CartesianDomain({
+            'x': [0, 1],
+            'y': 0
+        }),
+                  equation=FixedValue(0.0)),
+        'gamma3':
+        Condition(domain=CartesianDomain({
+            'x': 1,
+            'y': [0, 1]
+        }),
+                  equation=FixedValue(0.0)),
+        'gamma4':
+        Condition(domain=CartesianDomain({
+            'x': 0,
+            'y': [0, 1]
+        }),
+                  equation=FixedValue(0.0)),
+        'D':
+        Condition(input_points=LabelTensor(torch.rand(size=(100, 2)),
+                                           ['x', 'y']),
+                  equation=my_laplace),
+        'data':
+        Condition(input_points=in_, output_points=out_),
+        'data2':
+        Condition(input_points=in2_, output_points=out2_),
+        'unsupervised':
+        Condition(
             input_points=LabelTensor(torch.rand(size=(45, 2)), ['x', 'y']),
-            conditional_variables=LabelTensor(torch.ones(size=(45, 1)), ['alpha']),
+            conditional_variables=LabelTensor(torch.ones(size=(45, 1)),
+                                              ['alpha']),
         ),
-        'unsupervised2': Condition(
+        'unsupervised2':
+        Condition(
             input_points=LabelTensor(torch.rand(size=(90, 2)), ['x', 'y']),
-            conditional_variables=LabelTensor(torch.ones(size=(90, 1)), ['alpha']),
+            conditional_variables=LabelTensor(torch.ones(size=(90, 1)),
+                                              ['alpha']),
         )
     }

@@ -113,32 +128,49 @@ def test_data_module():
     assert isinstance(loader, PinaDataLoader)
     assert isinstance(loader, PinaDataLoader)

-    data_module = PinaDataModule(poisson, device='cpu', batch_size=10, shuffle=False)
+    data_module = PinaDataModule(poisson,
+                                 device='cpu',
+                                 batch_size=10,
+                                 shuffle=False)
     data_module.setup()
     loader = data_module.train_dataloader()
     assert len(loader) == 24
     for i in loader:
         assert len(i) <= 10
-    len_ref = sum([math.ceil(len(dataset) * 0.7) for dataset in data_module.datasets])
-    len_real = sum([len(dataset) for dataset in data_module.splits['train'].values()])
+    len_ref = sum(
+        [math.ceil(len(dataset) * 0.7) for dataset in data_module.datasets])
+    len_real = sum(
+        [len(dataset) for dataset in data_module.splits['train'].values()])
     assert len_ref == len_real

     supervised_dataset = SupervisedDataset(poisson, device='cpu')
-    data_module = PinaDataModule(poisson, device='cpu', batch_size=10, shuffle=False, datasets=[supervised_dataset])
+    data_module = PinaDataModule(poisson,
+                                 device='cpu',
+                                 batch_size=10,
+                                 shuffle=False,
+                                 datasets=[supervised_dataset])
     data_module.setup()
     loader = data_module.train_dataloader()
     for batch in loader:
         assert len(batch) <= 10

     physics_dataset = SamplePointDataset(poisson, device='cpu')
-    data_module = PinaDataModule(poisson, device='cpu', batch_size=10, shuffle=False, datasets=[physics_dataset])
+    data_module = PinaDataModule(poisson,
+                                 device='cpu',
+                                 batch_size=10,
+                                 shuffle=False,
+                                 datasets=[physics_dataset])
     data_module.setup()
     loader = data_module.train_dataloader()
     for batch in loader:
         assert len(batch) <= 10

     unsupervised_dataset = UnsupervisedDataset(poisson, device='cpu')
-    data_module = PinaDataModule(poisson, device='cpu', batch_size=10, shuffle=False, datasets=[unsupervised_dataset])
+    data_module = PinaDataModule(poisson,
+                                 device='cpu',
+                                 batch_size=10,
+                                 shuffle=False,
+                                 datasets=[unsupervised_dataset])
     data_module.setup()
     loader = data_module.train_dataloader()
     for batch in loader:
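The len_ref/len_real assertion above checks the 70% train split dataset by dataset. A hedged sketch of that arithmetic with made-up sizes (these are not the fixture's actual dataset lengths):

    import math

    dataset_sizes = [100, 45, 90]       # illustrative only
    train_fraction = 0.7
    len_ref = sum(math.ceil(n * train_fraction) for n in dataset_sizes)
    # 70 + 32 + 63 points end up in the train split
    assert len_ref == 165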
@@ -159,4 +191,6 @@ def test_loader():
         assert i.supervised.input_points.requires_grad == True
         assert i.physics.input_points.requires_grad == True
         assert i.unsupervised.input_points.requires_grad == True
-test_loader()
+
+
+test_loader()
@@ -4,29 +4,23 @@ import pytest
 from pina.label_tensor import LabelTensor

 data = torch.rand((20, 3))
-labels_column = {
-    1: {
-        "name": "space",
-        "dof": ['x', 'y', 'z']
-    }
-}
-labels_row = {
-    0: {
-        "name": "samples",
-        "dof": range(20)
-    }
-}
+labels_column = {1: {"name": "space", "dof": ['x', 'y', 'z']}}
+labels_row = {0: {"name": "samples", "dof": range(20)}}
 labels_list = ['x', 'y', 'z']
 labels_all = labels_column | labels_row

-@pytest.mark.parametrize("labels", [labels_column, labels_row, labels_all, labels_list])
+
+@pytest.mark.parametrize("labels",
+                         [labels_column, labels_row, labels_all, labels_list])
 def test_constructor(labels):
     print(LabelTensor(data, labels))

+
 def test_wrong_constructor():
     with pytest.raises(ValueError):
         LabelTensor(data, ['a', 'b'])

+
 @pytest.mark.parametrize("labels", [labels_column, labels_all])
 @pytest.mark.parametrize("labels_te", ['z', ['z'], {'space': ['z']}])
 def test_extract_column(labels, labels_te):
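The labels_all = labels_column | labels_row line uses the dict union operator, available since Python 3.9; on key collisions the right operand wins. With the fixture's values:

    labels_column = {1: {"name": "space", "dof": ['x', 'y', 'z']}}
    labels_row = {0: {"name": "samples", "dof": range(20)}}
    labels_all = labels_column | labels_row
    assert set(labels_all) == {0, 1}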
@@ -37,6 +31,7 @@ def test_extract_column(labels, labels_te):
     assert new.shape[0] == 20
     assert torch.all(torch.isclose(data[:, 2].reshape(-1, 1), new))

+
 @pytest.mark.parametrize("labels", [labels_row, labels_all])
 @pytest.mark.parametrize("labels_te", [{'samples': [2]}])
 def test_extract_row(labels, labels_te):
@@ -47,10 +42,14 @@ def test_extract_row(labels, labels_te):
     assert new.shape[0] == 1
     assert torch.all(torch.isclose(data[2].reshape(1, -1), new))

-@pytest.mark.parametrize("labels_te", [
-    {'samples': [2], 'space': ['z']},
-    {'space': 'z', 'samples': 2}
-])
+
+@pytest.mark.parametrize("labels_te", [{
+    'samples': [2],
+    'space': ['z']
+}, {
+    'space': 'z',
+    'samples': 2
+}])
 def test_extract_2D(labels_te):
     labels = labels_all
     tensor = LabelTensor(data, labels)
@@ -58,7 +57,8 @@ def test_extract_2D(labels_te):
     assert new.ndim == tensor.ndim
     assert new.shape[1] == 1
     assert new.shape[0] == 1
-    assert torch.all(torch.isclose(data[2,2].reshape(1, 1), new))
+    assert torch.all(torch.isclose(data[2, 2].reshape(1, 1), new))

+
 def test_extract_3D():
     data = torch.rand(20, 3, 4)
@@ -72,10 +72,7 @@ def test_extract_3D():
             "dof": range(4)
         },
     }
-    labels_te = {
-        'space': ['x', 'z'],
-        'time': range(1, 4)
-    }
+    labels_te = {'space': ['x', 'z'], 'time': range(1, 4)}

     tensor = LabelTensor(data, labels)
     new = tensor.extract(labels_te)
@@ -84,15 +81,13 @@ def test_extract_3D():
     assert new.shape[0] == 20
     assert new.shape[1] == 2
     assert new.shape[2] == 3
-    assert torch.all(torch.isclose(
-        data[:, 0::2, 1:4].reshape(20, 2, 3),
-        new
-    ))
+    assert torch.all(torch.isclose(data[:, 0::2, 1:4].reshape(20, 2, 3), new))
     assert tensor2.ndim == tensor.ndim
     assert tensor2.shape == tensor.shape
     assert tensor.full_labels == tensor2.full_labels
     assert new.shape != tensor.shape

+
 def test_concatenation_3D():
     data_1 = torch.rand(20, 3, 4)
     labels_1 = ['x', 'y', 'z', 'w']
@@ -152,27 +147,28 @@ def test_concatenation_3D():


 def test_summation():
-    lt1 = LabelTensor(torch.ones(20,3), labels_all)
-    lt2 = LabelTensor(torch.ones(30,3), ['x', 'y', 'z'])
+    lt1 = LabelTensor(torch.ones(20, 3), labels_all)
+    lt2 = LabelTensor(torch.ones(30, 3), ['x', 'y', 'z'])
     with pytest.raises(RuntimeError):
         LabelTensor.summation([lt1, lt2])
-    lt1 = LabelTensor(torch.ones(20,3), labels_all)
-    lt2 = LabelTensor(torch.ones(20,3), labels_all)
+    lt1 = LabelTensor(torch.ones(20, 3), labels_all)
+    lt2 = LabelTensor(torch.ones(20, 3), labels_all)
     lt_sum = LabelTensor.summation([lt1, lt2])
     assert lt_sum.ndim == lt_sum.ndim
     assert lt_sum.shape[0] == 20
     assert lt_sum.shape[1] == 3
     assert lt_sum.full_labels == labels_all
-    assert torch.eq(lt_sum.tensor, torch.ones(20,3)*2).all()
-    lt1 = LabelTensor(torch.ones(20,3), labels_all)
-    lt2 = LabelTensor(torch.ones(20,3), labels_all)
+    assert torch.eq(lt_sum.tensor, torch.ones(20, 3) * 2).all()
+    lt1 = LabelTensor(torch.ones(20, 3), labels_all)
+    lt2 = LabelTensor(torch.ones(20, 3), labels_all)
     lt3 = LabelTensor(torch.zeros(20, 3), labels_all)
     lt_sum = LabelTensor.summation([lt1, lt2, lt3])
     assert lt_sum.ndim == lt_sum.ndim
     assert lt_sum.shape[0] == 20
     assert lt_sum.shape[1] == 3
     assert lt_sum.full_labels == labels_all
-    assert torch.eq(lt_sum.tensor, torch.ones(20,3)*2).all()
+    assert torch.eq(lt_sum.tensor, torch.ones(20, 3) * 2).all()

+
 def test_append_3D():
     data_1 = torch.rand(20, 3, 2)
@@ -187,6 +183,7 @@ def test_append_3D():
     assert lt1.full_labels[1]['dof'] == range(3)
     assert lt1.full_labels[2]['dof'] == ['x', 'y', 'z', 'w']

+
 def test_append_2D():
     data_1 = torch.rand(20, 2)
     labels_1 = ['x', 'y']
@@ -199,12 +196,31 @@ def test_append_2D():
     assert lt1.full_labels[0]['dof'] == range(400)
     assert lt1.full_labels[1]['dof'] == ['x', 'y', 'z', 'w']

+
 def test_vstack_3D():
     data_1 = torch.rand(20, 3, 2)
-    labels_1 = {1:{'dof': ['a', 'b', 'c'], 'name': 'first'}, 2: {'dof': ['x', 'y'], 'name': 'second'}}
+    labels_1 = {
+        1: {
+            'dof': ['a', 'b', 'c'],
+            'name': 'first'
+        },
+        2: {
+            'dof': ['x', 'y'],
+            'name': 'second'
+        }
+    }
     lt1 = LabelTensor(data_1, labels_1)
     data_2 = torch.rand(20, 3, 2)
-    labels_1 = {1:{'dof': ['a', 'b', 'c'], 'name': 'first'}, 2: {'dof': ['x', 'y'], 'name': 'second'}}
+    labels_1 = {
+        1: {
+            'dof': ['a', 'b', 'c'],
+            'name': 'first'
+        },
+        2: {
+            'dof': ['x', 'y'],
+            'name': 'second'
+        }
+    }
     lt2 = LabelTensor(data_2, labels_1)
     lt_stacked = LabelTensor.vstack([lt1, lt2])
     assert lt_stacked.shape == (40, 3, 2)
@@ -214,12 +230,13 @@ def test_vstack_3D():
     assert lt_stacked.full_labels[1]['name'] == 'first'
     assert lt_stacked.full_labels[2]['name'] == 'second'

+
 def test_vstack_2D():
     data_1 = torch.rand(20, 2)
-    labels_1 = { 1: {'dof': ['x', 'y'], 'name': 'second'}}
+    labels_1 = {1: {'dof': ['x', 'y'], 'name': 'second'}}
     lt1 = LabelTensor(data_1, labels_1)
     data_2 = torch.rand(20, 2)
-    labels_1 = { 1: {'dof': ['x', 'y'], 'name': 'second'}}
+    labels_1 = {1: {'dof': ['x', 'y'], 'name': 'second'}}
     lt2 = LabelTensor(data_2, labels_1)
     lt_stacked = LabelTensor.vstack([lt1, lt2])
     assert lt_stacked.shape == (40, 2)
@@ -228,35 +245,36 @@ def test_vstack_2D():
     assert lt_stacked.full_labels[0]['name'] == 0
     assert lt_stacked.full_labels[1]['name'] == 'second'

+
 def test_sorting():
     data = torch.ones(20, 5)
-    data[:,0] = data[:,0]*4
-    data[:,1] = data[:,1]*2
-    data[:,2] = data[:,2]
-    data[:,3] = data[:,3]*5
-    data[:,4] = data[:,4]*3
+    data[:, 0] = data[:, 0] * 4
+    data[:, 1] = data[:, 1] * 2
+    data[:, 2] = data[:, 2]
+    data[:, 3] = data[:, 3] * 5
+    data[:, 4] = data[:, 4] * 3
     labels = ['d', 'b', 'a', 'e', 'c']
     lt_data = LabelTensor(data, labels)
     lt_sorted = LabelTensor.sort_labels(lt_data)
-    assert lt_sorted.shape == (20,5)
+    assert lt_sorted.shape == (20, 5)
     assert lt_sorted.labels == ['a', 'b', 'c', 'd', 'e']
-    assert torch.eq(lt_sorted.tensor[:,0], torch.ones(20) * 1).all()
-    assert torch.eq(lt_sorted.tensor[:,1], torch.ones(20) * 2).all()
-    assert torch.eq(lt_sorted.tensor[:,2], torch.ones(20) * 3).all()
-    assert torch.eq(lt_sorted.tensor[:,3], torch.ones(20) * 4).all()
-    assert torch.eq(lt_sorted.tensor[:,4], torch.ones(20) * 5).all()
+    assert torch.eq(lt_sorted.tensor[:, 0], torch.ones(20) * 1).all()
+    assert torch.eq(lt_sorted.tensor[:, 1], torch.ones(20) * 2).all()
+    assert torch.eq(lt_sorted.tensor[:, 2], torch.ones(20) * 3).all()
+    assert torch.eq(lt_sorted.tensor[:, 3], torch.ones(20) * 4).all()
+    assert torch.eq(lt_sorted.tensor[:, 4], torch.ones(20) * 5).all()

     data = torch.ones(20, 4, 5)
-    data[:,0,:] = data[:,0]*4
-    data[:,1,:] = data[:,1]*2
-    data[:,2,:] = data[:,2]
-    data[:,3,:] = data[:,3]*3
+    data[:, 0, :] = data[:, 0] * 4
+    data[:, 1, :] = data[:, 1] * 2
+    data[:, 2, :] = data[:, 2]
+    data[:, 3, :] = data[:, 3] * 3
     labels = {1: {'dof': ['d', 'b', 'a', 'c'], 'name': 1}}
     lt_data = LabelTensor(data, labels)
     lt_sorted = LabelTensor.sort_labels(lt_data, dim=1)
-    assert lt_sorted.shape == (20,4, 5)
+    assert lt_sorted.shape == (20, 4, 5)
     assert lt_sorted.full_labels[1]['dof'] == ['a', 'b', 'c', 'd']
-    assert torch.eq(lt_sorted.tensor[:,0,:], torch.ones(20,5) * 1).all()
-    assert torch.eq(lt_sorted.tensor[:,1,:], torch.ones(20,5) * 2).all()
-    assert torch.eq(lt_sorted.tensor[:,2,:], torch.ones(20,5) * 3).all()
-    assert torch.eq(lt_sorted.tensor[:,3,:], torch.ones(20,5) * 4).all()
+    assert torch.eq(lt_sorted.tensor[:, 0, :], torch.ones(20, 5) * 1).all()
+    assert torch.eq(lt_sorted.tensor[:, 1, :], torch.ones(20, 5) * 2).all()
+    assert torch.eq(lt_sorted.tensor[:, 2, :], torch.ones(20, 5) * 3).all()
+    assert torch.eq(lt_sorted.tensor[:, 3, :], torch.ones(20, 5) * 4).all()
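test_sorting expects columns to be permuted together with their labels. In bare torch, the reordering the test describes can be sketched like this (an illustration of the expected semantics, not PINA's implementation):

    import torch

    labels = ['d', 'b', 'a', 'e', 'c']
    data = torch.tensor([[4., 2., 1., 5., 3.]]).repeat(20, 1)

    perm = sorted(range(len(labels)), key=lambda i: labels[i])
    sorted_labels = [labels[i] for i in perm]     # ['a', 'b', 'c', 'd', 'e']
    sorted_data = data[:, perm]                   # columns follow their labels
    assert torch.eq(sorted_data[:, 0], torch.ones(20)).all()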
@@ -54,9 +54,8 @@ def test_extract_order():
     label_to_extract = ['c', 'a']
     tensor = LabelTensor(data, labels)
     new = tensor.extract(label_to_extract)
-    expected = torch.cat(
-        (data[:, 2].reshape(-1, 1), data[:, 0].reshape(-1, 1)),
-        dim=1)
+    expected = torch.cat((data[:, 2].reshape(-1, 1), data[:, 0].reshape(-1, 1)),
+                         dim=1)
     assert new.labels == label_to_extract
     assert new.shape[1] == len(label_to_extract)
     assert torch.all(torch.isclose(expected, new))
@@ -91,6 +90,7 @@ def test_getitem():
     assert tensor_view.labels == ['a', 'c']
     assert torch.allclose(tensor_view, data[:, 0::2])

+
 def test_getitem2():
     tensor = LabelTensor(data, labels)
     tensor_view = tensor[:5]
@@ -101,6 +101,7 @@ def test_getitem2():
     tensor_view = tensor[idx]
     assert tensor_view.labels == labels

+
 def test_slice():
     tensor = LabelTensor(data, labels)
     tensor_view = tensor[:5, :2]
@@ -114,4 +115,4 @@ def test_slice():

     tensor_view3 = tensor[:, 2]
     assert tensor_view3.labels == labels[2]
-    assert torch.allclose(tensor_view3, data[:, 2].reshape(-1, 1))
+    assert torch.allclose(tensor_view3, data[:, 2].reshape(-1, 1))
@@ -39,6 +39,7 @@ def test_grad_scalar_output():
     ]
     assert torch.allclose(grad_tensor_s, true_val)

+
 def test_grad_vector_output():
     grad_tensor_v = grad(tensor_v, inp)
     true_val = torch.cat(
@@ -75,6 +76,7 @@ def test_grad_vector_output():
     ]
     assert torch.allclose(grad_tensor_v, true_val)

+
 def test_div_vector_output():
     div_tensor_v = div(tensor_v, inp)
     true_val = 2*torch.sum(inp, dim=1).reshape(-1,1)
@@ -88,6 +90,7 @@ def test_div_vector_output():
     assert div_tensor_v.labels == [f'dadx+dbdy']
     assert torch.allclose(div_tensor_v, true_val)

+
 def test_laplacian_scalar_output():
     laplace_tensor_s = laplacian(tensor_s, inp)
     true_val = 6*torch.ones_like(laplace_tensor_s)
@@ -101,6 +104,7 @@ def test_laplacian_scalar_output():
     assert laplace_tensor_s.labels == [f"dd{tensor_s.labels[0]}"]
     assert torch.allclose(laplace_tensor_s, true_val)

+
 def test_laplacian_vector_output():
     laplace_tensor_v = laplacian(tensor_v, inp)
     true_val = 2*torch.ones_like(tensor_v)
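The grad/div/laplacian tests above rely on reverse-mode differentiation of outputs with respect to inputs. Stripped of PINA's label bookkeeping, the core computation is torch.autograd.grad:

    import torch

    inp = torch.rand(10, 2, requires_grad=True)
    out = (inp ** 2).sum(dim=1, keepdim=True)     # u(x, y) = x^2 + y^2

    grad_u = torch.autograd.grad(out,
                                 inp,
                                 grad_outputs=torch.ones_like(out),
                                 create_graph=True)[0]
    assert torch.allclose(grad_u, 2 * inp)        # du/dx = 2x, du/dy = 2y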
@@ -1,20 +1,18 @@

 import torch
 import pytest
 from pina import TorchOptimizer

 opt_list = [
-    torch.optim.Adam,
-    torch.optim.AdamW,
-    torch.optim.SGD,
-    torch.optim.RMSprop
+    torch.optim.Adam, torch.optim.AdamW, torch.optim.SGD, torch.optim.RMSprop
 ]

+
 @pytest.mark.parametrize("optimizer_class", opt_list)
 def test_constructor(optimizer_class):
     TorchOptimizer(optimizer_class, lr=1e-3)

 @pytest.mark.parametrize("optimizer_class", opt_list)
 def test_hook(optimizer_class):
     opt = TorchOptimizer(optimizer_class, lr=1e-3)
-    opt.hook(torch.nn.Linear(10, 10).parameters())
+    opt.hook(torch.nn.Linear(10, 10).parameters())
@@ -27,42 +27,42 @@ class Poisson(SpatialProblem):

     conditions = {
         'gamma1':
-        Condition(domain=CartesianDomain({
-            'x': [0, 1],
-            'y': 1
-        }),
-            equation=FixedValue(0.0)),
+        Condition(domain=CartesianDomain({
+            'x': [0, 1],
+            'y': 1
+        }),
+                  equation=FixedValue(0.0)),
         'gamma2':
-        Condition(domain=CartesianDomain({
-            'x': [0, 1],
-            'y': 0
-        }),
-            equation=FixedValue(0.0)),
+        Condition(domain=CartesianDomain({
+            'x': [0, 1],
+            'y': 0
+        }),
+                  equation=FixedValue(0.0)),
         'gamma3':
-        Condition(domain=CartesianDomain({
-            'x': 1,
-            'y': [0, 1]
-        }),
-            equation=FixedValue(0.0)),
+        Condition(domain=CartesianDomain({
+            'x': 1,
+            'y': [0, 1]
+        }),
+                  equation=FixedValue(0.0)),
         'gamma4':
-        Condition(domain=CartesianDomain({
-            'x': 0,
-            'y': [0, 1]
-        }),
-            equation=FixedValue(0.0)),
+        Condition(domain=CartesianDomain({
+            'x': 0,
+            'y': [0, 1]
+        }),
+                  equation=FixedValue(0.0)),
         'D':
-        Condition(domain=CartesianDomain({
-            'x': [0, 1],
-            'y': [0, 1]
-        }),
-            equation=my_laplace),
+        Condition(domain=CartesianDomain({
+            'x': [0, 1],
+            'y': [0, 1]
+        }),
+                  equation=my_laplace),
         'data':
-        Condition(input_points=in_, output_points=out_)
+        Condition(input_points=in_, output_points=out_)
     }

     def poisson_sol(self, pts):
         return -(torch.sin(pts.extract(['x']) * torch.pi) *
-                 torch.sin(pts.extract(['y']) * torch.pi)) / (2 * torch.pi ** 2)
+                 torch.sin(pts.extract(['y']) * torch.pi)) / (2 * torch.pi**2)

     truth_solution = poisson_sol

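A quick sanity check on poisson_sol, assuming my_laplace encodes the forcing Δu = sin(πx)sin(πy): for u(x, y) = −sin(πx)sin(πy)/(2π²), each second derivative contributes π²·sin(πx)sin(πy)/(2π²), so Δu = sin(πx)sin(πy). The sign and the 2π² factor in the return line are therefore consistent with that forcing term and with the zero boundary values imposed by the gamma conditions on the unit square.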
@@ -79,7 +79,7 @@ def test_discretise_domain():
     assert poisson_problem.input_pts[b].shape[0] == n

     poisson_problem.discretise_domain(n, 'grid', locations=['D'])
-    assert poisson_problem.input_pts['D'].shape[0] == n ** 2
+    assert poisson_problem.input_pts['D'].shape[0] == n**2
     poisson_problem.discretise_domain(n, 'random', locations=['D'])
     assert poisson_problem.input_pts['D'].shape[0] == n

@@ -91,6 +91,7 @@ def test_discretise_domain():

     poisson_problem.discretise_domain(n)

+
 def test_sampling_few_variables():
     n = 10
     poisson_problem = Poisson()
@@ -115,9 +116,8 @@ def test_variables_correct_order_sampling():
                                       variables=['y'])
     assert poisson_problem.input_pts['D'].labels == sorted(
         poisson_problem.input_variables)
-    poisson_problem.discretise_domain(n,
-                                      'grid',
-                                      locations=['D'])
+
+    poisson_problem.discretise_domain(n, 'grid', locations=['D'])
     assert poisson_problem.input_pts['D'].labels == sorted(
         poisson_problem.input_variables)
     poisson_problem.discretise_domain(n,
@@ -131,6 +131,7 @@ def test_variables_correct_order_sampling():
     assert poisson_problem.input_pts['D'].labels == sorted(
         poisson_problem.input_variables)

+
 def test_add_points():
     poisson_problem = Poisson()
     poisson_problem.discretise_domain(0,
@@ -139,8 +140,10 @@ def test_add_points():
                                       variables=['x', 'y'])
     new_pts = LabelTensor(torch.tensor([[0.5, -0.5]]), labels=['x', 'y'])
     poisson_problem.add_points({'D': new_pts})
-    assert torch.isclose(poisson_problem.input_pts['D'].extract('x'), new_pts.extract('x'))
-    assert torch.isclose(poisson_problem.input_pts['D'].extract('y'), new_pts.extract('y'))
+    assert torch.isclose(poisson_problem.input_pts['D'].extract('x'),
+                         new_pts.extract('x'))
+    assert torch.isclose(poisson_problem.input_pts['D'].extract('y'),
+                         new_pts.extract('y'))


 def test_collector():