Simplify Graph class (#459)
* Simplify the Graph class and adjust the tests

Co-authored-by: Dario Coscia <dariocos99@gmail.com>
Committed by: Nicola Demo
Parent: 4c3e305b09
Commit: ab6ca78d85
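In practice, the change replaces the single batched RadiusGraph/KNNGraph object with a plain Python list holding one graph per sample, built with keyword arguments. A minimal sketch of the new construction, using only argument names that appear in the updated tests below (the import path is an assumption for illustration and may differ in your PINA version):

import torch

# Assumed import path; adjust to wherever RadiusGraph lives in your PINA install.
from pina.graph import RadiusGraph

x = torch.rand((100, 50, 10))   # 100 samples, 50 nodes, 10 node features
pos = torch.rand((100, 50, 2))  # node positions in 2D

# Old style (removed): one object built from the batched tensors
# input_graph = RadiusGraph(x, pos, r=.1, build_edge_attr=True)

# New style: one graph per sample, collected in a list
input_graph = [
    RadiusGraph(x=x_, pos=pos_, radius=0.2) for x_, pos_ in zip(x, pos)
]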
@@ -15,16 +15,15 @@ output_tensor = torch.rand((100, 2))
 
 x = torch.rand((100, 50, 10))
 pos = torch.rand((100, 50, 2))
-input_graph = RadiusGraph(x, pos, r=.1, build_edge_attr=True)
+input_graph = [
+    RadiusGraph(x=x_, pos=pos_, radius=0.2) for x_, pos_, in zip(x, pos)
+]
 output_graph = torch.rand((100, 50, 10))
 
 
 @pytest.mark.parametrize(
     "input_, output_",
-    [
-        (input_tensor, output_tensor),
-        (input_graph, output_graph)
-    ]
+    [(input_tensor, output_tensor), (input_graph, output_graph)],
 )
 def test_constructor(input_, output_):
     problem = SupervisedProblem(input_=input_, output_=output_)
@@ -33,22 +32,16 @@ def test_constructor(input_, output_):
 
 @pytest.mark.parametrize(
     "input_, output_",
-    [
-        (input_tensor, output_tensor),
-        (input_graph, output_graph)
-    ]
+    [(input_tensor, output_tensor), (input_graph, output_graph)],
 )
 @pytest.mark.parametrize(
-    "train_size, val_size, test_size",
-    [
-        (.7, .2, .1),
-        (.7, .3, 0)
-    ]
+    "train_size, val_size, test_size", [(0.7, 0.2, 0.1), (0.7, 0.3, 0)]
 )
 def test_setup_train(input_, output_, train_size, val_size, test_size):
     problem = SupervisedProblem(input_=input_, output_=output_)
-    dm = PinaDataModule(problem, train_size=train_size,
-                        val_size=val_size, test_size=test_size)
+    dm = PinaDataModule(
+        problem, train_size=train_size, val_size=val_size, test_size=test_size
+    )
     dm.setup()
     assert hasattr(dm, "train_dataset")
     if isinstance(input_, torch.Tensor):
@@ -71,23 +64,17 @@ def test_setup_train(input_, output_, train_size, val_size, test_size):
 
 @pytest.mark.parametrize(
     "input_, output_",
-    [
-        (input_tensor, output_tensor),
-        (input_graph, output_graph)
-    ]
+    [(input_tensor, output_tensor), (input_graph, output_graph)],
 )
 @pytest.mark.parametrize(
-    "train_size, val_size, test_size",
-    [
-        (.7, .2, .1),
-        (0., 0., 1.)
-    ]
+    "train_size, val_size, test_size", [(0.7, 0.2, 0.1), (0.0, 0.0, 1.0)]
 )
 def test_setup_test(input_, output_, train_size, val_size, test_size):
     problem = SupervisedProblem(input_=input_, output_=output_)
-    dm = PinaDataModule(problem, train_size=train_size,
-                        val_size=val_size, test_size=test_size)
-    dm.setup(stage='test')
+    dm = PinaDataModule(
+        problem, train_size=train_size, val_size=val_size, test_size=test_size
+    )
+    dm.setup(stage="test")
     if train_size > 0:
         assert hasattr(dm, "train_dataset")
         assert dm.train_dataset is None
@@ -109,16 +96,14 @@ def test_setup_test(input_, output_, train_size, val_size, test_size):
 
 @pytest.mark.parametrize(
     "input_, output_",
-    [
-        (input_tensor, output_tensor),
-        (input_graph, output_graph)
-    ]
+    [(input_tensor, output_tensor), (input_graph, output_graph)],
 )
 def test_dummy_dataloader(input_, output_):
     problem = SupervisedProblem(input_=input_, output_=output_)
     solver = SupervisedSolver(problem=problem, model=torch.nn.Linear(10, 10))
-    trainer = Trainer(solver, batch_size=None, train_size=.7,
-                      val_size=.3, test_size=0.)
+    trainer = Trainer(
+        solver, batch_size=None, train_size=0.7, val_size=0.3, test_size=0.0
+    )
     dm = trainer.data_module
     dm.setup()
     dm.trainer = trainer
@@ -128,11 +113,11 @@ def test_dummy_dataloader(input_, output_):
     data = next(dataloader)
     assert isinstance(data, list)
     assert isinstance(data[0], tuple)
-    if isinstance(input_, RadiusGraph):
-        assert isinstance(data[0][1]['input_points'], Batch)
+    if isinstance(input_, list):
+        assert isinstance(data[0][1]["input_points"], Batch)
     else:
-        assert isinstance(data[0][1]['input_points'], torch.Tensor)
-    assert isinstance(data[0][1]['output_points'], torch.Tensor)
+        assert isinstance(data[0][1]["input_points"], torch.Tensor)
+    assert isinstance(data[0][1]["output_points"], torch.Tensor)
 
     dataloader = dm.val_dataloader()
     assert isinstance(dataloader, DummyDataloader)
@@ -140,31 +125,29 @@ def test_dummy_dataloader(input_, output_):
     data = next(dataloader)
     assert isinstance(data, list)
    assert isinstance(data[0], tuple)
-    if isinstance(input_, RadiusGraph):
-        assert isinstance(data[0][1]['input_points'], Batch)
+    if isinstance(input_, list):
+        assert isinstance(data[0][1]["input_points"], Batch)
     else:
-        assert isinstance(data[0][1]['input_points'], torch.Tensor)
-    assert isinstance(data[0][1]['output_points'], torch.Tensor)
+        assert isinstance(data[0][1]["input_points"], torch.Tensor)
+    assert isinstance(data[0][1]["output_points"], torch.Tensor)
 
 
 @pytest.mark.parametrize(
     "input_, output_",
-    [
-        (input_tensor, output_tensor),
-        (input_graph, output_graph)
-    ]
-)
-@pytest.mark.parametrize(
-    "automatic_batching",
-    [
-        True, False
-    ]
+    [(input_tensor, output_tensor), (input_graph, output_graph)],
 )
+@pytest.mark.parametrize("automatic_batching", [True, False])
 def test_dataloader(input_, output_, automatic_batching):
     problem = SupervisedProblem(input_=input_, output_=output_)
     solver = SupervisedSolver(problem=problem, model=torch.nn.Linear(10, 10))
-    trainer = Trainer(solver, batch_size=10, train_size=.7, val_size=.3,
-                      test_size=0., automatic_batching=automatic_batching)
+    trainer = Trainer(
+        solver,
+        batch_size=10,
+        train_size=0.7,
+        val_size=0.3,
+        test_size=0.0,
+        automatic_batching=automatic_batching,
+    )
     dm = trainer.data_module
     dm.setup()
     dm.trainer = trainer
@@ -173,51 +156,53 @@ def test_dataloader(input_, output_, automatic_batching):
     assert len(dataloader) == 7
     data = next(iter(dataloader))
     assert isinstance(data, dict)
-    if isinstance(input_, RadiusGraph):
-        assert isinstance(data['data']['input_points'], Batch)
+    if isinstance(input_, list):
+        assert isinstance(data["data"]["input_points"], Batch)
     else:
-        assert isinstance(data['data']['input_points'], torch.Tensor)
-    assert isinstance(data['data']['output_points'], torch.Tensor)
+        assert isinstance(data["data"]["input_points"], torch.Tensor)
+    assert isinstance(data["data"]["output_points"], torch.Tensor)
 
     dataloader = dm.val_dataloader()
     assert isinstance(dataloader, DataLoader)
     assert len(dataloader) == 3
     data = next(iter(dataloader))
     assert isinstance(data, dict)
-    if isinstance(input_, RadiusGraph):
-        assert isinstance(data['data']['input_points'], Batch)
+    if isinstance(input_, list):
+        assert isinstance(data["data"]["input_points"], Batch)
     else:
-        assert isinstance(data['data']['input_points'], torch.Tensor)
-    assert isinstance(data['data']['output_points'], torch.Tensor)
+        assert isinstance(data["data"]["input_points"], torch.Tensor)
+    assert isinstance(data["data"]["output_points"], torch.Tensor)
 
 
 from pina import LabelTensor
 
-input_tensor = LabelTensor(torch.rand((100, 3)), ['u', 'v', 'w'])
-output_tensor = LabelTensor(torch.rand((100, 3)), ['u', 'v', 'w'])
+input_tensor = LabelTensor(torch.rand((100, 3)), ["u", "v", "w"])
+output_tensor = LabelTensor(torch.rand((100, 3)), ["u", "v", "w"])
 
+x = LabelTensor(torch.rand((100, 50, 3)), ["u", "v", "w"])
+pos = LabelTensor(torch.rand((100, 50, 2)), ["x", "y"])
+input_graph = [
+    RadiusGraph(x=x[i], pos=pos[i], radius=0.1) for i in range(len(x))
+]
+output_graph = LabelTensor(torch.rand((100, 50, 3)), ["u", "v", "w"])
 
-x = LabelTensor(torch.rand((100, 50, 3)), ['u', 'v', 'w'])
-pos = LabelTensor(torch.rand((100, 50, 2)), ['x', 'y'])
-input_graph = RadiusGraph(x, pos, r=.1, build_edge_attr=True)
-output_graph = LabelTensor(torch.rand((100, 50, 3)), ['u', 'v', 'w'])
 
 @pytest.mark.parametrize(
     "input_, output_",
-    [
-        (input_tensor, output_tensor),
-        (input_graph, output_graph)
-    ]
-)
-@pytest.mark.parametrize(
-    "automatic_batching",
-    [
-        True, False
-    ]
+    [(input_tensor, output_tensor), (input_graph, output_graph)],
 )
+@pytest.mark.parametrize("automatic_batching", [True, False])
 def test_dataloader_labels(input_, output_, automatic_batching):
     problem = SupervisedProblem(input_=input_, output_=output_)
     solver = SupervisedSolver(problem=problem, model=torch.nn.Linear(10, 10))
-    trainer = Trainer(solver, batch_size=10, train_size=.7, val_size=.3,
-                      test_size=0., automatic_batching=automatic_batching)
+    trainer = Trainer(
+        solver,
+        batch_size=10,
+        train_size=0.7,
+        val_size=0.3,
+        test_size=0.0,
+        automatic_batching=automatic_batching,
+    )
     dm = trainer.data_module
     dm.setup()
     dm.trainer = trainer
@@ -226,31 +211,30 @@ def test_dataloader_labels(input_, output_, automatic_batching):
     assert len(dataloader) == 7
     data = next(iter(dataloader))
     assert isinstance(data, dict)
-    if isinstance(input_, RadiusGraph):
-        assert isinstance(data['data']['input_points'], Batch)
-        assert isinstance(data['data']['input_points'].x, LabelTensor)
-        assert data['data']['input_points'].x.labels == ['u', 'v', 'w']
-        assert data['data']['input_points'].pos.labels == ['x', 'y']
-    else:
-        assert isinstance(data['data']['input_points'], LabelTensor)
-        assert data['data']['input_points'].labels == ['u', 'v', 'w']
-        assert isinstance(data['data']['output_points'], LabelTensor)
-        assert data['data']['output_points'].labels == ['u', 'v', 'w']
+    if isinstance(input_, list):
+        assert isinstance(data["data"]["input_points"], Batch)
+        assert isinstance(data["data"]["input_points"].x, LabelTensor)
+        assert data["data"]["input_points"].x.labels == ["u", "v", "w"]
+        assert data["data"]["input_points"].pos.labels == ["x", "y"]
+    else:
+        assert isinstance(data["data"]["input_points"], LabelTensor)
+        assert data["data"]["input_points"].labels == ["u", "v", "w"]
+        assert isinstance(data["data"]["output_points"], LabelTensor)
+        assert data["data"]["output_points"].labels == ["u", "v", "w"]
 
     dataloader = dm.val_dataloader()
     assert isinstance(dataloader, DataLoader)
     assert len(dataloader) == 3
     data = next(iter(dataloader))
     assert isinstance(data, dict)
-    if isinstance(input_, RadiusGraph):
-        assert isinstance(data['data']['input_points'], Batch)
-        assert isinstance(data['data']['input_points'].x, LabelTensor)
-        assert data['data']['input_points'].x.labels == ['u', 'v', 'w']
-        assert data['data']['input_points'].pos.labels == ['x', 'y']
+    if isinstance(input_, list):
+        assert isinstance(data["data"]["input_points"], Batch)
+        assert isinstance(data["data"]["input_points"].x, LabelTensor)
+        assert data["data"]["input_points"].x.labels == ["u", "v", "w"]
+        assert data["data"]["input_points"].pos.labels == ["x", "y"]
     else:
-        assert isinstance(data['data']['input_points'], torch.Tensor)
-        assert isinstance(data['data']['input_points'], LabelTensor)
-        assert data['data']['input_points'].labels == ['u', 'v', 'w']
-        assert isinstance(data['data']['output_points'], torch.Tensor)
-        assert data['data']['output_points'].labels == ['u', 'v', 'w']
-test_dataloader_labels(input_graph, output_graph, True)
+        assert isinstance(data["data"]["input_points"], torch.Tensor)
+        assert isinstance(data["data"]["input_points"], LabelTensor)
+        assert data["data"]["input_points"].labels == ["u", "v", "w"]
+        assert isinstance(data["data"]["output_points"], torch.Tensor)
+        assert data["data"]["output_points"].labels == ["u", "v", "w"]
@@ -6,55 +6,58 @@ from torch_geometric.data import Data
 
 x = torch.rand((100, 20, 10))
 pos = torch.rand((100, 20, 2))
-input_ = KNNGraph(x=x, pos=pos, k=3, build_edge_attr=True)
+input_ = [
+    KNNGraph(x=x_, pos=pos_, neighbours=3, edge_attr=True)
+    for x_, pos_ in zip(x, pos)
+]
 output_ = torch.rand((100, 20, 10))
 
 x_2 = torch.rand((50, 20, 10))
 pos_2 = torch.rand((50, 20, 2))
-input_2_ = KNNGraph(x=x_2, pos=pos_2, k=3, build_edge_attr=True)
+input_2_ = [
+    KNNGraph(x=x_, pos=pos_, neighbours=3, edge_attr=True)
+    for x_, pos_ in zip(x_2, pos_2)
+]
 output_2_ = torch.rand((50, 20, 10))
 
 
 # Problem with a single condition
 conditions_dict_single = {
-    'data': {
-        'input_points': input_.data,
-        'output_points': output_,
+    "data": {
+        "input_points": input_,
+        "output_points": output_,
     }
 }
-max_conditions_lengths_single = {
-    'data': 100
-}
+max_conditions_lengths_single = {"data": 100}
 
 # Problem with multiple conditions
 conditions_dict_single_multi = {
-    'data_1': {
-        'input_points': input_.data,
-        'output_points': output_,
+    "data_1": {
+        "input_points": input_,
+        "output_points": output_,
     },
+    "data_2": {
+        "input_points": input_2_,
+        "output_points": output_2_,
+    },
-    'data_2': {
-        'input_points': input_2_.data,
-        'output_points': output_2_,
-    }
 }
 
-max_conditions_lengths_multi = {
-    'data_1': 100,
-    'data_2': 50
-}
+max_conditions_lengths_multi = {"data_1": 100, "data_2": 50}
 
 
 @pytest.mark.parametrize(
     "conditions_dict, max_conditions_lengths",
     [
         (conditions_dict_single, max_conditions_lengths_single),
-        (conditions_dict_single_multi, max_conditions_lengths_multi)
-    ]
+        (conditions_dict_single_multi, max_conditions_lengths_multi),
+    ],
 )
 def test_constructor(conditions_dict, max_conditions_lengths):
-    dataset = PinaDatasetFactory(conditions_dict,
-                                 max_conditions_lengths=max_conditions_lengths,
-                                 automatic_batching=True)
+    dataset = PinaDatasetFactory(
+        conditions_dict,
+        max_conditions_lengths=max_conditions_lengths,
+        automatic_batching=True,
+    )
     assert isinstance(dataset, PinaGraphDataset)
     assert len(dataset) == 100
 
@@ -63,39 +66,67 @@ def test_constructor(conditions_dict, max_conditions_lengths):
     "conditions_dict, max_conditions_lengths",
     [
         (conditions_dict_single, max_conditions_lengths_single),
-        (conditions_dict_single_multi, max_conditions_lengths_multi)
-    ]
+        (conditions_dict_single_multi, max_conditions_lengths_multi),
+    ],
 )
 def test_getitem(conditions_dict, max_conditions_lengths):
-    dataset = PinaDatasetFactory(conditions_dict,
-                                 max_conditions_lengths=max_conditions_lengths,
-                                 automatic_batching=True)
+    dataset = PinaDatasetFactory(
+        conditions_dict,
+        max_conditions_lengths=max_conditions_lengths,
+        automatic_batching=True,
+    )
     data = dataset[50]
     assert isinstance(data, dict)
-    assert all([isinstance(d['input_points'], Data)
-                for d in data.values()])
-    assert all([isinstance(d['output_points'], torch.Tensor)
-                for d in data.values()])
-    assert all([d['input_points'].x.shape == torch.Size((20, 10))
-                for d in data.values()])
-    assert all([d['output_points'].shape == torch.Size((20, 10))
-                for d in data.values()])
-    assert all([d['input_points'].edge_index.shape ==
-                torch.Size((2, 60)) for d in data.values()])
-    assert all([d['input_points'].edge_attr.shape[0]
-                == 60 for d in data.values()])
+    assert all([isinstance(d["input_points"], Data) for d in data.values()])
+    assert all(
+        [isinstance(d["output_points"], torch.Tensor) for d in data.values()]
+    )
+    assert all(
+        [
+            d["input_points"].x.shape == torch.Size((20, 10))
+            for d in data.values()
+        ]
+    )
+    assert all(
+        [
+            d["output_points"].shape == torch.Size((20, 10))
+            for d in data.values()
+        ]
+    )
+    assert all(
+        [
+            d["input_points"].edge_index.shape == torch.Size((2, 60))
+            for d in data.values()
+        ]
+    )
+    assert all(
+        [d["input_points"].edge_attr.shape[0] == 60 for d in data.values()]
+    )
 
     data = dataset.fetch_from_idx_list([i for i in range(20)])
     assert isinstance(data, dict)
-    assert all([isinstance(d['input_points'], Data)
-                for d in data.values()])
-    assert all([isinstance(d['output_points'], torch.Tensor)
-                for d in data.values()])
-    assert all([d['input_points'].x.shape == torch.Size((400, 10))
-                for d in data.values()])
-    assert all([d['output_points'].shape == torch.Size((400, 10))
-                for d in data.values()])
-    assert all([d['input_points'].edge_index.shape ==
-                torch.Size((2, 1200)) for d in data.values()])
-    assert all([d['input_points'].edge_attr.shape[0]
-                == 1200 for d in data.values()])
+    assert all([isinstance(d["input_points"], Data) for d in data.values()])
+    assert all(
+        [isinstance(d["output_points"], torch.Tensor) for d in data.values()]
+    )
+    assert all(
+        [
+            d["input_points"].x.shape == torch.Size((400, 10))
+            for d in data.values()
+        ]
+    )
+    assert all(
+        [
+            d["output_points"].shape == torch.Size((400, 10))
+            for d in data.values()
+        ]
+    )
+    assert all(
+        [
+            d["input_points"].edge_index.shape == torch.Size((2, 1200))
+            for d in data.values()
+        ]
+    )
+    assert all(
+        [d["input_points"].edge_attr.shape[0] == 1200 for d in data.values()]
+    )
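As a usage sketch, the updated tests hand the list of per-sample graphs straight to SupervisedProblem and leave batching to the Trainer's data module, which collates the graphs into torch_geometric Batch objects. The class names and keyword arguments below are taken from the tests above; the import paths are assumptions and may need adjusting for your PINA version:

import torch

# Assumed import paths; class names and arguments appear in the tests above.
from pina.graph import RadiusGraph
from pina.problem.zoo import SupervisedProblem
from pina.solver import SupervisedSolver
from pina.trainer import Trainer

x = torch.rand((100, 50, 10))
pos = torch.rand((100, 50, 2))
input_graph = [
    RadiusGraph(x=x_, pos=pos_, radius=0.2) for x_, pos_ in zip(x, pos)
]
output_graph = torch.rand((100, 50, 10))

problem = SupervisedProblem(input_=input_graph, output_=output_graph)
solver = SupervisedSolver(problem=problem, model=torch.nn.Linear(10, 10))
trainer = Trainer(
    solver,
    batch_size=10,
    train_size=0.7,
    val_size=0.3,
    test_size=0.0,
    automatic_batching=False,
)
dm = trainer.data_module
dm.setup()  # per-sample graphs are collated into torch_geometric Batch objects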