Filippo0.2 (#361)

* Add summation and remove deepcopy (only for tensors) in LabelTensor class
* Update operators for compatibility with updated LabelTensor implementation
* Implement labels.setter in LabelTensor class
* Update LabelTensor

---------

Co-authored-by: FilippoOlivo <filippo@filippoolivo.com>
Author: Dario Coscia
Date: 2024-10-04 15:59:09 +02:00
Committed by: Nicola Demo
Parent: 1d3df2a127
Commit: fdb8f65143
4 changed files with 212 additions and 92 deletions
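For orientation before the diffs: the reworked LabelTensor stores labels internally as a per-dimension dict of the form {dim: {'name': ..., 'dof': [...]}}, and the new labels setter accepts a string, a list, or such a dict. The following is a minimal usage sketch inferred from the diffs and tests below, not code taken from the commit itself:

    import torch
    from pina.label_tensor import LabelTensor

    # A list of labels is attached to the last dimension; the other
    # dimensions fall back to default range/index labels.
    lt = LabelTensor(torch.rand(20, 3), ['x', 'y', 'z'])

    # A dict addresses one dimension explicitly by its index.
    lt2 = LabelTensor(torch.rand(20, 3),
                      {1: {'name': 'space', 'dof': ['x', 'y', 'z']}})

    # Extraction by dof name keeps the label metadata consistent.
    xy = lt.extract(['x', 'y'])            # shape (20, 2)

    # The new static method sums label-consistent tensors element-wise.
    total = LabelTensor.summation([lt, lt])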

--- a/pina/label_tensor.py
+++ b/pina/label_tensor.py

@@ -35,14 +35,34 @@ class LabelTensor(torch.Tensor):
             {1: {"name": "space", "dof": ['a', 'b', 'c']}})
         """
-        self.labels = None
+        self.labels = labels
+
+    @property
+    def labels(self):
+        """Property decorator for labels.
+
+        :return: labels of self
+        :rtype: list
+        """
+        return self._labels
+
+    @labels.setter
+    def labels(self, labels):
+        """
+        Properly set the internal parameter ``_labels``.
+
+        :param labels: labels to assign to the class variable ``_labels``
+        :type labels: str | list(str) | dict
+        """
+        if not hasattr(self, 'labels'):
+            self.init_labels()
         if isinstance(labels, dict):
-            self.update_labels(labels)
+            self.update_labels_from_dict(labels)
         elif isinstance(labels, list):
-            self.init_labels_from_list(labels)
+            self.update_labels_from_list(labels)
         elif isinstance(labels, str):
             labels = [labels]
-            self.init_labels_from_list(labels)
+            self.update_labels_from_list(labels)
         else:
             raise ValueError("labels must be a list, dict or string.")
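In other words, the setter funnels every accepted format into the per-dimension dict, initializing default labels first. A small sketch of the resulting behavior (assumed from the code above, not part of the diff):

    import torch
    from pina.label_tensor import LabelTensor

    lt = LabelTensor(torch.rand(20, 1), 'u')    # a plain string becomes ['u']
    assert lt.labels[1]['dof'] == ['u']         # attached to the last dimension

    lt.labels = {0: {'name': 'samples', 'dof': range(20)}}  # dicts merge per dim
    assert lt.labels[1]['dof'] == ['u']         # earlier labels are preserved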
@@ -60,38 +80,38 @@ class LabelTensor(torch.Tensor):
         if isinstance(label_to_extract, (str, int)):
             label_to_extract = [label_to_extract]
         if isinstance(label_to_extract, (tuple, list)):
-            last_dim_label = self.labels[self.tensor.ndim - 1]['dof']
+            last_dim_label = self._labels[self.tensor.ndim - 1]['dof']
             if set(label_to_extract).issubset(last_dim_label) is False:
                 raise ValueError('Cannot extract a dof which is not in the original LabelTensor')
             idx_to_extract = [last_dim_label.index(i) for i in label_to_extract]
-            new_tensor = deepcopy(self.tensor)
+            new_tensor = self.tensor
             new_tensor = new_tensor[..., idx_to_extract]
-            new_labels = deepcopy(self.labels)
+            new_labels = deepcopy(self._labels)
             last_dim_new_label = {self.tensor.ndim - 1: {
                 'dof': label_to_extract,
-                'name': self.labels[self.tensor.ndim - 1]['name']
+                'name': self._labels[self.tensor.ndim - 1]['name']
             }}
             new_labels.update(last_dim_new_label)
         elif isinstance(label_to_extract, dict):
-            new_labels = (deepcopy(self.labels))
-            new_tensor = deepcopy(self.tensor)
+            new_labels = deepcopy(self._labels)
+            new_tensor = self.tensor
             for k, v in label_to_extract.items():
                 idx_dim = None
-                for kl, vl in self.labels.items():
+                for kl, vl in self._labels.items():
                     if vl['name'] == k:
                         idx_dim = kl
                         break
-                dim_labels = self.labels[idx_dim]['dof']
+                dim_labels = self._labels[idx_dim]['dof']
                 if isinstance(label_to_extract[k], (int, str)):
                     label_to_extract[k] = [label_to_extract[k]]
                 if set(label_to_extract[k]).issubset(dim_labels) is False:
-                    raise ValueError('Cannot extract a dof which is not in the original labeltensor')
+                    raise ValueError('Cannot extract a dof which is not in the original LabelTensor')
                 idx_to_extract = [dim_labels.index(i) for i in label_to_extract[k]]
                 indexer = [slice(None)] * idx_dim + [idx_to_extract] + [slice(None)] * (self.tensor.ndim - idx_dim - 1)
                 new_tensor = new_tensor[indexer]
                 dim_new_label = {idx_dim: {
                     'dof': label_to_extract[k],
-                    'name': self.labels[idx_dim]['name']
+                    'name': self._labels[idx_dim]['name']
                 }}
                 new_labels.update(dim_new_label)
         else:
@@ -104,7 +124,7 @@ class LabelTensor(torch.Tensor):
         """
         s = ''
-        for key, value in self.labels.items():
+        for key, value in self._labels.items():
             s += f"{key}: {value}\n"
         s += '\n'
         s += super().__str__()
@@ -155,7 +175,7 @@ class LabelTensor(torch.Tensor):
     def requires_grad_(self, mode=True):
         lt = super().requires_grad_(mode)
-        lt.labels = self.labels
+        lt.labels = self._labels
         return lt

     @property
@@ -181,10 +201,19 @@ class LabelTensor(torch.Tensor):
         :rtype: LabelTensor
         """
-        out = LabelTensor(super().clone(*args, **kwargs), self.labels)
+        out = LabelTensor(super().clone(*args, **kwargs), self._labels)
         return out

-    def update_labels(self, labels):
+    def init_labels(self):
+        self._labels = {
+            idx_: {
+                'dof': range(self.tensor.shape[idx_]),
+                'name': idx_
+            } for idx_ in range(self.tensor.ndim)
+        }
+
+    def update_labels_from_dict(self, labels):
         """
         Update the internal label representation according to the values passed as input.
@@ -192,21 +221,16 @@ class LabelTensor(torch.Tensor):
         :type labels: dict
         :raises ValueError: the dof list contains duplicates, or the number of dof does not match the tensor shape
         """
-        self.labels = {
-            idx_: {
-                'dof': range(self.tensor.shape[idx_]),
-                'name': idx_
-            } for idx_ in range(self.tensor.ndim)
-        }
         tensor_shape = self.tensor.shape
         for k, v in labels.items():
             if len(v['dof']) != len(set(v['dof'])):
                 raise ValueError("dof must be unique")
             if len(v['dof']) != tensor_shape[k]:
                 raise ValueError('Number of dof does not match with tensor dimension')
-        self.labels.update(labels)
+        self._labels.update(labels)

-    def init_labels_from_list(self, labels):
+    def update_labels_from_list(self, labels):
         """
         Given a list of dof, this method updates the internal label representation
@@ -214,4 +238,34 @@ class LabelTensor(torch.Tensor):
         :type labels: list
         """
         last_dim_labels = {self.tensor.ndim - 1: {'dof': labels, 'name': self.tensor.ndim - 1}}
-        self.update_labels(last_dim_labels)
+        self.update_labels_from_dict(last_dim_labels)
+
+    @staticmethod
+    def summation(tensors):
+        if len(tensors) == 0:
+            raise ValueError('tensors list must not be empty')
+        if len(tensors) == 1:
+            return tensors[0]
+        labels = tensors[0].labels
+        for j in range(tensors[0].ndim):
+            for i in range(1, len(tensors)):
+                if labels[j] != tensors[i].labels[j]:
+                    labels.pop(j)
+                    break
+        data = torch.zeros(tensors[0].tensor.shape)
+        for i in range(len(tensors)):
+            data += tensors[i].tensor
+        new_tensor = LabelTensor(data, labels)
+        return new_tensor
+
+    def last_dim_dof(self):
+        return self._labels[self.tensor.ndim - 1]['dof']
+
+    def append(self, tensor, mode='std'):
+        if mode == 'std':
+            new_label_tensor = LabelTensor.cat([self, tensor], dim=self.tensor.ndim - 1)
+        return new_label_tensor
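Two behaviors of summation worth noting: dimensions whose labels disagree across the summands are dropped back to default labels, while matching ones survive, and mismatched shapes fail when the accumulation broadcasts. A sketch mirroring test_summation further down (same assumptions as the earlier sketches):

    import torch
    from pina.label_tensor import LabelTensor

    a = LabelTensor(torch.ones(20, 3), ['x', 'y', 'z'])
    b = LabelTensor(torch.ones(20, 3), ['x', 'y', 'z'])
    s = LabelTensor.summation([a, b])               # element-wise sum
    assert torch.eq(s.tensor, 2 * torch.ones(20, 3)).all()
    assert s.labels[1]['dof'] == ['x', 'y', 'z']    # matching labels are kept

append with the default mode='std' simply delegates to LabelTensor.cat along the last dimension; its 'cross' mode, exercised by test_append_3D below, is not implemented in this hunk.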

--- a/pina/operators.py
+++ b/pina/operators.py

@@ -1,13 +1,13 @@
 """
 Module for the vectorized implementation of operators. Differential operators are used to write any differential problem.
-These operators are implemented to work on different accellerators: CPU, GPU, TPU or MPS.
+These operators are implemented to work on different accelerators: CPU, GPU, TPU or MPS.
 All operators take as input a tensor on which to compute the operator, a tensor with respect
 to which the operator is computed, the names of the output variables for which the operator
 is calculated (in the case of multidimensional functions), and the names of the variables on which the operator is calculated.
 """
 import torch
+from copy import deepcopy

 from pina.label_tensor import LabelTensor
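All operators now read component and variable names from the last-dimension dof of the label dict rather than from a flat label list. A short call-convention sketch under the same assumptions as before (pina.operators as the module path is assumed, the LabelTensor API is from the previous file):

    import torch
    from pina.label_tensor import LabelTensor
    from pina.operators import grad

    inp = LabelTensor(torch.rand(20, 3), ['x', 'y', 'z']).requires_grad_(True)
    u = LabelTensor((inp ** 2).sum(dim=-1, keepdim=True), 'u')

    g = grad(u, inp)             # d defaults to the input dof ['x', 'y', 'z']
    assert g.shape == (20, 3)
    assert g.labels[g.ndim - 1]['dof'] == ['dudx', 'dudy', 'dudz']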
@@ -49,12 +49,12 @@ def grad(output_, input_, components=None, d=None):
         :rtype: LabelTensor
         """
-        if len(output_.labels) != 1:
+        if len(output_.labels[output_.tensor.ndim-1]['dof']) != 1:
             raise RuntimeError("only scalar functions can be differentiated")
-        if not all([di in input_.labels for di in d]):
+        if not all([di in input_.labels[input_.tensor.ndim-1]['dof'] for di in d]):
             raise RuntimeError("derivative labels missing from input tensor")
-        output_fieldname = output_.labels[0]
+        output_fieldname = output_.labels[output_.ndim-1]['dof'][0]
         gradients = torch.autograd.grad(
             output_,
             input_,
@@ -65,41 +65,35 @@ def grad(output_, input_, components=None, d=None):
             retain_graph=True,
             allow_unused=True,
         )[0]
-        gradients.labels = input_.labels
+        new_labels = deepcopy(input_.labels)
+        gradients.labels = new_labels
         gradients = gradients.extract(d)
-        gradients.labels = [f"d{output_fieldname}d{i}" for i in d]
+        new_labels[input_.tensor.ndim - 1]['dof'] = [f"d{output_fieldname}d{i}" for i in d]
+        gradients.labels = new_labels
         return gradients

     if not isinstance(input_, LabelTensor):
         raise TypeError
     if d is None:
-        d = input_.labels
+        d = input_.labels[input_.tensor.ndim-1]['dof']
     if components is None:
-        components = output_.labels
+        components = output_.labels[output_.tensor.ndim-1]['dof']

-    if output_.shape[1] == 1:  # scalar output ################################
+    if output_.shape[output_.ndim-1] == 1:  # scalar output ################################
-        if components != output_.labels:
+        if components != output_.labels[output_.tensor.ndim-1]['dof']:
             raise RuntimeError
         gradients = grad_scalar_output(output_, input_, d)

-    elif output_.shape[1] >= 2:  # vector output ##############################
+    elif output_.shape[output_.ndim-1] >= 2:  # vector output ##############################
+        tensor_to_cat = []
         for i, c in enumerate(components):
             c_output = output_.extract([c])
-            if i == 0:
-                gradients = grad_scalar_output(c_output, input_, d)
-            else:
-                gradients = gradients.append(
-                    grad_scalar_output(c_output, input_, d)
-                )
+            tensor_to_cat.append(grad_scalar_output(c_output, input_, d))
+        gradients = LabelTensor.cat(tensor_to_cat, dim=output_.tensor.ndim-1)
     else:
         raise NotImplementedError

     return gradients
@@ -130,27 +124,29 @@ def div(output_, input_, components=None, d=None):
         raise TypeError

     if d is None:
-        d = input_.labels
+        d = input_.labels[input_.tensor.ndim-1]['dof']
     if components is None:
-        components = output_.labels
+        components = output_.labels[output_.tensor.ndim-1]['dof']

-    if output_.shape[1] < 2 or len(components) < 2:
+    if output_.shape[output_.ndim-1] < 2 or len(components) < 2:
         raise ValueError("div supported only for vector fields")
     if len(components) != len(d):
         raise ValueError

     grad_output = grad(output_, input_, components, d)
-    div = torch.zeros(input_.shape[0], 1, device=output_.device)
-    labels = [None] * len(components)
+    last_dim_dof = [None] * len(components)
+    to_sum_tensors = []
     for i, (c, d) in enumerate(zip(components, d)):
         c_fields = f"d{c}d{d}"
-        div[:, 0] += grad_output.extract(c_fields).sum(axis=1)
-        labels[i] = c_fields
+        last_dim_dof[i] = c_fields
+        to_sum_tensors.append(grad_output.extract(c_fields))
-    div = div.as_subclass(LabelTensor)
-    div.labels = ["+".join(labels)]
+    div = LabelTensor.summation(to_sum_tensors)
+    new_labels = deepcopy(input_.labels)
+    new_labels[input_.tensor.ndim-1]['dof'] = ["+".join(last_dim_dof)]
+    div.labels = new_labels
     return div
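div therefore collapses to a single column whose dof spells out the summed terms, now built through LabelTensor.summation instead of in-place accumulation. A sketch under the same assumptions as the grad example above:

    import torch
    from pina.label_tensor import LabelTensor
    from pina.operators import div

    inp = LabelTensor(torch.rand(20, 3), ['x', 'y', 'z']).requires_grad_(True)
    v = LabelTensor(torch.cat((inp.extract(['x']) ** 2,
                               inp.extract(['y']) ** 2), dim=1), ['a', 'b'])

    res = div(v, inp, components=['a', 'b'], d=['x', 'y'])
    assert res.shape == (20, 1)
    assert res.labels[res.ndim - 1]['dof'] == ['dadx+dbdy']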
@@ -205,10 +201,10 @@ def laplacian(output_, input_, components=None, d=None, method="std"):
         return result

     if d is None:
-        d = input_.labels
+        d = input_.labels[input_.tensor.ndim-1]['dof']
     if components is None:
-        components = output_.labels
+        components = output_.labels[output_.tensor.ndim-1]['dof']

     if method == "divgrad":
         raise NotImplementedError("divgrad not implemented as method")
@@ -218,25 +214,43 @@ def laplacian(output_, input_, components=None, d=None, method="std"):
     elif method == "std":
         if len(components) == 1:
-            result = scalar_laplace(output_, input_, components, d)
+            # result = scalar_laplace(output_, input_, components, d)  # TODO check (from 0.1)
+            grad_output = grad(output_, input_, components=components, d=d)
+            to_append_tensors = []
+            for i, label in enumerate(grad_output.labels[grad_output.ndim-1]['dof']):
+                gg = grad(grad_output, input_, d=d, components=[label])
+                to_append_tensors.append(gg.extract([gg.labels[gg.tensor.ndim-1]['dof'][i]]))
+            labels = [f"dd{components[0]}"]
+            result = LabelTensor.summation(tensors=to_append_tensors)
+            result.labels = labels
         else:
-            result = torch.empty(
-                size=(input_.shape[0], len(components)),
-                dtype=output_.dtype,
-                device=output_.device,
-            )
-            labels = [None] * len(components)
-            for idx, c in enumerate(components):
-                result[:, idx] = scalar_laplace(output_, input_, c, d).flatten()
-                labels[idx] = f"dd{c}"
-            result = result.as_subclass(LabelTensor)
-            result.labels = labels
+            # result = torch.empty(  # TODO check (from 0.1)
+            #     size=(input_.shape[0], len(components)),
+            #     dtype=output_.dtype,
+            #     device=output_.device,
+            # )
+            # labels = [None] * len(components)
+            # for idx, c in enumerate(components):
+            #     result[:, idx] = scalar_laplace(output_, input_, c, d).flatten()
+            #     labels[idx] = f"dd{c}"
+            # result = result.as_subclass(LabelTensor)
+            # result.labels = labels
+            labels = [None] * len(components)
+            to_append_tensors = [None] * len(components)
+            for idx, (ci, di) in enumerate(zip(components, d)):
+                if not isinstance(ci, list):
+                    ci = [ci]
+                if not isinstance(di, list):
+                    di = [di]
+                grad_output = grad(output_, input_, components=ci, d=di)
+                to_append_tensors[idx] = grad(grad_output, input_, d=di)
+                labels[idx] = f"dd{ci[0]}dd{di[0]}"
+            result = LabelTensor.cat(tensors=to_append_tensors, dim=output_.tensor.ndim-1)
+            result.labels = labels
     return result


+# TODO Fix advection operator
 def advection(output_, input_, velocity_field, components=None, d=None):
     """
     Perform advection operation. The operator works for vectorial functions,
@@ -258,10 +272,10 @@ def advection(output_, input_, velocity_field, components=None, d=None):
     :rtype: LabelTensor
     """
     if d is None:
-        d = input_.labels
+        d = input_.labels[input_.tensor.ndim-1]['dof']
     if components is None:
-        components = output_.labels
+        components = output_.labels[output_.tensor.ndim-1]['dof']
     tmp = (
         grad(output_, input_, components, d)
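To close the operators file: the scalar laplacian branch now sums per-variable second derivatives via LabelTensor.summation and labels the result dd<component>, while the vector branch concatenates one column per (component, variable) pair labeled dd<component>dd<variable>. A last sketch, same assumptions:

    import torch
    from pina.label_tensor import LabelTensor
    from pina.operators import laplacian

    inp = LabelTensor(torch.rand(20, 3), ['x', 'y', 'z']).requires_grad_(True)
    u = LabelTensor((inp ** 2).sum(dim=-1, keepdim=True), 'u')

    lap = laplacian(u, inp)      # scalar branch: summed second derivatives
    assert lap.labels[lap.ndim - 1]['dof'] == ['ddu']
    assert torch.allclose(lap.tensor, 6 * torch.ones(20, 1))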

--- a/tests/test_label_tensor.py
+++ b/tests/test_label_tensor.py

@@ -17,12 +17,14 @@ labels_row = {
         "dof": range(20)
     }
 }
+labels_list = ['x', 'y', 'z']
 labels_all = labels_column | labels_row


-@pytest.mark.parametrize("labels", [labels_column, labels_row, labels_all])
+@pytest.mark.parametrize("labels", [labels_column, labels_row, labels_all, labels_list])
 def test_constructor(labels):
     LabelTensor(data, labels)


 def test_wrong_constructor():
     with pytest.raises(ValueError):
         LabelTensor(data, ['a', 'b'])
@@ -61,7 +63,6 @@ def test_extract_2D(labels_te):
     assert torch.all(torch.isclose(data[2,2].reshape(1, 1), new))


 def test_extract_3D():
-    labels = labels_all
     data = torch.rand(20, 3, 4)
     labels = {
         1: {
@@ -80,6 +81,7 @@ def test_extract_3D():
     tensor = LabelTensor(data, labels)
     new = tensor.extract(labels_te)
+    tensor2 = LabelTensor(data, labels)
     assert new.ndim == tensor.ndim
     assert new.shape[0] == 20
     assert new.shape[1] == 2
@@ -88,6 +90,10 @@ def test_extract_3D():
         data[:, 0::2, 1:4].reshape(20, 2, 3),
         new
     ))
+    assert tensor2.ndim == tensor.ndim
+    assert tensor2.shape == tensor.shape
+    assert tensor.labels == tensor2.labels
+    assert new.shape != tensor.shape


 def test_concatenation_3D():
     data_1 = torch.rand(20, 3, 4)
@@ -146,3 +152,51 @@ def test_concatenation_3D():
     assert lt_cat.labels[2]['dof'] == range(5)
     assert lt_cat.labels[0]['dof'] == range(20)
     assert lt_cat.labels[1]['dof'] == range(3)
+
+
+def test_summation():
+    lt1 = LabelTensor(torch.ones(20, 3), labels_all)
+    lt2 = LabelTensor(torch.ones(30, 3), ['x', 'y', 'z'])
+    with pytest.raises(RuntimeError):
+        LabelTensor.summation([lt1, lt2])
+
+    lt1 = LabelTensor(torch.ones(20, 3), labels_all)
+    lt2 = LabelTensor(torch.ones(20, 3), labels_all)
+    lt_sum = LabelTensor.summation([lt1, lt2])
+    assert lt_sum.ndim == lt1.ndim
+    assert lt_sum.shape[0] == 20
+    assert lt_sum.shape[1] == 3
+    assert lt_sum.labels == labels_all
+    assert torch.eq(lt_sum.tensor, torch.ones(20, 3) * 2).all()
+
+    lt1 = LabelTensor(torch.ones(20, 3), labels_all)
+    lt2 = LabelTensor(torch.ones(20, 3), labels_all)
+    lt3 = LabelTensor(torch.zeros(20, 3), labels_all)
+    lt_sum = LabelTensor.summation([lt1, lt2, lt3])
+    assert lt_sum.ndim == lt1.ndim
+    assert lt_sum.shape[0] == 20
+    assert lt_sum.shape[1] == 3
+    assert lt_sum.labels == labels_all
+    assert torch.eq(lt_sum.tensor, torch.ones(20, 3) * 2).all()
+
+
+def test_append_3D():
+    data_1 = torch.rand(20, 3, 4)
+    labels_1 = ['x', 'y', 'z', 'w']
+    lt1 = LabelTensor(data_1, labels_1)
+    data_2 = torch.rand(50, 3, 4)
+    labels_2 = ['x', 'y', 'z', 'w']
+    lt2 = LabelTensor(data_2, labels_2)
+    lt1 = lt1.append(lt2)
+    assert lt1.shape == (70, 3, 4)
+    assert lt1.labels[0]['dof'] == range(70)
+    assert lt1.labels[1]['dof'] == range(3)
+    assert lt1.labels[2]['dof'] == ['x', 'y', 'z', 'w']
+
+    data_1 = torch.rand(20, 3, 2)
+    labels_1 = ['x', 'y']
+    lt1 = LabelTensor(data_1, labels_1)
+    data_2 = torch.rand(20, 3, 2)
+    labels_2 = ['z', 'w']
+    lt2 = LabelTensor(data_2, labels_2)
+    lt1 = lt1.append(lt2, mode='cross')
+    assert lt1.shape == (20, 3, 4)
+    assert lt1.labels[0]['dof'] == range(20)
+    assert lt1.labels[1]['dof'] == range(3)
+    assert lt1.labels[2]['dof'] == ['x', 'y', 'z', 'w']

--- a/tests/test_operators.py
+++ b/tests/test_operators.py

@@ -16,28 +16,29 @@ def func_scalar(x):
     return x_**2 + y_**2 + z_**2


-inp = LabelTensor(torch.rand((20, 3), requires_grad=True), ['x', 'y', 'z'])
-tensor_v = LabelTensor(func_vector(inp), ['a', 'b', 'c'])
-tensor_s = LabelTensor(func_scalar(inp).reshape(-1, 1), ['a'])
+data = torch.rand((20, 3))
+inp = LabelTensor(data, ['x', 'y', 'mu']).requires_grad_(True)
+labels = ['a', 'b', 'c']
+tensor_v = LabelTensor(func_vec(inp), labels)
+tensor_s = LabelTensor(func_scalar(inp).reshape(-1, 1), labels[0])


 def test_grad_scalar_output():
     grad_tensor_s = grad(tensor_s, inp)
     true_val = 2*inp
     assert grad_tensor_s.shape == inp.shape
-    assert grad_tensor_s.labels == [
-        f'd{tensor_s.labels[0]}d{i}' for i in inp.labels
+    assert grad_tensor_s.labels[grad_tensor_s.ndim-1]['dof'] == [
+        f'd{tensor_s.labels[tensor_s.ndim-1]["dof"][0]}d{i}' for i in inp.labels[inp.ndim-1]['dof']
     ]
     assert torch.allclose(grad_tensor_s, true_val)

     grad_tensor_s = grad(tensor_s, inp, d=['x', 'y'])
     true_val = 2*inp.extract(['x', 'y'])
-    assert grad_tensor_s.shape == (inp.shape[0], 2)
-    assert grad_tensor_s.labels == [
-        f'd{tensor_s.labels[0]}d{i}' for i in ['x', 'y']
+    assert grad_tensor_s.shape == (20, 2)
+    assert grad_tensor_s.labels[grad_tensor_s.ndim-1]['dof'] == [
+        f'd{tensor_s.labels[tensor_s.ndim-1]["dof"][0]}d{i}' for i in ['x', 'y']
     ]
     assert torch.allclose(grad_tensor_s, true_val)


 def test_grad_vector_output():
     grad_tensor_v = grad(tensor_v, inp)
     true_val = torch.cat(
@@ -74,7 +75,6 @@ def test_grad_vector_output():
     ]
     assert torch.allclose(grad_tensor_v, true_val)

-
 def test_div_vector_output():
     div_tensor_v = div(tensor_v, inp)
     true_val = 2*torch.sum(inp, dim=1).reshape(-1,1)
@@ -88,7 +88,6 @@ def test_div_vector_output():
     assert div_tensor_v.labels == ['dadx+dbdy']
     assert torch.allclose(div_tensor_v, true_val)

-
 def test_laplacian_scalar_output():
     laplace_tensor_s = laplacian(tensor_s, inp)
     true_val = 6*torch.ones_like(laplace_tensor_s)
@@ -102,7 +101,6 @@ def test_laplacian_scalar_output():
     assert laplace_tensor_s.labels == [f"dd{tensor_s.labels[0]}"]
     assert torch.allclose(laplace_tensor_s, true_val)

-
 def test_laplacian_vector_output():
     laplace_tensor_v = laplacian(tensor_v, inp)
     true_val = 2*torch.ones_like(tensor_v)