Filippo0.2 (#361)

* Add summation and remove deepcopy (only for tensors) in LabelTensor class
* Update operators for compatibility with updated LabelTensor implementation
* Implement labels.setter in LabelTensor class
* Update LabelTensor

---------

Co-authored-by: FilippoOlivo <filippo@filippoolivo.com>
Dario Coscia
2024-10-04 15:59:09 +02:00
committed by Nicola Demo
parent 1d3df2a127
commit fdb8f65143
4 changed files with 212 additions and 92 deletions


@@ -35,14 +35,34 @@ class LabelTensor(torch.Tensor):
{1: {"name": "space"['a', 'b', 'c'])
"""
self.labels = None
self.labels = labels
@property
def labels(self):
"""Property decorator for labels
:return: labels of self
:rtype: dict
"""
return self._labels
@labels.setter
def labels(self, labels):
""""
Set properly the parameter _labels
:param labels: Labels to assign to the class variable _labels.
:type: labels: str | list(str) | dict
"""
if not hasattr(self, 'labels'):
self.init_labels()
if isinstance(labels, dict):
self.update_labels(labels)
self.update_labels_from_dict(labels)
elif isinstance(labels, list):
self.init_labels_from_list(labels)
self.update_labels_from_list(labels)
elif isinstance(labels, str):
labels = [labels]
self.init_labels_from_list(labels)
self.update_labels_from_list(labels)
else:
raise ValueError(f"labels must be list, dict or string.")
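For reference, a minimal usage sketch of the new setter (assuming the two-argument constructor LabelTensor(data, labels) used elsewhere in this diff): a plain string or a list labels the dofs of the last dimension, while a dict can address any dimension by index, as in the docstring above.

    import torch
    from pina.label_tensor import LabelTensor

    # a string or a list labels the dofs of the last dimension
    p = LabelTensor(torch.rand(10, 1), 'p')
    uvp = LabelTensor(torch.rand(10, 3), ['u', 'v', 'p'])

    # a dict addresses dimensions by index; the dof length must match the dimension size
    uvp.labels = {1: {'dof': ['a', 'b', 'c'], 'name': 'space'}}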
@@ -60,38 +80,38 @@ class LabelTensor(torch.Tensor):
if isinstance(label_to_extract, (str, int)):
label_to_extract = [label_to_extract]
if isinstance(label_to_extract, (tuple, list)):
last_dim_label = self.labels[self.tensor.ndim - 1]['dof']
last_dim_label = self._labels[self.tensor.ndim - 1]['dof']
if set(label_to_extract).issubset(last_dim_label) is False:
raise ValueError('Cannot extract a dof which is not in the original LabelTensor')
idx_to_extract = [last_dim_label.index(i) for i in label_to_extract]
new_tensor = deepcopy(self.tensor)
new_tensor = self.tensor
new_tensor = new_tensor[..., idx_to_extract]
new_labels = deepcopy(self.labels)
new_labels = deepcopy(self._labels)
last_dim_new_label = {self.tensor.ndim - 1: {
'dof': label_to_extract,
'name': self.labels[self.tensor.ndim - 1]['name']
'name': self._labels[self.tensor.ndim - 1]['name']
}}
new_labels.update(last_dim_new_label)
elif isinstance(label_to_extract, dict):
new_labels = (deepcopy(self.labels))
new_tensor = deepcopy(self.tensor)
new_labels = (deepcopy(self._labels))
new_tensor = self.tensor
for k, v in label_to_extract.items():
idx_dim = None
for kl, vl in self.labels.items():
for kl, vl in self._labels.items():
if vl['name'] == k:
idx_dim = kl
break
dim_labels = self.labels[idx_dim]['dof']
dim_labels = self._labels[idx_dim]['dof']
if isinstance(label_to_extract[k], (int, str)):
label_to_extract[k] = [label_to_extract[k]]
if set(label_to_extract[k]).issubset(dim_labels) is False:
raise ValueError('Cannot extract a dof which is not in the original labeltensor')
raise ValueError('Cannot extract a dof which is not in the original LabelTensor')
idx_to_extract = [dim_labels.index(i) for i in label_to_extract[k]]
indexer = [slice(None)] * idx_dim + [idx_to_extract] + [slice(None)] * (self.tensor.ndim - idx_dim - 1)
new_tensor = new_tensor[indexer]
dim_new_label = {idx_dim: {
'dof': label_to_extract[k],
'name': self.labels[idx_dim]['name']
'name': self._labels[idx_dim]['name']
}}
new_labels.update(dim_new_label)
else:
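A usage sketch of the two extraction paths handled in this hunk (same constructor assumption as above): the list/str path selects dofs on the last dimension, while the dict path addresses a dimension through its 'name' entry.

    import torch
    from pina.label_tensor import LabelTensor

    lt = LabelTensor(torch.rand(10, 3), ['u', 'v', 'p'])

    # list/str path: select dofs of the last dimension by name
    uv = lt.extract(['u', 'v'])   # LabelTensor of shape (10, 2), dofs ['u', 'v']

    # dict path (sketch): address a dimension through its 'name' entry, e.g.
    #   lt.extract({'space': ['a', 'b']}) after labelling dimension 1 as 'space'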
@@ -104,7 +124,7 @@ class LabelTensor(torch.Tensor):
"""
s = ''
for key, value in self.labels.items():
for key, value in self._labels.items():
s += f"{key}: {value}\n"
s += '\n'
s += super().__str__()
@@ -155,7 +175,7 @@ class LabelTensor(torch.Tensor):
def requires_grad_(self, mode=True):
lt = super().requires_grad_(mode)
lt.labels = self.labels
lt.labels = self._labels
return lt
@property
@@ -181,10 +201,19 @@ class LabelTensor(torch.Tensor):
:rtype: LabelTensor
"""
out = LabelTensor(super().clone(*args, **kwargs), self.labels)
out = LabelTensor(super().clone(*args, **kwargs), self._labels)
return out
def update_labels(self, labels):
def init_labels(self):
self._labels = {
idx_: {
'dof': range(self.tensor.shape[idx_]),
'name': idx_
} for idx_ in range(self.tensor.ndim)
}
def update_labels_from_dict(self, labels):
"""
Update the internal label representation according to the values passed as input.
@@ -192,21 +221,16 @@ class LabelTensor(torch.Tensor):
:type labels: dict
:raises ValueError: dof list contains duplicates or the number of dof does not match the tensor shape
"""
self.labels = {
idx_: {
'dof': range(self.tensor.shape[idx_]),
'name': idx_
} for idx_ in range(self.tensor.ndim)
}
tensor_shape = self.tensor.shape
for k, v in labels.items():
if len(v['dof']) != len(set(v['dof'])):
raise ValueError("dof must be unique")
if len(v['dof']) != tensor_shape[k]:
raise ValueError('Number of dof does not match with tensor dimension')
self.labels.update(labels)
self._labels.update(labels)
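The two checks above reject malformed label dicts; a small sketch of the resulting failure modes (same constructor assumption as above):

    import torch
    from pina.label_tensor import LabelTensor

    lt = LabelTensor(torch.rand(10, 3), ['u', 'v', 'p'])

    try:
        lt.labels = {1: {'dof': ['u', 'u', 'p'], 'name': 'output'}}  # duplicated dof
    except ValueError as err:
        print(err)  # dof must be unique

    try:
        lt.labels = {1: {'dof': ['u', 'v'], 'name': 'output'}}  # length != dimension size
    except ValueError as err:
        print(err)  # Number of dof does not match with tensor dimension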
def init_labels_from_list(self, labels):
def update_labels_from_list(self, labels):
"""
Given a list of dof, this method updates the internal label representation
@@ -214,4 +238,34 @@ class LabelTensor(torch.Tensor):
:type labels: list
"""
last_dim_labels = {self.tensor.ndim - 1: {'dof': labels, 'name': self.tensor.ndim - 1}}
self.update_labels(last_dim_labels)
self.update_labels_from_dict(last_dim_labels)
@staticmethod
def summation(tensors):
if len(tensors) == 0:
raise ValueError('tensors list must not be empty')
if len(tensors) == 1:
return tensors[0]
labels = tensors[0].labels
for j in range(tensors[0].ndim):
for i in range(1, len(tensors)):
if labels[j] != tensors[i].labels[j]:
labels.pop(j)
break
data = torch.zeros(tensors[0].tensor.shape)
for i in range(len(tensors)):
data += tensors[i].tensor
new_tensor = LabelTensor(data, labels)
return new_tensor
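A sketch of the new summation helper (same constructor assumption as above): the tensors are summed elementwise, and any dimension whose labels disagree across the inputs falls back to the default positional labels in the result.

    import torch
    from pina.label_tensor import LabelTensor

    a = LabelTensor(torch.ones(5, 2), ['u', 'v'])
    b = LabelTensor(torch.ones(5, 2), ['u', 'v'])
    c = LabelTensor.summation([a, b])  # elementwise sum; matching labels ['u', 'v'] are kept

    d = LabelTensor(torch.ones(5, 2), ['p', 'q'])
    e = LabelTensor.summation([a, d])  # last-dimension labels disagree, so the result
                                       # keeps only the default labels for that dimension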
def last_dim_dof(self):
return self._labels[self.tensor.ndim - 1]['dof']
def append(self, tensor, mode='std'):
if mode == 'std':
new_label_tensor = LabelTensor.cat([self, tensor], dim=self.tensor.ndim - 1)
return new_label_tensor


@@ -1,13 +1,13 @@
"""
Module for the vectorized implementation of operators. Differential operators are used to write any differential problem.
These operators are implemented to work on different accellerators: CPU, GPU, TPU or MPS.
These operators are implemented to work on different accelerators: CPU, GPU, TPU or MPS.
All operators take as input a tensor on which to compute the operator, a tensor with respect
to which to compute the operator, the names of the output variables for which to calculate the operator
(in case of multidimensional functions), and the names of the variables on which the operator is calculated.
"""
import torch
from copy import deepcopy
from pina.label_tensor import LabelTensor
@@ -49,12 +49,12 @@ def grad(output_, input_, components=None, d=None):
:rtype: LabelTensor
"""
if len(output_.labels) != 1:
if len(output_.labels[output_.tensor.ndim-1]['dof']) != 1:
raise RuntimeError("only scalar function can be differentiated")
if not all([di in input_.labels for di in d]):
if not all([di in input_.labels[input_.tensor.ndim-1]['dof'] for di in d]):
raise RuntimeError("derivative labels missing from input tensor")
output_fieldname = output_.labels[0]
output_fieldname = output_.labels[output_.ndim-1]['dof'][0]
gradients = torch.autograd.grad(
output_,
input_,
@@ -65,41 +65,35 @@ def grad(output_, input_, components=None, d=None):
retain_graph=True,
allow_unused=True,
)[0]
gradients.labels = input_.labels
new_labels = deepcopy(input_.labels)
gradients.labels = new_labels
gradients = gradients.extract(d)
gradients.labels = [f"d{output_fieldname}d{i}" for i in d]
new_labels[input_.tensor.ndim - 1]['dof'] = [f"d{output_fieldname}d{i}" for i in d]
gradients.labels = new_labels
return gradients
if not isinstance(input_, LabelTensor):
raise TypeError
if d is None:
d = input_.labels
d = input_.labels[input_.tensor.ndim-1]['dof']
if components is None:
components = output_.labels
components = output_.labels[output_.tensor.ndim-1]['dof']
if output_.shape[1] == 1: # scalar output ################################
if output_.shape[output_.ndim-1] == 1: # scalar output ################################
if components != output_.labels:
if components != output_.labels[output_.tensor.ndim-1]['dof']:
raise RuntimeError
gradients = grad_scalar_output(output_, input_, d)
elif output_.shape[1] >= 2: # vector output ##############################
elif output_.shape[output_.ndim-1] >= 2: # vector output ##############################
tensor_to_cat = []
for i, c in enumerate(components):
c_output = output_.extract([c])
if i == 0:
gradients = grad_scalar_output(c_output, input_, d)
else:
gradients = gradients.append(
grad_scalar_output(c_output, input_, d)
)
tensor_to_cat.append(grad_scalar_output(c_output, input_, d))
gradients = LabelTensor.cat(tensor_to_cat, dim=output_.tensor.ndim-1)
else:
raise NotImplementedError
return gradients
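A usage sketch of the refactored grad, assuming the operators module shown here is importable as pina.operators and that LabelTensor behaves as in the first file of this diff; requires_grad_ propagates the label dict, as changed earlier in this commit.

    import torch
    from pina.label_tensor import LabelTensor
    from pina.operators import grad  # assumed import path for this module

    x = LabelTensor(torch.rand(10, 2), ['x', 'y']).requires_grad_(True)
    u = LabelTensor(x.extract(['x']) * x.extract(['y']), ['u'])

    grad_u = grad(u, x)  # components and d default to the last-dimension dofs
    # the last-dimension dofs of grad_u are expected to read ['dudx', 'dudy']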
@@ -130,27 +124,29 @@ def div(output_, input_, components=None, d=None):
raise TypeError
if d is None:
d = input_.labels
d = input_.labels[input_.tensor.ndim-1]['dof']
if components is None:
components = output_.labels
components = output_.labels[output_.tensor.ndim-1]['dof']
if output_.shape[1] < 2 or len(components) < 2:
if output_.shape[output_.ndim-1] < 2 or len(components) < 2:
raise ValueError("div supported only for vector fields")
if len(components) != len(d):
raise ValueError
grad_output = grad(output_, input_, components, d)
div = torch.zeros(input_.shape[0], 1, device=output_.device)
labels = [None] * len(components)
last_dim_dof = [None] * len(components)
to_sum_tensors = []
for i, (c, d) in enumerate(zip(components, d)):
c_fields = f"d{c}d{d}"
div[:, 0] += grad_output.extract(c_fields).sum(axis=1)
labels[i] = c_fields
last_dim_dof[i] = c_fields
to_sum_tensors.append(grad_output.extract(c_fields))
div = div.as_subclass(LabelTensor)
div.labels = ["+".join(labels)]
div = LabelTensor.summation(to_sum_tensors)
new_labels = deepcopy(input_.labels)
new_labels[input_.tensor.ndim-1]['dof'] = ["+".join(last_dim_dof)]
div.labels = new_labels
return div
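A corresponding sketch for the reworked div, which now sums the extracted derivative columns through LabelTensor.summation and relabels the single output column (same assumptions as the grad sketch above):

    import torch
    from pina.label_tensor import LabelTensor
    from pina.operators import div  # assumed import path for this module

    x = LabelTensor(torch.rand(10, 2), ['x', 'y']).requires_grad_(True)
    v = LabelTensor(x ** 2, ['vx', 'vy'])  # two-component field on two inputs

    div_v = div(v, x)
    # single-column LabelTensor; its last-dimension dof is expected to read 'dvxdx+dvydy'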
@@ -205,10 +201,10 @@ def laplacian(output_, input_, components=None, d=None, method="std"):
return result
if d is None:
d = input_.labels
d = input_.labels[input_.tensor.ndim-1]['dof']
if components is None:
components = output_.labels
components = output_.labels[output_.tensor.ndim-1]['dof']
if method == "divgrad":
raise NotImplementedError("divgrad not implemented as method")
@@ -218,25 +214,43 @@ def laplacian(output_, input_, components=None, d=None, method="std"):
elif method == "std":
if len(components) == 1:
result = scalar_laplace(output_, input_, components, d)
# result = scalar_laplace(output_, input_, components, d) # TODO check (from 0.1)
grad_output = grad(output_, input_, components=components, d=d)
to_append_tensors = []
for i, label in enumerate(grad_output.labels[grad_output.ndim-1]['dof']):
gg = grad(grad_output, input_, d=d, components=[label])
to_append_tensors.append(gg.extract([gg.labels[gg.tensor.ndim-1]['dof'][i]]))
labels = [f"dd{components[0]}"]
result = LabelTensor.summation(tensors=to_append_tensors)
result.labels = labels
else:
result = torch.empty(
size=(input_.shape[0], len(components)),
dtype=output_.dtype,
device=output_.device,
)
labels = [None] * len(components)
for idx, c in enumerate(components):
result[:, idx] = scalar_laplace(output_, input_, c, d).flatten()
labels[idx] = f"dd{c}"
# result = torch.empty( # TODO check (from 0.1)
# size=(input_.shape[0], len(components)),
# dtype=output_.dtype,
# device=output_.device,
# )
# labels = [None] * len(components)
# for idx, c in enumerate(components):
# result[:, idx] = scalar_laplace(output_, input_, c, d).flatten()
# labels[idx] = f"dd{c}"
result = result.as_subclass(LabelTensor)
result.labels = labels
# result = result.as_subclass(LabelTensor)
# result.labels = labels
labels = [None] * len(components)
to_append_tensors = [None] * len(components)
for idx, (ci, di) in enumerate(zip(components, d)):
if not isinstance(ci, list):
ci = [ci]
if not isinstance(di, list):
di = [di]
grad_output = grad(output_, input_, components=ci, d=di)
to_append_tensors[idx] = grad(grad_output, input_, d=di)
labels[idx] = f"dd{ci[0]}dd{di[0]}"
result = LabelTensor.cat(tensors=to_append_tensors, dim=output_.tensor.ndim-1)
result.labels = labels
return result
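A sketch of the two laplacian branches after the refactor (same assumptions as the grad sketch; the vector branch also relies on LabelTensor.cat as used above):

    import torch
    from pina.label_tensor import LabelTensor
    from pina.operators import laplacian  # assumed import path for this module

    x = LabelTensor(torch.rand(10, 2), ['x', 'y']).requires_grad_(True)

    u = LabelTensor(x.extract(['x']) * x.extract(['y']), ['u'])
    lap_u = laplacian(u, x)  # scalar branch: single column, relabelled 'ddu'

    v = LabelTensor(x ** 2, ['vx', 'vy'])
    lap_v = laplacian(v, x)  # vector branch: columns expected to be labelled 'ddvxddx', 'ddvyddy'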
# TODO Fix advection operator
def advection(output_, input_, velocity_field, components=None, d=None):
"""
Perform advection operation. The operator works for vectorial functions,
@@ -258,10 +272,10 @@ def advection(output_, input_, velocity_field, components=None, d=None):
:rtype: LabelTensor
"""
if d is None:
d = input_.labels
d = input_.labels[input_.tensor.ndim-1]['dof']
if components is None:
components = output_.labels
components = output_.labels[output_.tensor.ndim-1]['dof']
tmp = (
grad(output_, input_, components, d)