fix label management in operators (#524)

* fix bug in laplace labels

* fix label management and add test
This commit is contained in:
Giovanni Canali
2025-03-26 16:49:18 +01:00
committed by FilippoOlivo
parent 80c257da4d
commit 6d39e2fa98
2 changed files with 92 additions and 22 deletions

View File

@@ -27,12 +27,14 @@ def grad(output_, input_, components=None, d=None):
computed. computed.
:param LabelTensor input_: The input tensor with respect to which the :param LabelTensor input_: The input tensor with respect to which the
gradient is computed. gradient is computed.
:param list[str] components: The names of the output variables for which to :param components: The names of the output variables for which to
compute the gradient. It must be a subset of the output labels. compute the gradient. It must be a subset of the output labels.
If ``None``, all output variables are considered. Default is ``None``. If ``None``, all output variables are considered. Default is ``None``.
:param list[str] d: The names of the input variables with respect to which :type components: str | list[str]
:param d: The names of the input variables with respect to which
the gradient is computed. It must be a subset of the input labels. the gradient is computed. It must be a subset of the input labels.
If ``None``, all input variables are considered. Default is ``None``. If ``None``, all input variables are considered. Default is ``None``.
:type d: str | list[str]
:raises TypeError: If the input tensor is not a LabelTensor. :raises TypeError: If the input tensor is not a LabelTensor.
:raises RuntimeError: If the output is a scalar field and the components :raises RuntimeError: If the output is a scalar field and the components
are not equal to the output labels. are not equal to the output labels.
@@ -50,9 +52,10 @@ def grad(output_, input_, components=None, d=None):
computed. It must be a column tensor. computed. It must be a column tensor.
:param LabelTensor input_: The input tensor with respect to which the :param LabelTensor input_: The input tensor with respect to which the
gradient is computed. gradient is computed.
:param list[str] d: The names of the input variables with respect to :param d: The names of the input variables with respect to
which the gradient is computed. It must be a subset of the input which the gradient is computed. It must be a subset of the input
labels. If ``None``, all input variables are considered. labels. If ``None``, all input variables are considered.
:type d: str | list[str]
:raises RuntimeError: If a vectorial function is passed. :raises RuntimeError: If a vectorial function is passed.
:raises RuntimeError: If missing derivative labels. :raises RuntimeError: If missing derivative labels.
:return: The computed gradient tensor. :return: The computed gradient tensor.
@@ -89,6 +92,12 @@ def grad(output_, input_, components=None, d=None):
if components is None: if components is None:
components = output_.labels components = output_.labels
if not isinstance(components, list):
components = [components]
if not isinstance(d, list):
d = [d]
if output_.shape[1] == 1: # scalar output ################################ if output_.shape[1] == 1: # scalar output ################################
if components != output_.labels: if components != output_.labels:
@@ -120,12 +129,14 @@ def div(output_, input_, components=None, d=None):
computed. computed.
:param LabelTensor input_: The input tensor with respect to which the :param LabelTensor input_: The input tensor with respect to which the
divergence is computed. divergence is computed.
:param list[str] components: The names of the output variables for which to :param components: The names of the output variables for which to
compute the divergence. It must be a subset of the output labels. compute the divergence. It must be a subset of the output labels.
If ``None``, all output variables are considered. Default is ``None``. If ``None``, all output variables are considered. Default is ``None``.
:param list[str] d: The names of the input variables with respect to which :type components: str | list[str]
:param d: The names of the input variables with respect to which
the divergence is computed. It must be a subset of the input labels. the divergence is computed. It must be a subset of the input labels.
If ``None``, all input variables are considered. Default is ``None``. If ``None``, all input variables are considered. Default is ``None``.
:type d: str | list[str]
:raises TypeError: If the input tensor is not a LabelTensor. :raises TypeError: If the input tensor is not a LabelTensor.
:raises ValueError: If the output is a scalar field. :raises ValueError: If the output is a scalar field.
:raises ValueError: If the number of components is not equal to the number :raises ValueError: If the number of components is not equal to the number
@@ -142,6 +153,12 @@ def div(output_, input_, components=None, d=None):
if components is None: if components is None:
components = output_.labels components = output_.labels
if not isinstance(components, list):
components = [components]
if not isinstance(d, list):
d = [d]
if output_.shape[1] < 2 or len(components) < 2: if output_.shape[1] < 2 or len(components) < 2:
raise ValueError("div supported only for vector fields") raise ValueError("div supported only for vector fields")
@@ -170,12 +187,14 @@ def laplacian(output_, input_, components=None, d=None, method="std"):
computed. computed.
:param LabelTensor input_: The input tensor with respect to which the :param LabelTensor input_: The input tensor with respect to which the
laplacian is computed. laplacian is computed.
:param list[str] components: The names of the output variables for which to :param components: The names of the output variables for which to
compute the laplacian. It must be a subset of the output labels. compute the laplacian. It must be a subset of the output labels.
If ``None``, all output variables are considered. Default is ``None``. If ``None``, all output variables are considered. Default is ``None``.
:param list[str] d: The names of the input variables with respect to which :type components: str | list[str]
:param d: The names of the input variables with respect to which
the laplacian is computed. It must be a subset of the input labels. the laplacian is computed. It must be a subset of the input labels.
If ``None``, all input variables are considered. Default is ``None``. If ``None``, all input variables are considered. Default is ``None``.
:type d: str | list[str]
:param str method: The method used to compute the Laplacian. Default is :param str method: The method used to compute the Laplacian. Default is
``std``. ``std``.
:raises NotImplementedError: If ``std=divgrad``. :raises NotImplementedError: If ``std=divgrad``.
@@ -191,12 +210,14 @@ def laplacian(output_, input_, components=None, d=None, method="std"):
computed. It must be a column tensor. computed. It must be a column tensor.
:param LabelTensor input_: The input tensor with respect to which the :param LabelTensor input_: The input tensor with respect to which the
laplacian is computed. laplacian is computed.
:param list[str] components: The names of the output variables for which :param components: The names of the output variables for which
to compute the laplacian. It must be a subset of the output labels. to compute the laplacian. It must be a subset of the output labels.
If ``None``, all output variables are considered. If ``None``, all output variables are considered.
:param list[str] d: The names of the input variables with respect to :type components: str | list[str]
:param d: The names of the input variables with respect to
which the laplacian is computed. It must be a subset of the input which the laplacian is computed. It must be a subset of the input
labels. If ``None``, all input variables are considered. labels. If ``None``, all input variables are considered.
:type d: str | list[str]
:return: The computed laplacian tensor. :return: The computed laplacian tensor.
:rtype: LabelTensor :rtype: LabelTensor
""" """
@@ -216,21 +237,23 @@ def laplacian(output_, input_, components=None, d=None, method="std"):
if components is None: if components is None:
components = output_.labels components = output_.labels
if not isinstance(components, list):
components = [components]
if not isinstance(d, list):
d = [d]
if method == "divgrad": if method == "divgrad":
raise NotImplementedError("divgrad not implemented as method") raise NotImplementedError("divgrad not implemented as method")
if method == "std": if method == "std":
if len(components) == 1:
result = scalar_laplace(output_, input_, components, d)
labels = [f"dd{components[0]}"]
else:
result = torch.empty( result = torch.empty(
input_.shape[0], len(components), device=output_.device input_.shape[0], len(components), device=output_.device
) )
labels = [None] * len(components) labels = [None] * len(components)
for idx, c in enumerate(components): for idx, c in enumerate(components):
result[:, idx] = scalar_laplace(output_, input_, c, d).flatten() result[:, idx] = scalar_laplace(output_, input_, [c], d).flatten()
labels[idx] = f"dd{c}" labels[idx] = f"dd{c}"
result = result.as_subclass(LabelTensor) result = result.as_subclass(LabelTensor)
@@ -251,12 +274,14 @@ def advection(output_, input_, velocity_field, components=None, d=None):
is computed. is computed.
:param str velocity_field: The name of the output variable used as velocity :param str velocity_field: The name of the output variable used as velocity
field. It must be chosen among the output labels. field. It must be chosen among the output labels.
:param list[str] components: The names of the output variables for which :param components: The names of the output variables for which
to compute the advection. It must be a subset of the output labels. to compute the advection. It must be a subset of the output labels.
If ``None``, all output variables are considered. Default is ``None``. If ``None``, all output variables are considered. Default is ``None``.
:param list[str] d: The names of the input variables with respect to which :type components: str | list[str]
:param d: The names of the input variables with respect to which
the advection is computed. It must be a subset of the input labels. the advection is computed. It must be a subset of the input labels.
If ``None``, all input variables are considered. Default is ``None``. If ``None``, all input variables are considered. Default is ``None``.
:type d: str | list[str]
:return: The computed advection tensor. :return: The computed advection tensor.
:rtype: LabelTensor :rtype: LabelTensor
""" """
@@ -266,6 +291,12 @@ def advection(output_, input_, velocity_field, components=None, d=None):
if components is None: if components is None:
components = output_.labels components = output_.labels
if not isinstance(components, list):
components = [components]
if not isinstance(d, list):
d = [d]
tmp = ( tmp = (
grad(output_, input_, components, d) grad(output_, input_, components, d)
.reshape(-1, len(components), len(d)) .reshape(-1, len(components), len(d))

View File

@@ -164,3 +164,42 @@ def test_laplacian_vector_output2():
assert torch.allclose(lap_f.extract("ddu"), lap_u) assert torch.allclose(lap_f.extract("ddu"), lap_u)
assert torch.allclose(lap_f.extract("ddv"), lap_v) assert torch.allclose(lap_f.extract("ddv"), lap_v)
def test_label_format():
# Test the format of `components` or `d` in the case of a single string of
# length greater than 1, e.g. "aaa".
# This test is conducted only for gradient and laplacian, since div is not
# implemented for single components.
inp.labels = ["xx", "yy", "zz"]
tensor_v = LabelTensor(func_vector(inp), ["aa", "bbb", "c"])
comp = tensor_v.labels[0]
single_d = inp.labels[0]
# Single component as string + list of d
grad_tensor_v = grad(tensor_v, inp, components=comp, d=None)
assert grad_tensor_v.labels == [f"d{comp}d{i}" for i in inp.labels]
lap_tensor_v = laplacian(tensor_v, inp, components=comp, d=None)
assert lap_tensor_v.labels == [f"dd{comp}"]
# Single component as list + list of d
grad_tensor_v = grad(tensor_v, inp, components=[comp], d=None)
assert grad_tensor_v.labels == [f"d{comp}d{i}" for i in inp.labels]
lap_tensor_v = laplacian(tensor_v, inp, components=[comp], d=None)
assert lap_tensor_v.labels == [f"dd{comp}"]
# List of components + single d as string
grad_tensor_v = grad(tensor_v, inp, components=None, d=single_d)
assert grad_tensor_v.labels == [f"d{i}d{single_d}" for i in tensor_v.labels]
lap_tensor_v = laplacian(tensor_v, inp, components=None, d=single_d)
assert lap_tensor_v.labels == [f"dd{i}" for i in tensor_v.labels]
# List of components + single d as list
grad_tensor_v = grad(tensor_v, inp, components=None, d=[single_d])
assert grad_tensor_v.labels == [f"d{i}d{single_d}" for i in tensor_v.labels]
lap_tensor_v = laplacian(tensor_v, inp, components=None, d=[single_d])
assert lap_tensor_v.labels == [f"dd{i}" for i in tensor_v.labels]