🎨 Format Python code with psf/black
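The changes below are purely mechanical: black normalizes string quotes to double quotes, splits long call signatures one argument per line with a magic trailing comma, and wraps lines at 88 characters. A minimal sketch of checking this from Python, assuming a black release that exposes `format_str` and `Mode` (the snippet is illustrative, not part of the commit):

    import black

    src = "raise RuntimeError('only scalar function can be differentiated')\n"
    # format_str applies the same rules as the `black` CLI: double quotes,
    # magic trailing comma, 88-character line length by default.
    print(black.format_str(src, mode=black.Mode()), end="")
    # -> raise RuntimeError("only scalar function can be differentiated")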
@@ -5,6 +5,7 @@ All operators take as input the tensor on which the operator is computed, the tensor
 with respect to which the operator is computed, the names of the output variables for which
 the operator is calculated (in the case of multidimensional functions), and the names of the
 variables with respect to which the operator is calculated.
"""
|
||||
|
||||
import torch
|
||||
|
||||
from pina.label_tensor import LabelTensor
|
||||
@@ -49,24 +50,25 @@ def grad(output_, input_, components=None, d=None):
         """
 
         if len(output_.labels) != 1:
-            raise RuntimeError('only scalar function can be differentiated')
+            raise RuntimeError("only scalar function can be differentiated")
         if not all([di in input_.labels for di in d]):
-            raise RuntimeError('derivative labels missing from input tensor')
+            raise RuntimeError("derivative labels missing from input tensor")
 
         output_fieldname = output_.labels[0]
-        gradients = torch.autograd.grad(output_,
-                                        input_,
-                                        grad_outputs=torch.ones(
-                                            output_.size(),
-                                            dtype=output_.dtype,
-                                            device=output_.device),
-                                        create_graph=True,
-                                        retain_graph=True,
-                                        allow_unused=True)[0]
+        gradients = torch.autograd.grad(
+            output_,
+            input_,
+            grad_outputs=torch.ones(
+                output_.size(), dtype=output_.dtype, device=output_.device
+            ),
+            create_graph=True,
+            retain_graph=True,
+            allow_unused=True,
+        )[0]
 
         gradients.labels = input_.labels
         gradients = gradients.extract(d)
-        gradients.labels = [f'd{output_fieldname}d{i}' for i in d]
+        gradients.labels = [f"d{output_fieldname}d{i}" for i in d]
 
         return gradients
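Aside from quote style, the only change in this hunk is black's exploded layout for the `torch.autograd.grad` call. As a reminder of what that call computes, here is a standalone plain-PyTorch sketch (variable names are invented for illustration):

    import torch

    pts = torch.rand(4, 2, requires_grad=True)   # 4 samples of (x, y)
    u = (pts ** 2).sum(dim=1, keepdim=True)      # u = x^2 + y^2, shape (4, 1)

    # grad_outputs of ones turns the vector-Jacobian product into a
    # per-sample gradient; create_graph keeps the graph alive so a second
    # differentiation (as in laplacian below) is possible.
    gradients = torch.autograd.grad(
        u,
        pts,
        grad_outputs=torch.ones(u.size(), dtype=u.dtype, device=u.device),
        create_graph=True,
        retain_graph=True,
        allow_unused=True,
    )[0]
    print(gradients)  # rows are (du/dx, du/dy) = (2x, 2y)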
@@ -93,7 +95,8 @@ def grad(output_, input_, components=None, d=None):
                 gradients = grad_scalar_output(c_output, input_, d)
             else:
                 gradients = gradients.append(
-                    grad_scalar_output(c_output, input_, d))
+                    grad_scalar_output(c_output, input_, d)
+                )
     else:
         raise NotImplementedError
@@ -133,7 +136,7 @@ def div(output_, input_, components=None, d=None):
         components = output_.labels
 
     if output_.shape[1] < 2 or len(components) < 2:
-        raise ValueError('div supported only for vector fields')
+        raise ValueError("div supported only for vector fields")
 
     if len(components) != len(d):
         raise ValueError
@@ -142,16 +145,16 @@ def div(output_, input_, components=None, d=None):
     div = torch.zeros(input_.shape[0], 1, device=output_.device)
     labels = [None] * len(components)
     for i, (c, d) in enumerate(zip(components, d)):
-        c_fields = f'd{c}d{d}'
+        c_fields = f"d{c}d{d}"
         div[:, 0] += grad_output.extract(c_fields).sum(axis=1)
         labels[i] = c_fields
 
     div = div.as_subclass(LabelTensor)
-    div.labels = ['+'.join(labels)]
+    div.labels = ["+".join(labels)]
     return div
 
 
-def laplacian(output_, input_, components=None, d=None, method='std'):
+def laplacian(output_, input_, components=None, d=None, method="std"):
     """
     Compute Laplace operator. The operator works for vectorial and
     scalar functions, with multiple input coordinates.
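For context (not visible in the hunk itself): `div` accumulates the labeled first derivatives d{c}/d{d} component by component. A rough plain-PyTorch equivalent for the 2D field v = (x*y, x + y), with made-up names:

    import torch

    pts = torch.rand(5, 2, requires_grad=True)
    x, y = pts[:, 0], pts[:, 1]
    v1, v2 = x * y, x + y                      # vector field components
    ones = torch.ones(5)

    # dv1/dx + dv2/dy, summed term by term as in the loop above
    dv1 = torch.autograd.grad(v1, pts, grad_outputs=ones, create_graph=True)[0]
    dv2 = torch.autograd.grad(v2, pts, grad_outputs=ones, create_graph=True)[0]
    divergence = dv1[:, 0] + dv2[:, 1]         # here: y + 1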
@@ -182,26 +185,27 @@ def laplacian(output_, input_, components=None, d=None, method='std'):
     if len(components) != len(d) and len(components) != 1:
         raise ValueError
 
-    if method == 'divgrad':
-        raise NotImplementedError('divgrad not implemented as method')
+    if method == "divgrad":
+        raise NotImplementedError("divgrad not implemented as method")
         # TODO fix
         # grad_output = grad(output_, input_, components, d)
         # result = div(grad_output, input_, d=d)
-    elif method == 'std':
+    elif method == "std":
 
         if len(components) == 1:
             grad_output = grad(output_, input_, components=components, d=d)
             result = torch.zeros(output_.shape[0], 1, device=output_.device)
             for i, label in enumerate(grad_output.labels):
                 gg = grad(grad_output, input_, d=d, components=[label])
-                result[:, 0] += super(torch.Tensor,
-                                      gg.T).__getitem__(i)  # TODO improve
-            labels = [f'dd{components[0]}']
+                result[:, 0] += super(torch.Tensor, gg.T).__getitem__(
+                    i
+                )  # TODO improve
+            labels = [f"dd{components[0]}"]
 
         else:
-            result = torch.empty(input_.shape[0],
-                                 len(components),
-                                 device=output_.device)
+            result = torch.empty(
+                input_.shape[0], len(components), device=output_.device
+            )
             labels = [None] * len(components)
             for idx, (ci, di) in enumerate(zip(components, d)):
@@ -212,7 +216,7 @@ def laplacian(output_, input_, components=None, d=None, method='std'):
 
                 grad_output = grad(output_, input_, components=ci, d=di)
                 result[:, idx] = grad(grad_output, input_, d=di).flatten()
-                labels[idx] = f'dd{ci}dd{di}'
+                labels[idx] = f"dd{ci}dd{di}"
 
     result = result.as_subclass(LabelTensor)
     result.labels = labels
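The "std" branch obtains second derivatives by calling `grad` twice, which works only because the first pass builds its graph with `create_graph=True`. The same idea in plain PyTorch (an illustrative sketch, not the library's code):

    import torch

    pts = torch.rand(6, 2, requires_grad=True)
    u = (pts ** 3).sum(dim=1)                  # u = x^3 + y^3
    ones = torch.ones(6)

    du = torch.autograd.grad(u, pts, grad_outputs=ones, create_graph=True)[0]
    # differentiate each first derivative once more
    ddu_x = torch.autograd.grad(du[:, 0], pts, grad_outputs=ones, create_graph=True)[0][:, 0]
    ddu_y = torch.autograd.grad(du[:, 1], pts, grad_outputs=ones, create_graph=True)[0][:, 1]
    laplace_u = ddu_x + ddu_y                  # = 6x + 6y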
@@ -245,8 +249,11 @@ def advection(output_, input_, velocity_field, components=None, d=None):
     if components is None:
         components = output_.labels
 
-    tmp = grad(output_, input_, components, d).reshape(-1, len(components),
-                                                       len(d)).transpose(0, 1)
+    tmp = (
+        grad(output_, input_, components, d)
+        .reshape(-1, len(components), len(d))
+        .transpose(0, 1)
+    )
 
     tmp *= output_.extract(velocity_field)
    return tmp.sum(dim=2).T
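The chained reshape/transpose that black reformats here evaluates the advection term (v · ∇)u: the gradient is reshaped to (components, points, coordinates), scaled by the velocity, and summed over the coordinate axis. A toy shape-level equivalent with plain tensors (shapes assumed, not taken from the commit):

    import torch

    n_pts, n_comp, n_dim = 10, 2, 2
    gradients = torch.rand(n_pts, n_comp * n_dim)  # stand-in for grad(...)
    velocity = torch.rand(n_pts, n_dim)            # stand-in for output_.extract(velocity_field)

    tmp = gradients.reshape(-1, n_comp, n_dim).transpose(0, 1)  # (n_comp, n_pts, n_dim)
    tmp = tmp * velocity                           # broadcasts over the component axis
    advection_term = tmp.sum(dim=2).T              # (n_pts, n_comp): rows of v . grad(u)
    print(advection_term.shape)                    # torch.Size([10, 2])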