Codacy correction
committed by Nicola Demo
parent ea3d1924e7
commit dd43c8304c
@@ -56,9 +56,9 @@ def grad(output_, input_, components=None, d=None):
         gradients = torch.autograd.grad(
             output_,
             input_,
-            grad_outputs=torch.ones(
-                output_.size(), dtype=output_.dtype, device=output_.device
-            ),
+            grad_outputs=torch.ones(output_.size(),
+                                    dtype=output_.dtype,
+                                    device=output_.device),
             create_graph=True,
             retain_graph=True,
             allow_unused=True,
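Both versions of the grad_outputs argument above are functionally identical; only the line wrapping changes. For context, a minimal sketch of what this call pattern computes, written with plain PyTorch tensors rather than the labelled tensors used in the surrounding code (the sample function u is purely illustrative):

import torch

# With grad_outputs set to a ones tensor shaped like the output,
# torch.autograd.grad returns the vector-Jacobian product 1^T J, which for a
# scalar-per-sample output is simply the gradient of each sample's output
# with respect to its input point.
x = torch.rand(10, 2, requires_grad=True)   # 10 points in 2D
u = (x ** 2).sum(dim=1, keepdim=True)       # one scalar output per point
grads = torch.autograd.grad(
    u,
    x,
    grad_outputs=torch.ones(u.size(), dtype=u.dtype, device=u.device),
    create_graph=True,    # keep the graph so higher-order derivatives stay possible
    retain_graph=True,
    allow_unused=True,
)[0]
# grads == 2 * x, shape (10, 2): one gradient row per input point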
@@ -85,8 +85,8 @@ def grad(output_, input_, components=None, d=None):
             raise RuntimeError
         gradients = grad_scalar_output(output_, input_, d)

-    elif output_.shape[
-        output_.ndim - 1] >= 2:  # vector output ##############################
+    elif output_.shape[output_.ndim -
+                       1] >= 2:  # vector output ##############################
         tensor_to_cat = []
         for i, c in enumerate(components):
             c_output = output_.extract([c])
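The reflowed elif changes nothing semantically: it still tests whether the last dimension of output_ is at least 2, i.e. whether the output is vector-valued, in which case the gradient is assembled component by component. A hedged sketch of that per-component pattern with plain tensors (the variable names mirror the diff, but this is not the library's code):

import torch

x = torch.rand(10, 2, requires_grad=True)
# Two-component output, shape (10, 2)
u = torch.stack([x[:, 0] * x[:, 1], (x ** 2).sum(dim=1)], dim=1)

tensor_to_cat = []
for i in range(u.shape[-1]):          # loop over output components
    c_output = u[:, i:i + 1]          # stand-in for output_.extract([c])
    g = torch.autograd.grad(
        c_output,
        x,
        grad_outputs=torch.ones_like(c_output),
        create_graph=True,
        retain_graph=True,
        allow_unused=True,
    )[0]
    tensor_to_cat.append(g)
gradients = torch.cat(tensor_to_cat, dim=-1)   # shape (10, 4): all du_i/dx_j side by side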
@@ -281,11 +281,8 @@ def advection(output_, input_, velocity_field, components=None, d=None):
     if components is None:
         components = output_.labels

-    tmp = (
-        grad(output_, input_, components, d)
-        .reshape(-1, len(components), len(d))
-        .transpose(0, 1)
-    )
+    tmp = (grad(output_, input_, components, d).reshape(-1, len(components),
+                                                        len(d)).transpose(0, 1))

     tmp *= output_.extract(velocity_field)
     return tmp.sum(dim=2).T
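The two forms of tmp are likewise equivalent: the flattened Jacobian returned by grad is reshaped to (batch, n_components, n_coordinates), transposed so the component axis leads, scaled elementwise by the velocity field, and summed over the coordinate axis, which yields the advection term (v · ∇)u per component. A small shape-only sketch with plain tensors (the shapes and the velocity variable are illustrative assumptions, not the library's types):

import torch

batch, n_comp, n_dim = 10, 2, 3
jac = torch.rand(batch, n_comp * n_dim)    # flattened Jacobian, standing in for grad(...)
velocity = torch.rand(batch, n_dim)        # advecting velocity at each point

tmp = jac.reshape(-1, n_comp, n_dim).transpose(0, 1)   # (n_comp, batch, n_dim)
tmp = tmp * velocity                                   # broadcast over components
adv = tmp.sum(dim=2).T                                 # (batch, n_comp): (v . grad) u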