preliminary modifications for N-S

Anna Ivagnes
2022-05-05 17:12:31 +02:00
parent d152fe67e3
commit 8130912926
13 changed files with 213 additions and 162 deletions


@@ -30,7 +30,7 @@ def div(output_, input_):
     else:  # really to improve
         a = []
         for o in output_.T:
-            a.append(grad(o, input_))
+            a.append(grad(o, input_).extract(['x', 'y']))
         div = torch.zeros(output_.shape[0], 1)
         for i in range(output_.shape[1]):
             div += a[i][:, i].reshape(-1, 1)
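Aside: the hunk above keeps only the spatial components ('x', 'y') of each per-component gradient before summing the diagonal to get the divergence, presumably so that non-spatial inputs do not enter the sum. A minimal, self-contained sketch of the same computation in plain PyTorch (no PINA imports; u_fn and pts are made-up names used only for illustration):

import torch

def u_fn(pts):
    # toy 2D velocity field u = (x*y, -y**2/2), which is divergence-free
    x, y = pts[:, 0:1], pts[:, 1:2]
    return torch.cat([x * y, -0.5 * y ** 2], dim=1)

pts = torch.rand(8, 2, requires_grad=True)
u = u_fn(pts)

divergence = torch.zeros(pts.shape[0], 1)
for i in range(u.shape[1]):
    # gradient of the i-th component w.r.t. the spatial inputs; keep only column i
    g = torch.autograd.grad(u[:, i].sum(), pts, create_graph=True)[0]
    divergence += g[:, i].reshape(-1, 1)

print(torch.allclose(divergence, torch.zeros_like(divergence), atol=1e-6))  # True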
@@ -42,4 +42,25 @@ def nabla(output_, input_):
     """
     TODO
     """
-    return div(grad(output_, input_), input_)
+    return div(grad(output_, input_).extract(['x', 'y']), input_)
+
+
+def advection_term(output_, input_):
+    """
+    TODO
+    """
+    dimension = len(output_.labels)
+    for i, label in enumerate(output_.labels):
+        # compute u dot gradient in each direction
+        gradient_loc = grad(output_.extract([label]), input_).extract(input_.labels[:dimension])
+        dim_0 = gradient_loc.shape[0]
+        dim_1 = gradient_loc.shape[1]
+        u_dot_grad_loc = torch.bmm(output_.view(dim_0, 1, dim_1),
+                                   gradient_loc.view(dim_0, dim_1, 1))
+        u_dot_grad_loc = LabelTensor(torch.reshape(u_dot_grad_loc,
+            (u_dot_grad_loc.shape[0], u_dot_grad_loc.shape[1])), [input_.labels[i]])
+        if i == 0:
+            adv_term = u_dot_grad_loc
+        else:
+            adv_term = adv_term.append(u_dot_grad_loc)
+    return adv_term
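Aside: a hedged, standalone sketch of what the new advection_term is meant to compute, the convective term (u . grad)u of the Navier-Stokes equations, written with plain torch tensors and autograd instead of LabelTensor (velocity_fn and pts are illustrative names, not library API):

import torch

def velocity_fn(pts):
    # toy 2D velocity field u = (y, -x)
    x, y = pts[:, 0:1], pts[:, 1:2]
    return torch.cat([y, -x], dim=1)

pts = torch.rand(8, 2, requires_grad=True)
u = velocity_fn(pts)                          # shape (N, 2)

columns = []
for i in range(u.shape[1]):
    # gradient of the i-th velocity component w.r.t. the inputs, shape (N, 2)
    grad_ui = torch.autograd.grad(u[:, i].sum(), pts, create_graph=True)[0]
    # batched dot product u . grad(u_i): (N, 1, 2) x (N, 2, 1) -> (N, 1, 1)
    columns.append(torch.bmm(u.unsqueeze(1), grad_ui.unsqueeze(2)).reshape(-1, 1))

adv = torch.cat(columns, dim=1)               # shape (N, 2), one column per component
# for u = (y, -x) the convective term is (u . grad)u = (-x, -y) = -pts
print(torch.allclose(adv, -pts, atol=1e-6))   # True

Each column of adv holds u . grad(u_i) for one velocity component, which mirrors the column-per-label result that the diff's advection_term appears to assemble via append.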