🎨 Format Python code with psf/black
@@ -93,7 +93,7 @@ class LabelTensor(torch.Tensor):
         labels = self.labels
         copy_tensor = deepcopy(self.tensor)
         return LabelTensor(copy_tensor, labels)
 
     @property
     def labels(self):
         """Property decorator for labels

@@ -8,7 +8,7 @@ __all__ = [
     "FourierIntegralKernel",
     "KernelNeuralOperator",
     "AveragingNeuralOperator",
-    "LowRankNeuralOperator"
+    "LowRankNeuralOperator",
 ]
 
 from .feed_forward import FeedForward, ResidualFeedForward

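The only change in this hunk is the trailing comma added after "LowRankNeuralOperator". This is black's "magic trailing comma": a trailing comma forces a collection to stay exploded, one element per line. A minimal sketch of the rule (the list below is illustrative, not from the commit):

    # Without a trailing comma, black collapses the list onto one line
    # whenever it fits within the line-length limit:
    names = ["a", "b", "c"]

    # With a trailing comma, black keeps one element per line, which is
    # why __all__ above remains exploded:
    names = [
        "a",
        "b",
        "c",
    ]
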
@@ -27,4 +27,4 @@ from .pod import PODBlock
 from .embedding import PeriodicBoundaryEmbedding
 from .avno_layer import AVNOBlock
 from .lowrank_layer import LowRankBlock
 from .adaptive_func import AdaptiveActivationFunction

@@ -3,7 +3,7 @@
 import torch
 
 from pina.utils import check_consistency
 import pina.model as pm  # avoid circular import
 
 
 class LowRankBlock(torch.nn.Module):

@@ -42,14 +42,16 @@ class LowRankBlock(torch.nn.Module):
 
     """
 
-    def __init__(self,
-                 input_dimensions,
-                 embedding_dimenion,
-                 rank,
-                 inner_size=20,
-                 n_layers=2,
-                 func=torch.nn.Tanh,
-                 bias=True):
+    def __init__(
+        self,
+        input_dimensions,
+        embedding_dimenion,
+        rank,
+        inner_size=20,
+        n_layers=2,
+        func=torch.nn.Tanh,
+        bias=True,
+    ):
         """
         :param int input_dimensions: The number of input components of the
             model.

@@ -78,10 +80,14 @@ class LowRankBlock(torch.nn.Module):
         super().__init__()
 
         # Assignment (check consistency inside FeedForward)
-        self._basis = pm.FeedForward(input_dimensions=input_dimensions,
-                                     output_dimensions=2*rank*embedding_dimenion,
-                                     inner_size=inner_size, n_layers=n_layers,
-                                     func=func, bias=bias)
+        self._basis = pm.FeedForward(
+            input_dimensions=input_dimensions,
+            output_dimensions=2 * rank * embedding_dimenion,
+            inner_size=inner_size,
+            n_layers=n_layers,
+            func=func,
+            bias=bias,
+        )
         self._nn = torch.nn.Linear(embedding_dimenion, embedding_dimenion)
 
         check_consistency(rank, int)

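Two black rules are visible in this hunk: a call too long for one line is split with one keyword argument per line plus a trailing comma, and binary operators such as * receive surrounding spaces. A runnable sketch of the same transformation (make_net and its arguments are made up for illustration):

    def make_net(input_dimensions, output_dimensions, inner_size):
        # stand-in for pm.FeedForward; just echoes its arguments
        return (input_dimensions, output_dimensions, inner_size)

    dim, rank = 8, 4
    # black output: one keyword argument per line, a magic trailing
    # comma, and spaced-out binary operators (2 * rank * dim)
    net = make_net(
        input_dimensions=dim,
        output_dimensions=2 * rank * dim,
        inner_size=20,
    )
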
@@ -115,15 +121,15 @@ class LowRankBlock(torch.nn.Module):
         # extract basis
         basis = self._basis(coords)
         # reshape [B, N, D, 2*rank]
-        shape = list(basis.shape[:-1]) + [-1, 2*self.rank]
+        shape = list(basis.shape[:-1]) + [-1, 2 * self.rank]
         basis = basis.reshape(shape)
         # divide
-        psi = basis[..., :self.rank]
-        phi = basis[..., self.rank:]
+        psi = basis[..., : self.rank]
+        phi = basis[..., self.rank :]
         # compute dot product
-        coeff = torch.einsum('...dr,...d->...r', psi,x)
+        coeff = torch.einsum("...dr,...d->...r", psi, x)
         # expand the basis
-        expansion = torch.einsum('...r,...dr->...d', coeff,phi)
+        expansion = torch.einsum("...r,...dr->...d", coeff, phi)
         # apply linear layer and return
         return self._func(self._nn(x) + expansion)
 

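Beyond quote normalization ('...' to "...") and the spacing black adds around ":" when a slice operand is a complex expression, the reformatted lines make the low-rank structure easier to read: psi projects the field onto rank coefficients and phi expands them back. A shape sketch of the two einsum calls (all dimensions are made-up example values):

    import torch

    B, N, D, rank = 2, 10, 8, 4             # batch, points, embedding dim, rank
    x = torch.randn(B, N, D)                # hidden field values
    basis = torch.randn(B, N, D, 2 * rank)  # reshaped output of the basis net

    psi = basis[..., :rank]                 # [B, N, D, rank]
    phi = basis[..., rank:]                 # [B, N, D, rank]

    # coeff_r = sum_d psi[..., d, r] * x[..., d]  (dot product along D)
    coeff = torch.einsum("...dr,...d->...r", psi, x)          # [B, N, rank]
    # expansion_d = sum_r coeff[..., r] * phi[..., d, r]
    expansion = torch.einsum("...r,...dr->...d", coeff, phi)  # [B, N, D]
    assert expansion.shape == x.shape
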
@@ -41,7 +41,7 @@ class LowRankNeuralOperator(KernelNeuralOperator):
         inner_size=20,
         n_layers=2,
         func=torch.nn.Tanh,
-        bias=True
+        bias=True,
     ):
         """
         :param torch.nn.Module lifting_net: The neural network for lifting

@@ -105,13 +105,18 @@ class LowRankNeuralOperator(KernelNeuralOperator):
         self.coordinates_indices = coordinates_indices
         self.field_indices = field_indices
         integral_net = nn.Sequential(
-            *[LowRankBlock(input_dimensions=len(coordinates_indices),
-                           embedding_dimenion=output_lifting_net,
-                           rank=rank,
-                           inner_size=inner_size,
-                           n_layers=n_layers,
-                           func=func,
-                           bias=bias) for _ in range(n_kernel_layers)]
+            *[
+                LowRankBlock(
+                    input_dimensions=len(coordinates_indices),
+                    embedding_dimenion=output_lifting_net,
+                    rank=rank,
+                    inner_size=inner_size,
+                    n_layers=n_layers,
+                    func=func,
+                    bias=bias,
+                )
+                for _ in range(n_kernel_layers)
+            ]
         )
         super().__init__(lifting_net, integral_net, projecting_net)
 

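The last hunk is the same call-splitting rule applied to a starred list comprehension: black moves the comprehension body, the for clause, and the closing brackets onto their own lines. A minimal sketch of the underlying pattern (nn.Linear stands in for LowRankBlock; sizes are illustrative):

    import torch.nn as nn

    n_kernel_layers = 3
    # build n identical blocks and splat them into Sequential,
    # mirroring integral_net above
    integral_net = nn.Sequential(
        *[nn.Linear(8, 8) for _ in range(n_kernel_layers)]
    )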