Fix Codacy Warnings (#477)

---------

Co-authored-by: Dario Coscia <dariocos99@gmail.com>
Author: Filippo Olivo
Date: 2025-03-10 15:38:45 +01:00
Committed by: Nicola Demo
Parent: e3790e049a
Commit: 4177bfbb50
157 changed files with 3473 additions and 3839 deletions


@@ -7,10 +7,9 @@ def test_constructor_residual_block():
     res_block = ResidualBlock(input_dim=10, output_dim=3, hidden_dim=4)
-    res_block = ResidualBlock(input_dim=10,
-                              output_dim=3,
-                              hidden_dim=4,
-                              spectral_norm=True)
+    res_block = ResidualBlock(
+        input_dim=10, output_dim=3, hidden_dim=4, spectral_norm=True
+    )
 
 def test_forward_residual_block():
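For context, a minimal sketch of the ResidualBlock behaviour these tests exercise; the import path is a guess, and only the constructor arguments and the (batch, input_dim) -> (batch, output_dim) shape contract are confirmed by the tests themselves:

import torch
from pina.model.block import ResidualBlock  # import path is an assumption

# Constructor arguments as used in the tests; spectral_norm is optional.
res_block = ResidualBlock(
    input_dim=10, output_dim=3, hidden_dim=4, spectral_norm=True
)

x = torch.rand(size=(80, 10))           # batch of 80 samples, 10 features each
y = res_block(x)                        # forward pass
assert y.shape == torch.Size([80, 3])   # batch size kept, features mapped to output_dim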
@@ -22,8 +21,9 @@ def test_forward_residual_block():
     assert y.shape[1] == 3
     assert y.shape[0] == x.shape[0]
 
 def test_backward_residual_block():
     res_block = ResidualBlock(input_dim=10, output_dim=3, hidden_dim=4)
     x = torch.rand(size=(80, 10))
@@ -31,27 +31,37 @@ def test_backward_residual_block():
     y = res_block(x)
     l = torch.mean(y)
     l.backward()
-    assert x._grad.shape == torch.Size([80,10])
+    assert x._grad.shape == torch.Size([80, 10])
 
 def test_constructor_no_activation_no_dropout():
     linear_layer = nn.Linear(10, 20)
     enhanced_linear = EnhancedLinear(linear_layer)
-    assert len(list(enhanced_linear.parameters())) == len(list(linear_layer.parameters()))
+    assert len(list(enhanced_linear.parameters())) == len(
+        list(linear_layer.parameters())
+    )
 
 def test_constructor_with_activation_no_dropout():
     linear_layer = nn.Linear(10, 20)
     activation = nn.ReLU()
     enhanced_linear = EnhancedLinear(linear_layer, activation)
-    assert len(list(enhanced_linear.parameters())) == len(list(linear_layer.parameters())) + len(list(activation.parameters()))
+    assert len(list(enhanced_linear.parameters())) == len(
+        list(linear_layer.parameters())
+    ) + len(list(activation.parameters()))
 
 def test_constructor_no_activation_with_dropout():
     linear_layer = nn.Linear(10, 20)
     dropout_prob = 0.5
     enhanced_linear = EnhancedLinear(linear_layer, dropout=dropout_prob)
-    assert len(list(enhanced_linear.parameters())) == len(list(linear_layer.parameters()))
+    assert len(list(enhanced_linear.parameters())) == len(
+        list(linear_layer.parameters())
+    )
 
 def test_constructor_with_activation_with_dropout():
     linear_layer = nn.Linear(10, 20)
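All of the EnhancedLinear constructor tests above check the same invariant: wrapping a layer (optionally with an activation and a dropout probability) exposes no trainable parameters beyond those of the wrapped layer and the activation, since dropout is parameter-free. A minimal sketch of that invariant, with the import path assumed rather than taken from the diff:

import torch.nn as nn
from pina.model.block import EnhancedLinear  # import path is an assumption

linear_layer = nn.Linear(10, 20)   # two parameter tensors: weight and bias
activation = nn.ReLU()             # parameter-free
enhanced_linear = EnhancedLinear(linear_layer, activation, dropout=0.5)

# The wrapper should expose exactly the parameters of its components.
assert len(list(enhanced_linear.parameters())) == len(
    list(linear_layer.parameters())
) + len(list(activation.parameters()))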
@@ -59,7 +69,10 @@ def test_constructor_with_activation_with_dropout():
     dropout_prob = 0.5
     enhanced_linear = EnhancedLinear(linear_layer, activation, dropout_prob)
-    assert len(list(enhanced_linear.parameters())) == len(list(linear_layer.parameters())) + len(list(activation.parameters()))
+    assert len(list(enhanced_linear.parameters())) == len(
+        list(linear_layer.parameters())
+    ) + len(list(activation.parameters()))
 
 def test_forward_enhanced_linear_no_dropout():
@@ -70,8 +83,9 @@ def test_forward_enhanced_linear_no_dropout():
     assert y.shape[1] == 3
     assert y.shape[0] == x.shape[0]
 
 def test_backward_enhanced_linear_no_dropout():
     enhanced_linear = EnhancedLinear(nn.Linear(10, 3))
     x = torch.rand(size=(80, 10))
@@ -81,6 +95,7 @@ def test_backward_enhanced_linear_no_dropout():
     l.backward()
     assert x._grad.shape == torch.Size([80, 10])
 
 def test_forward_enhanced_linear_dropout():
     enhanced_linear = EnhancedLinear(nn.Linear(10, 3), dropout=0.5)
@@ -90,8 +105,9 @@ def test_forward_enhanced_linear_dropout():
     assert y.shape[1] == 3
     assert y.shape[0] == x.shape[0]
 
 def test_backward_enhanced_linear_dropout():
     enhanced_linear = EnhancedLinear(nn.Linear(10, 3), dropout=0.5)
     x = torch.rand(size=(80, 10))
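The backward tests all follow the same autograd pattern: reduce the output to a scalar, call backward(), and check the gradient accumulated on the input. For x._grad to be populated the input must require gradients, which the surviving hunk lines do not show being set; a hedged sketch of the pattern, with a plain nn.Linear standing in for the tested blocks:

import torch
import torch.nn as nn

module = nn.Linear(10, 3)  # stand-in for ResidualBlock / EnhancedLinear

x = torch.rand(size=(80, 10), requires_grad=True)  # requires_grad assumed, not visible in the diff
y = module(x)
l = torch.mean(y)     # scalar loss, so backward() needs no gradient argument
l.backward()
assert x.grad.shape == torch.Size([80, 10])  # the tests read the private alias x._grad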