Documentation for v0.1 version (#199)
* Add equations and fix typos
* Improve _code.rst
* Add the team rst and restructure index.rst
* Fix errors

Co-authored-by: Dario Coscia <dariocoscia@dhcp-015.eduroam.sissa.it>
committed by Nicola Demo
parent 3f9305d475
commit 8b7b61b3bd
@@ -9,17 +9,21 @@ class ResidualBlock(nn.Module):
    .. seealso::

        **Original reference**: He, Kaiming, et al.
        "Deep residual learning for image recognition."
        *Deep residual learning for image recognition.*
        Proceedings of the IEEE conference on computer vision
        and pattern recognition. 2016..
        <https://arxiv.org/pdf/1512.03385.pdf>`_.
        DOI: `<https://arxiv.org/pdf/1512.03385.pdf>`_.

    """

    def __init__(self, input_dim, output_dim,
                 hidden_dim, spectral_norm=False,
    def __init__(self,
                 input_dim,
                 output_dim,
                 hidden_dim,
                 spectral_norm=False,
                 activation=torch.nn.ReLU()):
        """Residual block constructor
        """
        Initializes the ResidualBlock module.

        :param int input_dim: Dimension of the input to pass to the
            feedforward linear layer.
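As a quick illustration of the constructor documented in this hunk, the following is a minimal usage sketch; the import path is hypothetical (it is not shown in this diff), and the dimensions are arbitrary.

    import torch
    from pina.model.layers import ResidualBlock  # hypothetical import path, not part of this commit

    # 10 input features, 20 output features, 32 hidden features,
    # with optional spectral normalization on each linear layer
    block = ResidualBlock(input_dim=10, output_dim=20, hidden_dim=32,
                          spectral_norm=True, activation=torch.nn.ReLU())
    y = block(torch.rand(8, 10))  # expected output shape: (8, 20)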
@@ -48,9 +52,9 @@ class ResidualBlock(nn.Module):
        self._activation = activation

        # create layers
        self.l1 = self._spect_norm(nn.Linear(input_dim, hidden_dim))
        self.l2 = self._spect_norm(nn.Linear(hidden_dim, output_dim))
        self.l3 = self._spect_norm(nn.Linear(input_dim, output_dim))
        self._l1 = self._spect_norm(nn.Linear(input_dim, hidden_dim))
        self._l2 = self._spect_norm(nn.Linear(hidden_dim, output_dim))
        self._l3 = self._spect_norm(nn.Linear(input_dim, output_dim))

    def forward(self, x):
        """Forward pass for residual block layer.
@@ -59,9 +63,9 @@ class ResidualBlock(nn.Module):
        :return: Output tensor for the residual layer.
        :rtype: torch.Tensor
        """
        y = self.activation(self.l1(x))
        y = self.l2(y)
        x = self.l3(x)
        y = self._activation(self._l1(x))
        y = self._l2(y)
        x = self._l3(x)
        return y + x

    def _spect_norm(self, x):
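The forward pass in this hunk composes two linear maps with a projected skip connection. A self-contained sketch of the same computation in plain PyTorch (independent of the class, dimensions chosen for illustration) is:

    import torch
    import torch.nn as nn

    l1, l2, l3 = nn.Linear(10, 32), nn.Linear(32, 20), nn.Linear(10, 20)
    act = nn.ReLU()

    x = torch.rand(8, 10)
    y = act(l1(x))   # hidden representation
    y = l2(y)        # main branch output
    skip = l3(x)     # projected skip connection
    out = y + skip   # residual sum, shape (8, 20)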
@@ -74,32 +78,40 @@ class ResidualBlock(nn.Module):
        """
        return nn.utils.spectral_norm(x) if self._spectral_norm else x

    @ property
    def spectral_norm(self):
        return self._spectral_norm

    @ property
    def input_dim(self):
        return self._input_dim

    @ property
    def output_dim(self):
        return self._output_dim

    @ property
    def hidden_dim(self):
        return self._hidden_dim

    @ property
    def activation(self):
        return self._activation
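`_spect_norm` above conditionally wraps a layer with PyTorch's spectral normalization. A small standalone sketch of that wrapping, outside the class, would be:

    import torch.nn as nn

    spectral_norm = True
    layer = nn.Linear(10, 32)
    # wrap the layer only when spectral normalization is requested
    layer = nn.utils.spectral_norm(layer) if spectral_norm else layer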
import torch
import torch.nn as nn


class EnhancedLinear(torch.nn.Module):
    """
    TODO
    A wrapper class for enhancing a linear layer with activation and/or dropout.

    :param layer: The linear layer to be enhanced.
    :type layer: torch.nn.Module
    :param activation: The activation function to be applied after the linear layer.
    :type activation: torch.nn.Module
    :param dropout: The dropout probability to be applied after the activation (if provided).
    :type dropout: float

    :Example:

        >>> linear_layer = torch.nn.Linear(10, 20)
        >>> activation = torch.nn.ReLU()
        >>> dropout_prob = 0.5
        >>> enhanced_linear = EnhancedLinear(linear_layer, activation, dropout_prob)
    """

    def __init__(self, layer, activation=None, dropout=None):
        """
        Initializes the EnhancedLinear module.

        :param layer: The linear layer to be enhanced.
        :type layer: torch.nn.Module
        :param activation: The activation function to be applied after the linear layer.
        :type activation: torch.nn.Module
        :param dropout: The dropout probability to be applied after the activation (if provided).
        :type dropout: float
        """
        super().__init__()

        # check consistency
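Following the :Example: in the docstring above, a forward call through the wrapper could look like the sketch below; it assumes the class is available in the current scope and reuses the docstring's own arguments.

    import torch

    enhanced_linear = EnhancedLinear(torch.nn.Linear(10, 20),
                                     activation=torch.nn.ReLU(),
                                     dropout=0.5)
    out = enhanced_linear(torch.rand(8, 10))  # expected output shape: (8, 20)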
@@ -108,23 +120,41 @@ class EnhancedLinear(torch.nn.Module):
        check_consistency(activation, nn.Module)
        if dropout is not None:
            check_consistency(dropout, float)

        # assign forward
        if (dropout is None) and (activation is None):
            self._model = torch.nn.Sequential(layer)

        elif (dropout is None) and (activation is not None):
            self._model = torch.nn.Sequential(layer,
                                              activation)
            self._model = torch.nn.Sequential(layer, activation)

        elif (dropout is not None) and (activation is None):
            self._model = torch.nn.Sequential(layer,
                                              self._drop(dropout))
            self._model = torch.nn.Sequential(layer, self._drop(dropout))

        elif (dropout is not None) and (activation is not None):
            self._model = torch.nn.Sequential(layer,
                                              activation,
            self._model = torch.nn.Sequential(layer, activation,
                                              self._drop(dropout))

    def forward(self, x):
        """
        Forward pass through the enhanced linear module.

        :param x: Input tensor.
        :type x: torch.Tensor

        :return: Output tensor after passing through the enhanced linear module.
        :rtype: torch.Tensor
        """
        return self._model(x)

    def _drop(self, p):
        """
        Applies dropout with probability p.

        :param p: Dropout probability.
        :type p: float

        :return: Dropout layer with the specified probability.
        :rtype: torch.nn.Dropout
        """
        return torch.nn.Dropout(p)
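The four branches above assemble `self._model` from the optional pieces. With both an activation and a dropout probability supplied, the wrapper behaves like the plain Sequential in this sketch (an illustration, not part of the diff):

    import torch

    layer = torch.nn.Linear(10, 20)
    model = torch.nn.Sequential(layer,
                                torch.nn.ReLU(),
                                torch.nn.Dropout(0.5))
    out = model(torch.rand(8, 10))  # same computation as EnhancedLinear(layer, ReLU(), 0.5)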