Skip to content

Models

mlpoppyns.learning.models.model_base

Base model.

Authors:

Alberto Garcia Garcia (garciagarcia@ice.csic.es)

ModelBase

Bases: Module

Base abstract class for all models.

This class serves as a blueprint for creating various neural-network architecture models. It defines the essential methods that all models must implement, ensuring consistency.

Source code in mlpoppyns/learning/models/model_base.py
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
class ModelBase(nn.Module):
    """
    Base abstract class for all models.

    This class serves as a blueprint for creating various neural-network architecture models.
    It defines the essential methods that all models must implement, ensuring consistency.
    """

    @abc.abstractmethod
    def forward(self, *inputs: torch.Tensor) -> torch.Tensor:
        """
        Forward pass.

        Abstract method for the forward pass that must be implemented for each
        model that derives this class to implement its whole forward pass.

        Args:
            inputs (torch.Tensor): The network inputs.

        Returns:
            (torch.Tensor): The network output tensor after forwarding all layers.

        Raises:
            NotImplementedError: Always; subclasses must override this method.
        """
        raise NotImplementedError

    def __str__(self) -> str:
        """
        String representation of the model.

        Appends the number of trainable parameters to the default
        ``nn.Module`` representation.

        Returns:
            (str): String representation of the model.
        """

        # Count only parameters the optimizer will update.  ``Tensor.numel``
        # avoids the numpy round-trip of ``np.prod(p.size())`` and the
        # needless intermediate list inside ``sum``.
        params = sum(p.numel() for p in self.parameters() if p.requires_grad)
        return f"{super().__str__()}\nTrainable parameters: {params}"

__str__()

String representation of the model.

This method should provide a human-readable description of the model, including its name and any relevant parameters or characteristics.

Returns:

Type Description
str

String representation of the model.

Source code in mlpoppyns/learning/models/model_base.py
40
41
42
43
44
45
46
47
48
49
50
51
52
53
def __str__(self) -> str:
    """
    String representation of the model.

    Extends the default representation with the count of trainable
    (gradient-requiring) parameters.

    Returns:
        (str): String representation of the model.
    """

    trainable = (p for p in self.parameters() if p.requires_grad)
    n_params = sum(np.prod(p.size()) for p in trainable)
    return super().__str__() + "\nTrainable parameters: {}".format(n_params)

forward(*inputs) abstractmethod

Forward pass.

Abstract method for the forward pass that must be implemented for each model that derives this class to implement its whole forward pass.

Parameters:

Name Type Description Default
inputs Tensor

The network inputs.

()

Returns:

Type Description
Tensor

The network output tensor after forwarding all layers.

Source code in mlpoppyns/learning/models/model_base.py
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
@abc.abstractmethod
def forward(self, *inputs: torch.Tensor) -> torch.Tensor:
    """
    Forward pass.

    Subclasses must override this method with the complete forward
    computation of their network.

    Args:
        inputs (torch.Tensor): The network inputs.

    Returns:
        (torch.Tensor): The network output tensor after forwarding all layers.
    """
    raise NotImplementedError

mlpoppyns.learning.models.model_conv

Model for a convolutional neural network.

Authors:

Michele Ronchi (ronchi@ice.csic.es)
Alberto Garcia Garcia (garciagarcia@ice.csic.es)

ModelConv

Bases: ModelBase

A convolutional neural network model

Source code in mlpoppyns/learning/models/model_conv.py
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
class ModelConv(ModelBase):
    """
    A convolutional neural network model.

    Two convolution + max-pooling stages followed by two fully connected
    layers that predict ``num_parameters`` values.
    """

    def __init__(self, input_shape: np.ndarray, num_parameters: int = 1) -> None:
        """
        CNN model initialization.

        This CNN automatically adapts to the shape of the initial input features.

        Args:
            input_shape (np.ndarray): Shape of the input batch (C x H x W).
            num_parameters (int): Number of parameters to predict.
        """

        super().__init__()
        self.conv1 = nn.Conv2d(input_shape[0], 32, 3)
        self.conv2 = nn.Conv2d(32, 64, 3)
        self.pool = nn.MaxPool2d(2, 2)

        # Probe the convolutional stack with a mock input so that the
        # flattened feature size (``self._to_linear``) is known and the linear
        # layers can be sized automatically.  ``no_grad`` keeps the probe from
        # building a throwaway autograd graph.
        self._to_linear = None
        with torch.no_grad():
            x = torch.randn(input_shape).view(
                -1, input_shape[0], input_shape[1], input_shape[2]
            )
            self.convs(x)

        self.fc1 = nn.Linear(self._to_linear, 64)
        self.fc2 = nn.Linear(64, num_parameters)

    def convs(self, x: torch.Tensor) -> torch.Tensor:
        """
        Convolution and pooling layers forward pass.

        Args:
            x (torch.Tensor): Input tensor for the convolution layers.

        Returns:
            (torch.Tensor): Output tensor of the convolution and pooling layers.
        """

        x = self.pool(F.relu(self.conv1(x)))
        x = self.pool(F.relu(self.conv2(x)))

        # Cache the flattened per-sample feature size on the first call so the
        # fully connected head can be sized automatically.
        if self._to_linear is None:
            self._to_linear = x[0].shape[0] * x[0].shape[1] * x[0].shape[2]

        return x

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        """
        Forward pass.

        Args:
            x (torch.Tensor): Input tensor for the network.

        Returns:
            (torch.Tensor): Output tensor of the network after forwarding all layers.
        """

        x = self.convs(x)
        x = x.view(-1, self._to_linear)
        x = F.relu(self.fc1(x))
        x = self.fc2(x)

        return x

__init__(input_shape, num_parameters=1)

CNN model initialization.

This CNN automatically adapts to the shape of the initial input features.

Parameters:

Name Type Description Default
input_shape array

Shape of the input batch (C x H x W).

required
num_parameters int

Number of parameters to predict.

1
Source code in mlpoppyns/learning/models/model_conv.py
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
def __init__(self, input_shape: np.ndarray, num_parameters: int = 1) -> None:
    """
    CNN model initialization.

    This CNN automatically adapts to the shape of the initial input features.

    Args:
        input_shape (np.ndarray): Shape of the input batch (C x H x W).
        num_parameters (int): Number of parameters to predict.
    """

    super().__init__()
    self.conv1 = nn.Conv2d(input_shape[0], 32, 3)
    self.conv2 = nn.Conv2d(32, 64, 3)
    self.pool = nn.MaxPool2d(2, 2)

    # Probe the convolutional stack with a mock input so that the flattened
    # feature size (``self._to_linear``) is known and the linear layers can
    # be sized automatically.  ``no_grad`` keeps the probe from building a
    # throwaway autograd graph.
    self._to_linear = None
    with torch.no_grad():
        x = torch.randn(input_shape).view(
            -1, input_shape[0], input_shape[1], input_shape[2]
        )
        self.convs(x)

    self.fc1 = nn.Linear(self._to_linear, 64)
    self.fc2 = nn.Linear(64, num_parameters)

convs(x)

Convolution and pooling layers forward pass.

Parameters:

Name Type Description Default
x Tensor

Input tensor for the convolution layers.

required

Returns:

Type Description
Tensor

Output tensor of the convolution and pooling layers.

Source code in mlpoppyns/learning/models/model_conv.py
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
def convs(self, x: torch.Tensor) -> torch.Tensor:
    """
    Run the convolution and pooling stages.

    Args:
        x (torch.Tensor): Input tensor for the convolution layers.

    Returns:
        (torch.Tensor): Output tensor of the convolution and pooling layers.
    """

    out = x
    for conv in (self.conv1, self.conv2):
        out = self.pool(F.relu(conv(out)))

    # Record the flattened per-sample feature size the first time through.
    if self._to_linear is None:
        channels, height, width = out[0].shape
        self._to_linear = channels * height * width

    return out

forward(x)

Forward pass.

Parameters:

Name Type Description Default
x Tensor

Input tensor for the network.

required

Returns:

Type Description
Tensor

Output tensor of the network after forwarding all layers.

Source code in mlpoppyns/learning/models/model_conv.py
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
def forward(self, x: torch.Tensor) -> torch.Tensor:
    """
    Forward pass.

    Args:
        x (torch.Tensor): Input tensor for the network.

    Returns:
        (torch.Tensor): Output tensor of the network after forwarding all layers.
    """

    features = self.convs(x)
    flat = features.view(-1, self._to_linear)
    hidden = F.relu(self.fc1(flat))
    return self.fc2(hidden)

mlpoppyns.learning.models.model_conv_sbi

Model for a convolutional neural network used as an embedding network in the sbi framework to compress the input features into a latent vector. The architecture consists of two blocks of convolutional layers, each followed by a max-pooling layer. The final output is flattened and passed through a fully connected layer to produce the latent vector used for inference.

Authors:

Michele Ronchi (ronchi@ice.csic.es)

ModelConvSBI

Bases: ModelBase

A convolutional neural network model with 2 convolutional layers.

Source code in mlpoppyns/learning/models/model_conv_sbi.py
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
class ModelConvSBI(ModelBase):
    """
    A convolutional neural network model with 2 convolutional layers.

    Used as an embedding network that compresses the input features into a
    latent vector of length ``len_output_layer``.
    """

    def __init__(
        self, input_shape: np.ndarray, len_output_layer: int = 1
    ) -> None:
        """
        CNN model initialization.

        This CNN automatically adapts to the shape of the initial input features.

        Args:
            input_shape (np.ndarray): Shape of the input batch (C x H x W).
            len_output_layer (int): Length of the latent vector.
        """

        super().__init__()
        self.conv1 = nn.Conv2d(input_shape[0], 32, kernel_size=3, padding=1)
        self.conv2 = nn.Conv2d(32, 64, kernel_size=3, padding=1)
        self.pool = nn.MaxPool2d(2, 2)

        # Probe the convolutional stack with a mock input so that the
        # flattened feature size (``self._to_linear``) is known and the linear
        # layer can be sized automatically.  ``no_grad`` keeps the probe from
        # building a throwaway autograd graph.
        self._to_linear = None
        with torch.no_grad():
            x = torch.randn(input_shape).view(
                -1, input_shape[0], input_shape[1], input_shape[2]
            )
            self.convs(x)

        self.fc1 = nn.Linear(self._to_linear, len_output_layer)

    def convs(self, x: torch.Tensor) -> torch.Tensor:
        """
        Convolution and pooling layers forward pass.

        Args:
            x (torch.Tensor): Input tensor for the convolution layers.

        Returns:
            (torch.Tensor): Output tensor of the convolution and pooling layers.
        """

        x = self.pool(F.relu(self.conv1(x)))
        x = self.pool(F.relu(self.conv2(x)))

        # Cache the flattened per-sample feature size on the first call.
        if self._to_linear is None:
            self._to_linear = x[0].shape[0] * x[0].shape[1] * x[0].shape[2]

        return x

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        """
        Forward pass.

        Args:
            x (torch.Tensor): Input tensor for the network.

        Returns:
            (torch.Tensor): Latent vector after forwarding all layers.
        """

        x = self.convs(x)
        x = x.view(-1, self._to_linear)
        x = F.relu(self.fc1(x))

        return x

__init__(input_shape, len_output_layer=1)

CNN model initialization.

This CNN automatically adapts to the shape of the initial input features.

Parameters:

Name Type Description Default
input_shape array

Shape of the input batch (C x H x W).

required
len_output_layer int

Length of the latent vector.

1
Source code in mlpoppyns/learning/models/model_conv_sbi.py
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
def __init__(
    self, input_shape: np.ndarray, len_output_layer: int = 1
) -> None:
    """
    CNN model initialization.

    This CNN automatically adapts to the shape of the initial input features.

    Args:
        input_shape (np.ndarray): Shape of the input batch (C x H x W).
        len_output_layer (int): Length of the latent vector.
    """

    super().__init__()
    self.conv1 = nn.Conv2d(input_shape[0], 32, kernel_size=3, padding=1)
    self.conv2 = nn.Conv2d(32, 64, kernel_size=3, padding=1)
    self.pool = nn.MaxPool2d(2, 2)

    # Probe the convolutional stack with a mock input so that the flattened
    # feature size (``self._to_linear``) is known and the linear layer can
    # be sized automatically.  ``no_grad`` keeps the probe from building a
    # throwaway autograd graph.
    self._to_linear = None
    with torch.no_grad():
        x = torch.randn(input_shape).view(
            -1, input_shape[0], input_shape[1], input_shape[2]
        )
        self.convs(x)

    self.fc1 = nn.Linear(self._to_linear, len_output_layer)

convs(x)

Convolution and pooling layers forward pass.

Parameters:

Name Type Description Default
x Tensor

Input tensor for the convolution layers.

required

Returns:

Type Description
Tensor

Output tensor of the convolution and pooling layers.

Source code in mlpoppyns/learning/models/model_conv_sbi.py
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
def convs(self, x: torch.Tensor) -> torch.Tensor:
    """
    Apply both convolution + max-pooling stages to ``x``.

    Args:
        x (torch.Tensor): Input tensor for the convolution layers.

    Returns:
        (torch.Tensor): Output tensor of the convolution and pooling layers.
    """

    stage1 = self.pool(F.relu(self.conv1(x)))
    stage2 = self.pool(F.relu(self.conv2(stage1)))

    # Remember the flattened per-sample feature size on the first call.
    if self._to_linear is None:
        sample = stage2[0]
        self._to_linear = sample.shape[0] * sample.shape[1] * sample.shape[2]

    return stage2

forward(x)

Forward pass.

Parameters:

Name Type Description Default
x Tensor

Input tensor for the network.

required

Returns:

Type Description
Tensor

Output tensor of the network after forwarding all layers.

Source code in mlpoppyns/learning/models/model_conv_sbi.py
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
def forward(self, x: torch.Tensor) -> torch.Tensor:
    """
    Forward pass.

    Args:
        x (torch.Tensor): Input tensor for the network.

    Returns:
        (torch.Tensor): Output tensor of the network after forwarding all layers.
    """

    features = self.convs(x)
    flat = features.view(-1, self._to_linear)
    return F.relu(self.fc1(flat))

mlpoppyns.learning.models.model_conv_sbi_deep

Model for a deeper convolutional neural network used as an embedding network in the sbi framework to compress the input features into a latent vector. The architecture consists of four blocks of convolutional layers, each followed by a max-pooling layer. The final output is flattened and passed through a fully connected layer to produce the latent vector used for inference.

Authors:

Michele Ronchi (ronchi@ice.csic.es)

ModelConvSBIdeep

Bases: ModelBase

A convolutional neural network model with 4 convolutional layers.

Source code in mlpoppyns/learning/models/model_conv_sbi_deep.py
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
class ModelConvSBIdeep(ModelBase):
    """
    A convolutional neural network model with 4 convolutional layers.

    Deeper embedding network that compresses the input features into a
    latent vector of length ``len_output_layer``.
    """

    def __init__(
        self, input_shape: np.ndarray, len_output_layer: int = 1
    ) -> None:
        """
        CNN model initialization.

        This CNN automatically adapts to the shape of the initial input features.

        Args:
            input_shape (np.ndarray): Shape of the input batch (C x H x W).
            len_output_layer (int): Length of the latent vector.
        """

        super().__init__()
        self.conv1 = nn.Conv2d(input_shape[0], 32, kernel_size=3, padding=1)
        self.conv2 = nn.Conv2d(32, 32, kernel_size=3, padding=1)
        self.conv3 = nn.Conv2d(32, 64, kernel_size=3, padding=1)
        self.conv4 = nn.Conv2d(64, 64, kernel_size=3, padding=1)
        self.pool = nn.MaxPool2d(2, 2)

        # Probe the convolutional stack with a mock input so that the
        # flattened feature size (``self._to_linear``) is known and the linear
        # layer can be sized automatically.  ``no_grad`` keeps the probe from
        # building a throwaway autograd graph.
        self._to_linear = None
        with torch.no_grad():
            x = torch.randn(input_shape).view(
                -1, input_shape[0], input_shape[1], input_shape[2]
            )
            self.convs(x)

        self.fc1 = nn.Linear(self._to_linear, len_output_layer)

    def convs(self, x: torch.Tensor) -> torch.Tensor:
        """
        Convolution and pooling layers forward pass.

        Args:
            x (torch.Tensor): Input tensor for the convolution layers.

        Returns:
            (torch.Tensor): Output tensor of the convolution and pooling layers.
        """

        # Each convolution is followed by a 2x2 max-pool, halving H and W.
        for conv in (self.conv1, self.conv2, self.conv3, self.conv4):
            x = self.pool(F.relu(conv(x)))

        # Cache the flattened per-sample feature size on the first call.
        if self._to_linear is None:
            self._to_linear = x[0].shape[0] * x[0].shape[1] * x[0].shape[2]

        return x

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        """
        Forward pass.

        Args:
            x (torch.Tensor): Input tensor for the network.

        Returns:
            (torch.Tensor): Latent vector after forwarding all layers.
        """

        x = self.convs(x)
        x = x.view(-1, self._to_linear)
        x = F.relu(self.fc1(x))

        return x

__init__(input_shape, len_output_layer=1)

CNN model initialization.

This CNN automatically adapts to the shape of the initial input features.

Parameters:

Name Type Description Default
input_shape array

Shape of the input batch (C x H x W).

required
len_output_layer int

Length of the latent vector.

1
Source code in mlpoppyns/learning/models/model_conv_sbi_deep.py
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
def __init__(
    self, input_shape: np.ndarray, len_output_layer: int = 1
) -> None:
    """
    CNN model initialization.

    This CNN automatically adapts to the shape of the initial input features.

    Args:
        input_shape (np.ndarray): Shape of the input batch (C x H x W).
        len_output_layer (int): Length of the latent vector.
    """

    super().__init__()
    self.conv1 = nn.Conv2d(input_shape[0], 32, kernel_size=3, padding=1)
    self.conv2 = nn.Conv2d(32, 32, kernel_size=3, padding=1)
    self.conv3 = nn.Conv2d(32, 64, kernel_size=3, padding=1)
    self.conv4 = nn.Conv2d(64, 64, kernel_size=3, padding=1)
    self.pool = nn.MaxPool2d(2, 2)

    # Probe the convolutional stack with a mock input so that the flattened
    # feature size (``self._to_linear``) is known and the linear layer can
    # be sized automatically.  ``no_grad`` keeps the probe from building a
    # throwaway autograd graph.
    self._to_linear = None
    with torch.no_grad():
        x = torch.randn(input_shape).view(
            -1, input_shape[0], input_shape[1], input_shape[2]
        )
        self.convs(x)

    self.fc1 = nn.Linear(self._to_linear, len_output_layer)

convs(x)

Convolution and pooling layers forward pass.

Parameters:

Name Type Description Default
x Tensor

Input tensor for the convolution layers.

required

Returns:

Type Description
Tensor

Output tensor of the convolution and pooling layers.

Source code in mlpoppyns/learning/models/model_conv_sbi_deep.py
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
def convs(self, x: torch.Tensor) -> torch.Tensor:
    """
    Run all four convolution + max-pooling stages.

    Args:
        x (torch.Tensor): Input tensor for the convolution layers.

    Returns:
        (torch.Tensor): Output tensor of the convolution and pooling layers.
    """

    out = x
    for conv in (self.conv1, self.conv2, self.conv3, self.conv4):
        out = self.pool(F.relu(conv(out)))

    # Record the flattened per-sample feature size the first time through.
    if self._to_linear is None:
        channels, height, width = out[0].shape
        self._to_linear = channels * height * width

    return out

forward(x)

Forward pass.

Parameters:

Name Type Description Default
x Tensor

Input tensor for the network.

required

Returns:

Type Description
Tensor

Output tensor of the network after forwarding all layers.

Source code in mlpoppyns/learning/models/model_conv_sbi_deep.py
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
def forward(self, x: torch.Tensor) -> torch.Tensor:
    """
    Forward pass.

    Args:
        x (torch.Tensor): Input tensor for the network.

    Returns:
        (torch.Tensor): Output tensor of the network after forwarding all layers.
    """

    embedded = self.convs(x)
    flattened = embedded.view(-1, self._to_linear)
    return F.relu(self.fc1(flattened))

mlpoppyns.learning.models.model_conv_sbi_shallow

Model for a shallow convolutional neural network used as an embedding network in the sbi framework to compress the input features into a latent vector. The neural network is composed of a single convolutional layer and a max-pool layer, followed by a fully connected layer that will serve as the latent vector.

Authors:

Michele Ronchi (ronchi@ice.csic.es)
Celsa Pardo Araujo (pardo@ice.csic.es)

ModelConvSBIshallow

Bases: ModelBase

A convolutional neural network model with 1 convolutional layer.

Source code in mlpoppyns/learning/models/model_conv_sbi_shallow.py
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
class ModelConvSBIshallow(ModelBase):
    """
    A convolutional neural network model with 1 convolutional layer.

    Shallow embedding network that compresses the input features into a
    latent vector of length ``len_output_layer``.
    """

    def __init__(
        self, input_shape: np.ndarray, len_output_layer: int = 1
    ) -> None:
        """
        CNN model initialization.

        This CNN automatically adapts to the shape of the initial input features.

        Args:
            input_shape (np.ndarray): Shape of the input batch (C x H x W).
            len_output_layer (int): Length of the latent vector.
        """

        super().__init__()
        self.conv1 = nn.Conv2d(input_shape[0], 64, kernel_size=3, padding=1)
        self.pool = nn.MaxPool2d(2, 2)

        # Probe the convolutional stack with a mock input so that the
        # flattened feature size (``self._to_linear``) is known and the linear
        # layer can be sized automatically.  ``no_grad`` keeps the probe from
        # building a throwaway autograd graph.
        self._to_linear = None
        with torch.no_grad():
            x = torch.randn(input_shape).view(
                -1, input_shape[0], input_shape[1], input_shape[2]
            )
            self.convs(x)

        self.fc1 = nn.Linear(self._to_linear, len_output_layer)

    def convs(self, x: torch.Tensor) -> torch.Tensor:
        """
        Convolution and pooling layers forward pass.

        Args:
            x (torch.Tensor): Input tensor for the convolution layers.

        Returns:
            (torch.Tensor): Output tensor of the convolution and pooling layers.
        """

        x = self.pool(F.relu(self.conv1(x)))

        # Cache the flattened per-sample feature size on the first call.
        if self._to_linear is None:
            self._to_linear = x[0].shape[0] * x[0].shape[1] * x[0].shape[2]

        return x

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        """
        Forward pass.

        Args:
            x (torch.Tensor): Input tensor for the network.

        Returns:
            (torch.Tensor): Latent vector after forwarding all layers.
        """

        x = self.convs(x)
        x = x.view(-1, self._to_linear)
        x = F.relu(self.fc1(x))

        return x

__init__(input_shape, len_output_layer=1)

CNN model initialization.

This CNN automatically adapts to the shape of the initial input features.

Parameters:

Name Type Description Default
input_shape array

Shape of the input batch (C x H x W).

required
len_output_layer int

Length of the latent vector.

1
Source code in mlpoppyns/learning/models/model_conv_sbi_shallow.py
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
def __init__(
    self, input_shape: np.ndarray, len_output_layer: int = 1
) -> None:
    """
    CNN model initialization.

    This CNN automatically adapts to the shape of the initial input features.

    Args:
        input_shape (np.ndarray): Shape of the input batch (C x H x W).
        len_output_layer (int): Length of the latent vector.
    """

    super().__init__()
    self.conv1 = nn.Conv2d(input_shape[0], 64, kernel_size=3, padding=1)
    self.pool = nn.MaxPool2d(2, 2)

    # Probe the convolutional stack with a mock input so that the flattened
    # feature size (``self._to_linear``) is known and the linear layer can
    # be sized automatically.  ``no_grad`` keeps the probe from building a
    # throwaway autograd graph.
    self._to_linear = None
    with torch.no_grad():
        x = torch.randn(input_shape).view(
            -1, input_shape[0], input_shape[1], input_shape[2]
        )
        self.convs(x)

    self.fc1 = nn.Linear(self._to_linear, len_output_layer)

convs(x)

Convolution and pooling layers forward pass.

Parameters:

Name Type Description Default
x Tensor

Input tensor for the convolution layers.

required

Returns:

Type Description
Tensor

Output tensor of the convolution and pooling layers.

Source code in mlpoppyns/learning/models/model_conv_sbi_shallow.py
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
def convs(self, x: torch.Tensor) -> torch.Tensor:
    """
    Apply the single convolution + max-pooling stage.

    Args:
        x (torch.Tensor): Input tensor for the convolution layers.

    Returns:
        (torch.Tensor): Output tensor of the convolution and pooling layers.
    """

    pooled = self.pool(F.relu(self.conv1(x)))

    # Remember the flattened per-sample feature size on the first call.
    if self._to_linear is None:
        channels, height, width = pooled[0].shape
        self._to_linear = channels * height * width

    return pooled

forward(x)

Forward pass.

Parameters:

Name Type Description Default
x Tensor

Input tensor for the network.

required

Returns:

Type Description
Tensor

Output tensor of the network after forwarding all layers.

Source code in mlpoppyns/learning/models/model_conv_sbi_shallow.py
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
def forward(self, x: torch.Tensor) -> torch.Tensor:
    """
    Forward pass.

    Args:
        x (torch.Tensor): Input tensor for the network.

    Returns:
        (torch.Tensor): Output tensor of the network after forwarding all layers.
    """

    conv_out = self.convs(x)
    flat = conv_out.view(-1, self._to_linear)
    return F.relu(self.fc1(flat))

mlpoppyns.learning.models.model_linear

Model for a simple linear neural network.

Authors:

Alberto Garcia Garcia (garciagarcia@ice.csic.es)

ModelLinear

Bases: ModelBase

A linear neural network Model.

Source code in mlpoppyns/learning/models/model_linear.py
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
class ModelLinear(ModelBase):
    """
    A linear neural network Model.

    A single fully connected layer (with ReLU) mapping the flattened input
    features to ``num_parameters`` outputs.
    """

    def __init__(
        self, input_shape: np.ndarray = None, num_parameters: int = 1
    ) -> None:
        """
        Linear model initialization.

        Args:
            input_shape (np.ndarray): Shape of the input batch (C x H x W).
            num_parameters (int): Number of parameters to predict.

        Raises:
            ValueError: If ``input_shape`` is not provided.
        """

        super().__init__()

        # The ``None`` default is kept for backward compatibility, but the
        # shape is required to size the linear layer; fail with a clear error
        # instead of an opaque ``TypeError`` on the subscription below.
        if input_shape is None:
            raise ValueError("input_shape (C x H x W) is required")

        input_features = input_shape[0] * input_shape[1] * input_shape[2]
        self.fc1 = nn.Linear(input_features, num_parameters)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        """
        Forward pass.

        Args:
            x (torch.Tensor): Input tensor for the network.

        Returns:
            (torch.Tensor): Output tensor of the network after forwarding all layers.
        """

        # Flatten everything but the batch dimension before the linear layer.
        x = x.view(x.shape[0], -1)
        x = F.relu(self.fc1(x))

        return x

__init__(input_shape=None, num_parameters=1)

Linear model initialization.

Parameters:

Name Type Description Default
input_shape array

Shape of the input batch (C x H x W).

None
num_parameters int

Number of parameters to predict.

1
Source code in mlpoppyns/learning/models/model_linear.py
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
def __init__(
    self, input_shape: np.ndarray = None, num_parameters: int = 1
) -> None:
    """
    Linear model initialization.

    Args:
        input_shape (np.ndarray): Shape of the input batch (C x H x W).
        num_parameters (int): Number of parameters to predict.

    Raises:
        ValueError: If ``input_shape`` is not provided.
    """

    super().__init__()

    # The ``None`` default is kept for backward compatibility, but the shape
    # is required to size the linear layer; fail with a clear error instead
    # of an opaque ``TypeError`` on the subscription below.
    if input_shape is None:
        raise ValueError("input_shape (C x H x W) is required")

    input_features = input_shape[0] * input_shape[1] * input_shape[2]
    self.fc1 = nn.Linear(input_features, num_parameters)

forward(x)

Forward pass.

Parameters:

Name Type Description Default
x Tensor

Input tensor for the network.

required

Returns:

Type Description
Tensor

Output tensor of the network after forwarding all layers.

Source code in mlpoppyns/learning/models/model_linear.py
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
def forward(self, x: torch.Tensor) -> torch.Tensor:
    """
    Forward pass.

    Args:
        x (torch.Tensor): Input tensor for the network.

    Returns:
        (torch.Tensor): Output tensor of the network after forwarding all layers.
    """

    flat = x.view(x.shape[0], -1)
    return F.relu(self.fc1(flat))

mlpoppyns.learning.models.models

Models.

This is just an empty module that gathers all the available modules.

Authors:

Alberto Garcia Garcia (garciagarcia@ice.csic.es)