Skip to content

Metrics

mlpoppyns.learning.metrics.metric_accuracy

Accuracy metric.

Authors:

Alberto Garcia Garcia (garciagarcia@ice.csic.es)

MetricAccuracy

Bases: MetricBase

Source code in mlpoppyns/learning/metrics/metric_accuracy.py
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
class MetricAccuracy(MetricBase):
    """
    Classification accuracy metric.

    Compares the argmax of the network output along the class dimension
    against integer class labels and returns the fraction of matches.
    """

    def __call__(self, output: torch.Tensor, target: torch.Tensor) -> float:
        """
        Computation of the accuracy metric.

        Args:
            output (torch.Tensor): Network output tensor (predictions).
                Assumed shaped (batch, num_classes) so argmax over dim=1
                gives one predicted class per sample.
            target (torch.Tensor): Ground truth tensor (integer labels).

        Returns:
            (float): Fraction (in [0, 1]) of accurate or correct outputs
                (predictions that match the labels or ground truth).
        """

        with torch.no_grad():
            pred = torch.argmax(output, dim=1)
            # Guard against mismatched batch sizes between predictions
            # and labels.
            assert pred.shape[0] == len(target)
            correct = torch.sum(pred == target).item()

        return correct / len(target)

    def __str__(self) -> str:
        """
        String representation for the accuracy metric.

        Returns:
            (str): String representation for the accuracy metric.
        """

        return "Accuracy Metric"

    def initial_value(self) -> float:
        """
        Starting value for the metric to start optimization.

        Returns:
            (float): Minus infinity, so that any first real accuracy value
                counts as an improvement (higher is better for accuracy).
        """

        return -np.inf

    def improved(self, value_a: torch.Tensor, value_b: torch.Tensor) -> bool:
        """
        Check if a metric value is better than other.

        Args:
            value_a (torch.Tensor): First value to compare.
            value_b (torch.Tensor): Second value to compare.

        Returns:
            (bool): True if the second value is greater than the first value,
                false otherwise.
        """

        return value_b > value_a

__call__(output, target)

Computation of the accuracy metric.

Parameters:

Name Type Description Default
output Tensor

Network output tensor (predictions).

required
target Tensor

Ground truth tensor (labels).

required

Returns:

Type Description
float

Fraction (in [0, 1]) of accurate or correct outputs (predictions that match the labels or ground truth).

Source code in mlpoppyns/learning/metrics/metric_accuracy.py
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
def __call__(self, output: torch.Tensor, target: torch.Tensor) -> float:
    """
    Computation of the accuracy metric.

    Args:
        output (torch.Tensor): Network output tensor (predictions).
        target (torch.Tensor): Ground truth tensor (labels).

    Returns:
        (float): Fraction (in [0, 1]) of accurate or correct outputs
            (predictions that match the labels or ground truth).
    """

    with torch.no_grad():
        pred = torch.argmax(output, dim=1)
        # Predictions and labels must describe the same batch.
        assert pred.shape[0] == len(target)
        correct = 0
        correct += torch.sum(pred == target).item()

    return correct / len(target)

__str__()

String representation for the accuracy metric.

Returns:

Type Description
str

String representation for the accuracy metric.

Source code in mlpoppyns/learning/metrics/metric_accuracy.py
37
38
39
40
41
42
43
44
45
def __str__(self) -> str:
    """
    String representation for the accuracy metric.

    Returns:
        (str): String representation for the accuracy metric.
    """

    return "Accuracy Metric"

improved(value_a, value_b)

Check if a metric value is better than other.

Parameters:

Name Type Description Default
value_a Tensor

First value to compare.

required
value_b Tensor

Second value to compare.

required

Returns:

Type Description
bool

True if the second value is greater than the first value, false otherwise.

Source code in mlpoppyns/learning/metrics/metric_accuracy.py
57
58
59
60
61
62
63
64
65
66
67
68
69
70
def improved(self, value_a: torch.Tensor, value_b: torch.Tensor) -> bool:
    """
    Check if a metric value is better than other.

    Args:
        value_a (torch.Tensor): First value to compare.
        value_b (torch.Tensor): Second value to compare.

    Returns:
        (bool): True if the second value is greater than the first value, false
            otherwise.
    """

    return value_b > value_a

initial_value()

Starting value for the metric to start optimization.

Returns:

Type Description
float

Metric starting value set to minus infinity.

Source code in mlpoppyns/learning/metrics/metric_accuracy.py
47
48
49
50
51
52
53
54
55
def initial_value(self) -> float:
    """
    Starting value for the metric to start optimization.

    Returns:
        (float): Metric starting value set to minus infinity.
    """

    return -np.inf

mlpoppyns.learning.metrics.metric_base

Base metric.

Authors:

Alberto Garcia Garcia (garciagarcia@ice.csic.es)

MetricBase

Base abstract class for all evaluation metrics.

Source code in mlpoppyns/learning/metrics/metric_base.py
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
class MetricBase(abc.ABC):
    """
    Base abstract class for all evaluation metrics.

    Inherits from abc.ABC so that the @abc.abstractmethod decorators are
    actually enforced: without an ABCMeta-based class, the decorators are
    inert and the "abstract" base could be instantiated directly.
    """

    @abc.abstractmethod
    def __call__(self, output: torch.Tensor, target: torch.Tensor) -> float:
        """
        Compute the metric value based on the model output and target.

        This method takes the predicted output from the model and the
        corresponding target values, and computes the metric score. The specific
        implementation of this method will vary depending on the type of metric
        being implemented (e.g., Accuracy, Mean relative error...).

        Args:
            output (torch.Tensor): The predicted output from the model.
            target (torch.Tensor): The ground truth values to compare against.

        Returns:
            (float): The computed metric value.
        """

        raise NotImplementedError

    @abc.abstractmethod
    def __str__(self) -> str:
        """
        String representation of the metric.

        This method should provide a human-readable description of the metric,
        including its name and any relevant parameters or characteristics.

        Returns:
            (str): A string that describes the metric.
        """

        raise NotImplementedError

    @abc.abstractmethod
    def initial_value(self) -> float:
        """
        Return the starting value for the metric optimization.

        This method should return the initial value that the metric should start
        with for optimization purposes. For example, in the case of accuracy,
        the initial value could be 0.0, as we want to maximize the accuracy.

        Returns:
            (float): The initial value for the metric optimization.
        """

        raise NotImplementedError

    @abc.abstractmethod
    def improved(self, value_a: torch.Tensor, value_b: torch.Tensor) -> bool:
        """
        Check if the metric value has improved.

        This method compares two metric values and determines whether the second
        value represents an improvement over the first value. The specific
        implementation of this method will depend on the type of metric being
        used (e.g., for accuracy, a higher value is better, while for loss, a
        lower value is better).

        Args:
            value_a (torch.Tensor): The first metric value to compare. Plain
                floats also work since only ordering comparisons are required.
            value_b (torch.Tensor): The second metric value to compare.

        Returns:
            (bool): True if the second value represents an improvement over the
                first value, False otherwise.
        """

        raise NotImplementedError

__call__(output, target) abstractmethod

Compute the metric value based on the model output and target.

This method takes the predicted output from the model and the corresponding target values, and computes the metric score. The specific implementation of this method will vary depending on the type of metric being implemented (e.g., Accuracy, Mean relative error...).

Parameters:

Name Type Description Default
output Tensor

The predicted output from the model.

required
target Tensor

The ground truth values to compare against.

required

Returns:

Type Description
float

The computed metric value.

Source code in mlpoppyns/learning/metrics/metric_base.py
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
@abc.abstractmethod
def __call__(self, output: torch.Tensor, target: torch.Tensor) -> float:
    """
    Compute the metric value based on the model output and target.

    This method takes the predicted output from the model and the
    corresponding target values, and computes the metric score. The specific
    implementation of this method will vary depending on the type of metric
    being implemented (e.g., Accuracy, Mean relative error...).

    Args:
        output (torch.Tensor): The predicted output from the model.
        target (torch.Tensor): The ground truth values to compare against.

    Returns:
        (float): The computed metric value.
    """

    raise NotImplementedError

__str__() abstractmethod

String representation of the metric.

This method should provide a human-readable description of the metric, including its name and any relevant parameters or characteristics.

Returns:

Type Description
str

A string that describes the metric.

Source code in mlpoppyns/learning/metrics/metric_base.py
48
49
50
51
52
53
54
55
56
57
58
59
60
@abc.abstractmethod
def __str__(self) -> str:
    """
    String representation of the metric.

    This method should provide a human-readable description of the metric,
    including its name and any relevant parameters or characteristics.

    Returns:
        (str): A string that describes the metric.
    """

    raise NotImplementedError

improved(value_a, value_b) abstractmethod

Check if the metric value has improved.

This method compares two metric values and determines whether the second value represents an improvement over the first value. The specific implementation of this method will depend on the type of metric being used (e.g., for accuracy, a higher value is better, while for loss, a lower value is better).

Parameters:

Name Type Description Default
value_a float

The first metric value to compare.

required
value_b float

The second metric value to compare.

required

Returns:

Type Description
bool

True if the second value represents an improvement over the first value, False otherwise.

Source code in mlpoppyns/learning/metrics/metric_base.py
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
@abc.abstractmethod
def improved(self, value_a: torch.Tensor, value_b: torch.Tensor) -> bool:
    """
    Check if the metric value has improved.

    This method compares two metric values and determines whether the second
    value represents an improvement over the first value. The specific
    implementation of this method will depend on the type of metric being
    used (e.g., for accuracy, a higher value is better, while for loss, a
    lower value is better).

    Args:
        value_a (float): The first metric value to compare.
        value_b (float): The second metric value to compare.

    Returns:
        (bool): True if the second value represents an improvement over the
            first value, False otherwise.
    """

    raise NotImplementedError

initial_value() abstractmethod

Return the starting value for the metric optimization.

This method should return the initial value that the metric should start with for optimization purposes. For example, in the case of accuracy, the initial value could be 0.0, as we want to maximize the accuracy.

Returns:

Type Description
float

The initial value for the metric optimization.

Source code in mlpoppyns/learning/metrics/metric_base.py
62
63
64
65
66
67
68
69
70
71
72
73
74
75
@abc.abstractmethod
def initial_value(self) -> float:
    """
    Return the starting value for the metric optimization.

    This method should return the initial value that the metric should start
    with for optimization purposes. For example, in the case of accuracy,
    the initial value could be 0.0, as we want to maximize the accuracy.

    Returns:
        (float): The initial value for the metric optimization.
    """

    raise NotImplementedError

mlpoppyns.learning.metrics.metric_chi2

Chi-square metric.

Authors:

Michele Ronchi (ronchi@ice.csic.es)
Alberto Garcia Garcia (garciagarcia@ice.csic.es)

MetricAccuracyCHI2

Bases: MetricBase

Source code in mlpoppyns/learning/metrics/metric_chi2.py
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
class MetricAccuracyCHI2(MetricBase):
    """
    Reduced chi-square accuracy metric (lower is better; ideal value near 1).
    """

    def __call__(self, output: torch.Tensor, target: torch.Tensor) -> float:
        """
        Computation of the accuracy metric defined as the reduced chi square value.
        The value of the reduced chi square should be near 1 for best accuracy.

        Args:
            output (torch.Tensor): Network output tensor (predictions).
            target (torch.Tensor): Ground truth tensor (labels). NOTE(review):
                entries are used as divisors, so zeros in the target would
                produce inf/nan — confirm targets are non-zero upstream.

        Returns:
            (float): Reduced chi square value computed on a batch.
        """

        with torch.no_grad():
            # Sum of (observed - expected)^2 / expected, normalized by the
            # number of output elements. `.nelement()` replaces the legacy
            # `.data.nelement()` access.
            red_chi2 = (
                (output - target) ** 2 / target
            ).sum() / output.nelement()

        # Convert the 0-d tensor to a plain float, honoring the declared
        # `-> float` return type (consistent with MetricAccuracy).
        return red_chi2.item()

    def __str__(self) -> str:
        """
        String representation for the chi squared metric.

        Returns:
            (str): String representation for the chi squared metric.
        """

        return "Reduced chi square accuracy metric"

    def initial_value(self) -> float:
        """
        Starting value for the metric to start optimization.

        Returns:
            (float): Plus infinity, so that any first real value counts as
                an improvement (lower is better for this metric).
        """

        return np.inf

    def improved(self, value_a: torch.Tensor, value_b: torch.Tensor) -> bool:
        """
        Check if a metric value is better than the other.

        Args:
            value_a (torch.Tensor): First value to compare (current value).
            value_b (torch.Tensor): Second value to compare (new value).

        Returns:
            (bool): True if the second value is lower than the first value, false
                otherwise.
        """

        return value_b < value_a

__call__(output, target)

Computation of the accuracy metric defined as the reduced chi square value. The value of the reduced chi square should be near 1 for best accuracy.

Parameters:

Name Type Description Default
output Tensor

Network output tensor (predictions).

required
target Tensor

Ground truth tensor (labels).

required

Returns:

Type Description
Tensor

Reduced chi square value computed on a batch.

Source code in mlpoppyns/learning/metrics/metric_chi2.py
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
def __call__(self, output: torch.Tensor, target: torch.Tensor) -> float:
    """
    Computation of the accuracy metric defined as the reduced chi square value.
    The value of the reduced chi square should be near 1 for best accuracy.

    Args:
        output (torch.Tensor): Network output tensor (predictions).
        target (torch.Tensor): Ground truth tensor (labels).

    Returns:
        (torch.Tensor): Reduced chi square value computed on a batch.
    """

    with torch.no_grad():
        red_chi2 = (
            (output - target) ** 2 / target
        ).sum() / output.data.nelement()

    return red_chi2

__str__()

String representation for the chi squared metric.

Returns:

Type Description
str

String representation for the chi squared metric.

Source code in mlpoppyns/learning/metrics/metric_chi2.py
37
38
39
40
41
42
43
44
45
def __str__(self) -> str:
    """
    String representation for the chi squared metric.

    Returns:
        (str): String representation for the chi squared metric.
    """

    return "Reduced chi square accuracy metric"

improved(value_a, value_b)

Check if a metric value is better than the other.

Parameters:

Name Type Description Default
value_a Tensor

First value to compare (current value).

required
value_b Tensor

Second value to compare (new value).

required

Returns:

Type Description
bool

True if the second value is lower than the first value, false otherwise.

Source code in mlpoppyns/learning/metrics/metric_chi2.py
57
58
59
60
61
62
63
64
65
66
67
68
69
70
def improved(self, value_a: torch.Tensor, value_b: torch.Tensor) -> bool:
    """
    Check if a metric value is better than the other.

    Args:
        value_a (torch.Tensor): First value to compare (current value).
        value_b (torch.Tensor): Second value to compare (new value).

    Returns:
        (bool): True if the second value is lower than the first value, false
            otherwise.
    """

    return value_b < value_a

initial_value()

Starting value for the metric to start optimization.

Returns:

Type Description
float

Starting value for the metric to start optimization.

Source code in mlpoppyns/learning/metrics/metric_chi2.py
47
48
49
50
51
52
53
54
55
def initial_value(self) -> float:
    """
    Starting value for the metric to start optimization.

    Returns:
        (float): Starting value for the metric to start optimization.
    """

    return np.inf

mlpoppyns.learning.metrics.metric_mae

Mean absolute error metric.

Authors:

Michele Ronchi (ronchi@ice.csic.es)
Alberto Garcia Garcia (garciagarcia@ice.csic.es)
Vanessa Graber (graber@ice.csic.es)

MetricAccuracyMAE

Bases: MetricBase

Source code in mlpoppyns/learning/metrics/metric_mae.py
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
class MetricAccuracyMAE(MetricBase):
    """
    Mean absolute error accuracy metric (lower is better; ideal value 0).
    """

    def __call__(self, output: torch.Tensor, target: torch.Tensor) -> float:
        """
        Computation of the metric defined as mean absolute error.
        The value of the MAE should be 0 for the best accuracy.

        Args:
            output (torch.Tensor): Network output tensor (predictions).
            target (torch.Tensor): Ground truth tensor (labels).

        Returns:
            (float): Mean absolute error computed over a batch.
        """

        with torch.no_grad():
            # Use a local loss instance instead of re-assigning `self.mae`
            # on every call (avoids a needless per-call instance mutation).
            mae = nn.L1Loss()(output, target)

        # Convert the 0-d tensor to a plain float, honoring the declared
        # `-> float` return type (consistent with MetricAccuracy).
        return mae.item()

    def __str__(self) -> str:
        """
        String representation for the MAE metric.

        Returns:
            (str): String representation for the MAE metric.
        """

        return "Mean Absolute Error accuracy metric"

    def initial_value(self) -> float:
        """
        Starting value for the metric to start optimization.

        Returns:
            (float): Plus infinity, so that any first real value counts as
                an improvement (lower is better for this metric).
        """

        return np.inf

    def improved(self, value_a: torch.Tensor, value_b: torch.Tensor) -> bool:
        """
        Check if a metric value is better than other.

        Args:
            value_a (torch.Tensor): First value to compare (current value).
            value_b (torch.Tensor): Second value to compare (new value).

        Returns:
            (bool): True if the second value is lower than the first value, false
                otherwise.
        """

        return value_b < value_a

__call__(output, target)

Computation of the metric defined as mean absolute error. The value of the MAE should be 0 for the best accuracy.

Parameters:

Name Type Description Default
output Tensor

Network output tensor (predictions).

required
target Tensor

Ground truth tensor (labels).

required

Returns:

Type Description
Tensor

Mean absolute error computed over a batch.

Source code in mlpoppyns/learning/metrics/metric_mae.py
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
def __call__(self, output: torch.Tensor, target: torch.Tensor) -> float:
    """
    Computation of the metric defined as mean absolute error.
    The value of the MAE should be 0 for the best accuracy.

    Args:
        output (torch.Tensor): Network output tensor (predictions).
        target (torch.Tensor): Ground truth tensor (labels).

    Returns:
        (torch.Tensor): Mean absolute error computed over a batch.
    """

    with torch.no_grad():
        self.mae = nn.L1Loss()
        mae = self.mae(output, target)

    return mae

__str__()

String representation for the MAE metric.

Returns:

Type Description
str

String representation for the MAE metric.

Source code in mlpoppyns/learning/metrics/metric_mae.py
38
39
40
41
42
43
44
45
46
def __str__(self) -> str:
    """
    String representation for the MAE metric.

    Returns:
        (str): String representation for the MAE metric.
    """

    return "Mean Absolute Error accuracy metric"

improved(value_a, value_b)

Check if a metric value is better than other.

Parameters:

Name Type Description Default
value_a Tensor

First value to compare (current value).

required
value_b Tensor

Second value to compare (new value).

required

Returns:

Type Description
bool

True if the second value is lower than the first value, false otherwise.

Source code in mlpoppyns/learning/metrics/metric_mae.py
58
59
60
61
62
63
64
65
66
67
68
69
70
71
def improved(self, value_a: torch.Tensor, value_b: torch.Tensor) -> bool:
    """
    Check if a metric value is better than other.

    Args:
        value_a (torch.Tensor): First value to compare (current value).
        value_b (torch.Tensor): Second value to compare (new value).

    Returns:
        (bool): True if the second value is lower than the first value, false
            otherwise.
    """

    return value_b < value_a

initial_value()

Starting value for the metric to start optimization.

Returns:

Type Description
float

Starting value for the metric to start optimization.

Source code in mlpoppyns/learning/metrics/metric_mae.py
48
49
50
51
52
53
54
55
56
def initial_value(self) -> float:
    """
    Starting value for the metric to start optimization.

    Returns:
        (float): Starting value for the metric to start optimization.
    """

    return np.inf

mlpoppyns.learning.metrics.metric_mse

Mean square error metric.

Authors:

Michele Ronchi (ronchi@ice.csic.es)
Alberto Garcia Garcia (garciagarcia@ice.csic.es)
Vanessa Graber (graber@ice.csic.es)

MetricAccuracyMSE

Bases: MetricBase

Source code in mlpoppyns/learning/metrics/metric_mse.py
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
class MetricAccuracyMSE(MetricBase):
    """
    Mean square error accuracy metric (lower is better; ideal value 0).
    """

    def __call__(self, output: torch.Tensor, target: torch.Tensor) -> float:
        """
        Computation of the metric defined as mean square error.
        The value of the MSE should be 0 for the best accuracy.

        Args:
            output (torch.Tensor): Network output tensor (predictions).
            target (torch.Tensor): Ground truth tensor (labels).

        Returns:
            (float): Mean square error computed over a batch.
        """

        with torch.no_grad():
            # Use a local loss instance instead of re-assigning `self.mse`
            # on every call (avoids a needless per-call instance mutation).
            mse = nn.MSELoss()(output, target)

        # Convert the 0-d tensor to a plain float, honoring the declared
        # `-> float` return type (consistent with MetricAccuracy).
        return mse.item()

    def __str__(self) -> str:
        """
        String representation for the MSE metric.

        Returns:
            (str): String representation for the MSE metric.
        """

        return "Mean square error accuracy metric"

    def initial_value(self) -> float:
        """
        Starting value for the metric to start optimization.

        Returns:
            (float): Plus infinity, so that any first real value counts as
                an improvement (lower is better for this metric).
        """

        return np.inf

    def improved(self, value_a: torch.Tensor, value_b: torch.Tensor) -> bool:
        """
        Check if a metric value is better than other.

        Args:
            value_a (torch.Tensor): First value to compare (current value).
            value_b (torch.Tensor): Second value to compare (new value).

        Returns:
            (bool): True if the second value is lower than the first value, false
                otherwise.
        """

        return value_b < value_a

__call__(output, target)

Computation of the metric defined as mean square error. The value of the MSE should be 0 for the best accuracy.

Parameters:

Name Type Description Default
output Tensor

Network output tensor (predictions).

required
target Tensor

Ground truth tensor (labels).

required

Returns:

Type Description
Tensor

Mean square error computed over a batch.

Source code in mlpoppyns/learning/metrics/metric_mse.py
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
def __call__(self, output: torch.Tensor, target: torch.Tensor) -> float:
    """
    Computation of the metric defined as mean square error.
    The value of the MSE should be 0 for the best accuracy.

    Args:
        output (torch.Tensor): Network output tensor (predictions).
        target (torch.Tensor): Ground truth tensor (labels).

    Returns:
        (torch.Tensor): Mean square error computed over a batch.
    """

    with torch.no_grad():
        self.mse = nn.MSELoss()
        mse = self.mse(output, target)

    return mse

__str__()

String representation for the accuracy metric.

Returns:

Type Description
str

String representation for the accuracy metric.

Source code in mlpoppyns/learning/metrics/metric_mse.py
38
39
40
41
42
43
44
45
46
def __str__(self) -> str:
    """
    String representation for the accuracy metric.

    Returns:
        (str): String representation for the accuracy metric.
    """

    return "Mean square error accuracy metric"

improved(value_a, value_b)

Check if a metric value is better than other.

Parameters:

Name Type Description Default
value_a Tensor

First value to compare (current value).

required
value_b Tensor

Second value to compare (new value).

required

Returns:

Type Description
bool

True if the second value is lower than the first value, false otherwise.

Source code in mlpoppyns/learning/metrics/metric_mse.py
58
59
60
61
62
63
64
65
66
67
68
69
70
71
def improved(self, value_a: torch.Tensor, value_b: torch.Tensor) -> bool:
    """
    Check if a metric value is better than other.

    Args:
        value_a (torch.Tensor): First value to compare (current value).
        value_b (torch.Tensor): Second value to compare (new value).

    Returns:
        (bool): True if the second value is lower than the first value, false
            otherwise.
    """

    return value_b < value_a

initial_value()

Starting value for the metric to start optimization.

Returns:

Type Description
float

Starting value for the metric to start optimization.

Source code in mlpoppyns/learning/metrics/metric_mse.py
48
49
50
51
52
53
54
55
56
def initial_value(self) -> float:
    """
    Starting value for the metric to start optimization.

    Returns:
        (float): Starting value for the metric to start optimization.
    """

    return np.inf

mlpoppyns.learning.metrics.metric_rmse

Root mean squared error metric.

Authors:

Michele Ronchi (ronchi@ice.csic.es)
Alberto Garcia Garcia (garciagarcia@ice.csic.es)
Vanessa Graber (graber@ice.csic.es)

MetricAccuracyRMSE

Bases: MetricBase

Source code in mlpoppyns/learning/metrics/metric_rmse.py
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
class MetricAccuracyRMSE(MetricBase):
    """
    Root mean squared error accuracy metric (lower is better; ideal value 0).
    """

    def __call__(self, output: torch.Tensor, target: torch.Tensor) -> float:
        """
        Computation of the metric defined as root mean squared error.
        The value of the RMSE should be 0 for the best accuracy.

        Args:
            output (torch.Tensor): Network output tensor (predictions).
            target (torch.Tensor): Ground truth tensor (labels).

        Returns:
            (float): Root mean squared error computed over a batch.
        """

        with torch.no_grad():
            # Use a local loss instance instead of re-assigning `self.mse`
            # on every call (avoids a needless per-call instance mutation).
            rmse = torch.sqrt(nn.MSELoss()(output, target))

        # Convert the 0-d tensor to a plain float, honoring the declared
        # `-> float` return type (consistent with MetricAccuracy).
        return rmse.item()

    def __str__(self) -> str:
        """
        String representation for the RMSE metric.

        Returns:
            (str): String representation for the RMSE metric.
        """

        return "Root mean squared error accuracy metric"

    def initial_value(self) -> float:
        """
        Starting value for the metric to start optimization.

        Returns:
            (float): Plus infinity, so that any first real value counts as
                an improvement (lower is better for this metric).
        """

        return np.inf

    def improved(self, value_a: torch.Tensor, value_b: torch.Tensor) -> bool:
        """
        Check if a metric value is better than other.

        Args:
            value_a (torch.Tensor): First value to compare (current value).
            value_b (torch.Tensor): Second value to compare (new value).

        Returns:
            (bool): True if the second value is lower than the first value, false
                otherwise.
        """

        return value_b < value_a

__call__(output, target)

Computation of the metric defined as root mean squared error. The value of the RMSE should be 0 for the best accuracy.

Parameters:

Name Type Description Default
output Tensor

Network output tensor (predictions).

required
target Tensor

Ground truth tensor (labels).

required

Returns:

Type Description
Tensor

Root mean squared error computed over a batch.

Source code in mlpoppyns/learning/metrics/metric_rmse.py
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
def __call__(self, output: torch.Tensor, target: torch.Tensor) -> float:
    """
    Computation of the metric defined as root mean squared error.
    The value of the RMSE should be 0 for the best accuracy.

    Args:
        output (torch.Tensor): Network output tensor (predictions).
        target (torch.Tensor): Ground truth tensor (labels).

    Returns:
        (torch.Tensor): Root mean squared error computed over a batch.
    """

    with torch.no_grad():
        self.mse = nn.MSELoss()
        rmse = torch.sqrt(self.mse(output, target))

    return rmse

__str__()

String representation for the RMSE metric.

Returns:

Type Description
str

String representation for the RMSE metric.

Source code in mlpoppyns/learning/metrics/metric_rmse.py
38
39
40
41
42
43
44
45
46
def __str__(self) -> str:
    """
    String representation for the RMSE metric.

    Returns:
        (str): String representation for the RMSE metric.
    """

    return "Root mean squared error accuracy metric"

improved(value_a, value_b)

Check if a metric value is better than other.

Parameters:

Name Type Description Default
value_a Tensor

First value to compare (current value).

required
value_b Tensor

Second value to compare (new value).

required

Returns:

Type Description
bool

True if the second value is lower than the first value, false otherwise.

Source code in mlpoppyns/learning/metrics/metric_rmse.py
58
59
60
61
62
63
64
65
66
67
68
69
70
71
def improved(self, value_a: torch.Tensor, value_b: torch.Tensor) -> bool:
    """
    Check if a metric value is better than other.

    Args:
        value_a (torch.Tensor): First value to compare (current value).
        value_b (torch.Tensor): Second value to compare (new value).

    Returns:
        (bool): True if the second value is lower than the first value, false
            otherwise.
    """

    return value_b < value_a

initial_value()

Starting value for the metric to start optimization.

Returns:

Type Description
float

Starting value for the metric to start optimization.

Source code in mlpoppyns/learning/metrics/metric_rmse.py
48
49
50
51
52
53
54
55
56
def initial_value(self) -> float:
    """
    Starting value for the metric to start optimization.

    Returns:
        (float): Starting value for the metric to start optimization.
    """

    return np.inf

mlpoppyns.learning.metrics.metrics

Metrics.

This is just an empty module that gathers all the available metrics.

Authors:

Alberto Garcia Garcia (garciagarcia@ice.csic.es)