From 3b9581548fa91f482e941f3910f0546aa9226e91 Mon Sep 17 00:00:00 2001 From: Ishan Kumar Date: Sun, 5 Dec 2021 12:34:09 +0530 Subject: [PATCH 1/5] added doctest for avg_prec, prec_recall_curve, ROC_AUC --- ignite/contrib/metrics/average_precision.py | 11 +++++- .../contrib/metrics/precision_recall_curve.py | 17 ++++++++-- ignite/contrib/metrics/roc_auc.py | 34 ++++++++++++------- 3 files changed, 47 insertions(+), 15 deletions(-) diff --git a/ignite/contrib/metrics/average_precision.py b/ignite/contrib/metrics/average_precision.py index c75e9cb68a28..9f1fefb57d43 100644 --- a/ignite/contrib/metrics/average_precision.py +++ b/ignite/contrib/metrics/average_precision.py @@ -33,14 +33,23 @@ class AveragePrecision(EpochMetric): AveragePrecision expects y to be comprised of 0's and 1's. y_pred must either be probability estimates or confidence values. To apply an activation to y_pred, use output_transform as shown below: - .. code-block:: python + .. testcode:: def activated_output_transform(output): y_pred, y = output y_pred = torch.softmax(y_pred, dim=1) return y_pred, y + y_pred = torch.Tensor([[3.89, 0.21], [0.30, 0.9], [0.46, 0.14], [0.16, 0.2]]) + y_true = torch.tensor([[1, 1], [1, 1], [1, 0], [0, 1]]) avg_precision = AveragePrecision(activated_output_transform) + avg_precision.attach(default_evaluator, 'average_precision') + state = default_evaluator.run([[y_pred, y_true]]) + print(state.metrics['average_precision']) + + .. testoutput:: + + 0.9166... """ diff --git a/ignite/contrib/metrics/precision_recall_curve.py b/ignite/contrib/metrics/precision_recall_curve.py index 9af52e1dbe0b..06fefc04cae1 100644 --- a/ignite/contrib/metrics/precision_recall_curve.py +++ b/ignite/contrib/metrics/precision_recall_curve.py @@ -35,14 +35,27 @@ class PrecisionRecallCurve(EpochMetric): PrecisionRecallCurve expects y to be comprised of 0's and 1's. y_pred must either be probability estimates or confidence values. To apply an activation to y_pred, use output_transform as shown below: - .. code-block:: python + .. testcode:: def activated_output_transform(output): y_pred, y = output y_pred = torch.sigmoid(y_pred) return y_pred, y + y_pred = torch.tensor([-3, 0.4, 0.9, 8]) + y_true = torch.tensor([0, 0, 1, 1]) + prec_recall_curve = PrecisionRecallCurve(activated_output_transform) + prec_recall_curve.attach(default_evaluator, 'prec_recall_curve') + state = default_evaluator.run([[y_pred, y_true]]) - roc_auc = PrecisionRecallCurve(activated_output_transform) + print("Precision", [round(i, 4) for i in state.metrics['prec_recall_curve'][0].tolist()]) + print("Recall", [round(i, 4) for i in state.metrics['prec_recall_curve'][1].tolist()]) + print("Thresholds", [round(i, 4) for i in state.metrics['prec_recall_curve'][2].tolist()]) + + .. testoutput:: + + Precision [1.0, 1.0, 1.0] + Recall [1.0, 0.5, 0.0] + Thresholds [0.7109, 0.9997] """ diff --git a/ignite/contrib/metrics/roc_auc.py b/ignite/contrib/metrics/roc_auc.py index 114f647a96c8..58716d585d9b 100644 --- a/ignite/contrib/metrics/roc_auc.py +++ b/ignite/contrib/metrics/roc_auc.py @@ -41,15 +41,18 @@ class ROC_AUC(EpochMetric): ROC_AUC expects y to be comprised of 0's and 1's. y_pred must either be probability estimates or confidence values. To apply an activation to y_pred, use output_transform as shown below: - .. code-block:: python + .. 
testcode:: - def activated_output_transform(output): - y_pred, y = output - y_pred = torch.sigmoid(y_pred) - return y_pred, y + roc_auc = ROC_AUC(sigmoid_output_transform) + roc_auc.attach(default_evaluator, 'roc_auc') + y_pred = torch.tensor([[-3], [0.4], [0.9], [8]]) + y_true = torch.tensor([[0], [0], [1], [0]]) + state = default_evaluator.run([[y_pred, y_true]]) + print(state.metrics['roc_auc']) - roc_auc = ROC_AUC(activated_output_transform) + .. testoutput:: + 0.6666... """ def __init__( @@ -88,15 +91,22 @@ class RocCurve(EpochMetric): RocCurve expects y to be comprised of 0's and 1's. y_pred must either be probability estimates or confidence values. To apply an activation to y_pred, use output_transform as shown below: - .. code-block:: python + .. testcode:: - def activated_output_transform(output): - y_pred, y = output - y_pred = torch.sigmoid(y_pred) - return y_pred, y + roc_auc = RocCurve(sigmoid_output_transform) + roc_auc.attach(default_evaluator, 'roc_auc') + y_pred = torch.tensor([-3, 0.4, 0.9, 8]) + y_true = torch.tensor([0, 0, 1, 0]) + state = default_evaluator.run([[y_pred, y_true]]) + print("FPR", [round(i, 3) for i in state.metrics['roc_auc'][0].tolist()]) + print("TPR", [round(i, 3) for i in state.metrics['roc_auc'][1].tolist()]) + print("Thresholds", [round(i, 3) for i in state.metrics['roc_auc'][2].tolist()]) - roc_auc = RocCurve(activated_output_transform) + .. testoutput:: + FPR [0.0, 0.333, 0.333, 1.0] + TPR [0.0, 0.0, 1.0, 1.0] + Thresholds [2.0, 1.0, 0.711, 0.047] """ def __init__(self, output_transform: Callable = lambda x: x, check_compute_fn: bool = False) -> None: From f6133bc538c588562a4d3b71e3fdbbc06666ed32 Mon Sep 17 00:00:00 2001 From: Ishan Kumar Date: Sun, 5 Dec 2021 12:55:26 +0530 Subject: [PATCH 2/5] added sigmoid func in conf.py --- docs/source/conf.py | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/docs/source/conf.py b/docs/source/conf.py index dc0b691e2d99..281b46ee9f53 100644 --- a/docs/source/conf.py +++ b/docs/source/conf.py @@ -336,6 +336,7 @@ def run(self): import torch from torch import nn, optim +import numpy as np from ignite.engine import * from ignite.handlers import * @@ -374,6 +375,11 @@ def train_step(engine, batch): ('fc', nn.Linear(2, 1)) ])) +def sigmoid_output_transform(output): + y_pred, y = output + y_pred = torch.sigmoid(y_pred) + return y_pred, y + manual_seed(666) """ From dd272e49f9e0329962ca1622df595ec95b4ab8eb Mon Sep 17 00:00:00 2001 From: Ishan Kumar Date: Sun, 5 Dec 2021 13:04:12 +0530 Subject: [PATCH 3/5] removed redundant dependency --- docs/source/conf.py | 1 - 1 file changed, 1 deletion(-) diff --git a/docs/source/conf.py b/docs/source/conf.py index 281b46ee9f53..1a90ff3f552d 100644 --- a/docs/source/conf.py +++ b/docs/source/conf.py @@ -336,7 +336,6 @@ def run(self): import torch from torch import nn, optim -import numpy as np from ignite.engine import * from ignite.handlers import * From d8981109d0fde66be3b9a07a317fcbed02f16d32 Mon Sep 17 00:00:00 2001 From: Ishan Kumar Date: Sun, 5 Dec 2021 18:19:14 +0530 Subject: [PATCH 4/5] removed transform functions --- docs/source/conf.py | 5 ----- ignite/contrib/metrics/average_precision.py | 13 +++++-------- ignite/contrib/metrics/precision_recall_curve.py | 9 +++------ ignite/contrib/metrics/roc_auc.py | 10 ++++++---- 4 files changed, 14 insertions(+), 23 deletions(-) diff --git a/docs/source/conf.py b/docs/source/conf.py index 1a90ff3f552d..dc0b691e2d99 100644 --- a/docs/source/conf.py +++ b/docs/source/conf.py @@ -374,11 +374,6 @@ def train_step(engine, batch): 
('fc', nn.Linear(2, 1)) ])) -def sigmoid_output_transform(output): - y_pred, y = output - y_pred = torch.sigmoid(y_pred) - return y_pred, y - manual_seed(666) """ diff --git a/ignite/contrib/metrics/average_precision.py b/ignite/contrib/metrics/average_precision.py index 9f1fefb57d43..5c91731f0251 100644 --- a/ignite/contrib/metrics/average_precision.py +++ b/ignite/contrib/metrics/average_precision.py @@ -35,14 +35,11 @@ class AveragePrecision(EpochMetric): .. testcode:: - def activated_output_transform(output): - y_pred, y = output - y_pred = torch.softmax(y_pred, dim=1) - return y_pred, y - y_pred = torch.Tensor([[3.89, 0.21], [0.30, 0.9], [0.46, 0.14], [0.16, 0.2]]) - y_true = torch.tensor([[1, 1], [1, 1], [1, 0], [0, 1]]) - - avg_precision = AveragePrecision(activated_output_transform) + y_pred = torch.Tensor([[0.79, 0.21], [0.30, 0.70], [0.46, 0.54], [0.16, 0.84]]) + y_true = torch.tensor([[1, 1], [1, 1], [0, 1], [0, 1]]) + + avg_precision = AveragePrecision() + #The ``output_transform`` arg of the metric can be used to perform a softmax on the ``y_pred``. avg_precision.attach(default_evaluator, 'average_precision') state = default_evaluator.run([[y_pred, y_true]]) print(state.metrics['average_precision']) diff --git a/ignite/contrib/metrics/precision_recall_curve.py b/ignite/contrib/metrics/precision_recall_curve.py index 06fefc04cae1..39c8eabdc339 100644 --- a/ignite/contrib/metrics/precision_recall_curve.py +++ b/ignite/contrib/metrics/precision_recall_curve.py @@ -37,13 +37,10 @@ class PrecisionRecallCurve(EpochMetric): .. testcode:: - def activated_output_transform(output): - y_pred, y = output - y_pred = torch.sigmoid(y_pred) - return y_pred, y - y_pred = torch.tensor([-3, 0.4, 0.9, 8]) + y_pred = torch.tensor([0.0474, 0.5987, 0.7109, 0.9997]) y_true = torch.tensor([0, 0, 1, 1]) - prec_recall_curve = PrecisionRecallCurve(activated_output_transform) + prec_recall_curve = PrecisionRecallCurve() + #The ``output_transform`` arg of the metric can be used to perform a sigmoid on the ``y_pred``. prec_recall_curve.attach(default_evaluator, 'prec_recall_curve') state = default_evaluator.run([[y_pred, y_true]]) diff --git a/ignite/contrib/metrics/roc_auc.py b/ignite/contrib/metrics/roc_auc.py index 58716d585d9b..e3c0991bdd8c 100644 --- a/ignite/contrib/metrics/roc_auc.py +++ b/ignite/contrib/metrics/roc_auc.py @@ -43,9 +43,10 @@ class ROC_AUC(EpochMetric): .. testcode:: - roc_auc = ROC_AUC(sigmoid_output_transform) + roc_auc = ROC_AUC() + #The ``output_transform`` arg of the metric can be used to perform a sigmoid on the ``y_pred``. roc_auc.attach(default_evaluator, 'roc_auc') - y_pred = torch.tensor([[-3], [0.4], [0.9], [8]]) + y_pred = torch.tensor([[0.0474], [0.5987], [0.7109], [0.9997]]) y_true = torch.tensor([[0], [0], [1], [0]]) state = default_evaluator.run([[y_pred, y_true]]) print(state.metrics['roc_auc']) @@ -93,9 +94,10 @@ class RocCurve(EpochMetric): .. testcode:: - roc_auc = RocCurve(sigmoid_output_transform) + roc_auc = RocCurve() + #The ``output_transform`` arg of the metric can be used to perform a sigmoid on the ``y_pred``. 
roc_auc.attach(default_evaluator, 'roc_auc') - y_pred = torch.tensor([-3, 0.4, 0.9, 8]) + y_pred = torch.tensor([0.0474, 0.5987, 0.7109, 0.9997]) y_true = torch.tensor([0, 0, 1, 0]) state = default_evaluator.run([[y_pred, y_true]]) print("FPR", [round(i, 3) for i in state.metrics['roc_auc'][0].tolist()]) From de924d2e00dbbf92e3a1b1f5ba30b93f6ef7067d Mon Sep 17 00:00:00 2001 From: Ishan Kumar Date: Mon, 6 Dec 2021 19:52:02 +0530 Subject: [PATCH 5/5] added note about Transforms --- ignite/contrib/metrics/average_precision.py | 12 ++- .../contrib/metrics/precision_recall_curve.py | 41 +++++---- ignite/contrib/metrics/roc_auc.py | 86 ++++++++++++------- 3 files changed, 88 insertions(+), 51 deletions(-) diff --git a/ignite/contrib/metrics/average_precision.py b/ignite/contrib/metrics/average_precision.py index 5c91731f0251..3b92fccb9bbd 100644 --- a/ignite/contrib/metrics/average_precision.py +++ b/ignite/contrib/metrics/average_precision.py @@ -29,17 +29,25 @@ class AveragePrecision(EpochMetric): no issues. User will be warned in case there are any issues computing the function. device: optional device specification for internal storage. - Examples: + Note: AveragePrecision expects y to be comprised of 0's and 1's. y_pred must either be probability estimates or confidence values. To apply an activation to y_pred, use output_transform as shown below: + .. code-block:: python + + def activated_output_transform(output): + y_pred, y = output + y_pred = torch.softmax(y_pred, dim=1) + return y_pred, y + avg_precision = AveragePrecision(activated_output_transform) + + Examples: .. testcode:: y_pred = torch.Tensor([[0.79, 0.21], [0.30, 0.70], [0.46, 0.54], [0.16, 0.84]]) y_true = torch.tensor([[1, 1], [1, 1], [0, 1], [0, 1]]) avg_precision = AveragePrecision() - #The ``output_transform`` arg of the metric can be used to perform a softmax on the ``y_pred``. avg_precision.attach(default_evaluator, 'average_precision') state = default_evaluator.run([[y_pred, y_true]]) print(state.metrics['average_precision']) diff --git a/ignite/contrib/metrics/precision_recall_curve.py b/ignite/contrib/metrics/precision_recall_curve.py index 39c8eabdc339..0d5efbc45bf1 100644 --- a/ignite/contrib/metrics/precision_recall_curve.py +++ b/ignite/contrib/metrics/precision_recall_curve.py @@ -32,27 +32,36 @@ class PrecisionRecallCurve(EpochMetric): #sklearn.metrics.precision_recall_curve>`_ is run on the first batch of data to ensure there are no issues. User will be warned in case there are any issues computing the function. - PrecisionRecallCurve expects y to be comprised of 0's and 1's. y_pred must either be probability estimates - or confidence values. To apply an activation to y_pred, use output_transform as shown below: + Note: + PrecisionRecallCurve expects y to be comprised of 0's and 1's. y_pred must either be probability estimates + or confidence values. To apply an activation to y_pred, use output_transform as shown below: - .. testcode:: + .. code-block:: python - y_pred = torch.tensor([0.0474, 0.5987, 0.7109, 0.9997]) - y_true = torch.tensor([0, 0, 1, 1]) - prec_recall_curve = PrecisionRecallCurve() - #The ``output_transform`` arg of the metric can be used to perform a sigmoid on the ``y_pred``. 
-        prec_recall_curve.attach(default_evaluator, 'prec_recall_curve')
-        state = default_evaluator.run([[y_pred, y_true]])
+            def sigmoid_output_transform(output):
+                y_pred, y = output
+                y_pred = torch.sigmoid(y_pred)
+                return y_pred, y
+            prec_recall_curve = PrecisionRecallCurve(sigmoid_output_transform)
 
-        print("Precision", [round(i, 4) for i in state.metrics['prec_recall_curve'][0].tolist()])
-        print("Recall", [round(i, 4) for i in state.metrics['prec_recall_curve'][1].tolist()])
-        print("Thresholds", [round(i, 4) for i in state.metrics['prec_recall_curve'][2].tolist()])
+    Examples:
+        .. testcode::
 
-    .. testoutput::
+            y_pred = torch.tensor([0.0474, 0.5987, 0.7109, 0.9997])
+            y_true = torch.tensor([0, 0, 1, 1])
+            prec_recall_curve = PrecisionRecallCurve()
+            prec_recall_curve.attach(default_evaluator, 'prec_recall_curve')
+            state = default_evaluator.run([[y_pred, y_true]])
 
-        Precision [1.0, 1.0, 1.0]
-        Recall [1.0, 0.5, 0.0]
-        Thresholds [0.7109, 0.9997]
+            print("Precision", [round(i, 4) for i in state.metrics['prec_recall_curve'][0].tolist()])
+            print("Recall", [round(i, 4) for i in state.metrics['prec_recall_curve'][1].tolist()])
+            print("Thresholds", [round(i, 4) for i in state.metrics['prec_recall_curve'][2].tolist()])
+
+        .. testoutput::
+
+            Precision [1.0, 1.0, 1.0]
+            Recall [1.0, 0.5, 0.0]
+            Thresholds [0.7109, 0.9997]
 
     """
diff --git a/ignite/contrib/metrics/roc_auc.py b/ignite/contrib/metrics/roc_auc.py
index e3c0991bdd8c..b6ff2d39f227 100644
--- a/ignite/contrib/metrics/roc_auc.py
+++ b/ignite/contrib/metrics/roc_auc.py
@@ -38,22 +38,33 @@ class ROC_AUC(EpochMetric):
         no issues. User will be warned in case there are any issues computing the function.
         device: optional device specification for internal storage.
 
-    ROC_AUC expects y to be comprised of 0's and 1's. y_pred must either be probability estimates or confidence
-    values. To apply an activation to y_pred, use output_transform as shown below:
+    Note:
 
-    .. testcode::
+        ROC_AUC expects y to be comprised of 0's and 1's. y_pred must either be probability estimates or confidence
+        values. To apply an activation to y_pred, use output_transform as shown below:
 
-        roc_auc = ROC_AUC()
-        #The ``output_transform`` arg of the metric can be used to perform a sigmoid on the ``y_pred``.
-        roc_auc.attach(default_evaluator, 'roc_auc')
-        y_pred = torch.tensor([[0.0474], [0.5987], [0.7109], [0.9997]])
-        y_true = torch.tensor([[0], [0], [1], [0]])
-        state = default_evaluator.run([[y_pred, y_true]])
-        print(state.metrics['roc_auc'])
+        .. code-block:: python
 
-    .. testoutput::
+            def sigmoid_output_transform(output):
+                y_pred, y = output
+                y_pred = torch.sigmoid(y_pred)
+                return y_pred, y
+            roc_auc = ROC_AUC(sigmoid_output_transform)
 
-        0.6666...
+    Examples:
+        .. testcode::
+
+            roc_auc = ROC_AUC()
+            #The ``output_transform`` arg of the metric can be used to perform a sigmoid on the ``y_pred``.
+            roc_auc.attach(default_evaluator, 'roc_auc')
+            y_pred = torch.tensor([[0.0474], [0.5987], [0.7109], [0.9997]])
+            y_true = torch.tensor([[0], [0], [1], [0]])
+            state = default_evaluator.run([[y_pred, y_true]])
+            print(state.metrics['roc_auc'])
+
+        .. testoutput::
+
+            0.6666...
     """
 
     def __init__(
@@ -88,27 +99,36 @@ class RocCurve(EpochMetric):
         `_ is run on the first batch of data to ensure there are
         no issues. User will be warned in case there are any issues computing the function.
 
-    RocCurve expects y to be comprised of 0's and 1's. y_pred must either be probability estimates or confidence
-    values. To apply an activation to y_pred, use output_transform as shown below:
+    Note:
+        RocCurve expects y to be comprised of 0's and 1's. y_pred must either be probability estimates or confidence
+        values. To apply an activation to y_pred, use output_transform as shown below:
 
-    .. testcode::
+        .. code-block:: python
 
-        roc_auc = RocCurve()
-        #The ``output_transform`` arg of the metric can be used to perform a sigmoid on the ``y_pred``.
-        roc_auc.attach(default_evaluator, 'roc_auc')
-        y_pred = torch.tensor([0.0474, 0.5987, 0.7109, 0.9997])
-        y_true = torch.tensor([0, 0, 1, 0])
-        state = default_evaluator.run([[y_pred, y_true]])
-        print("FPR", [round(i, 3) for i in state.metrics['roc_auc'][0].tolist()])
-        print("TPR", [round(i, 3) for i in state.metrics['roc_auc'][1].tolist()])
-        print("Thresholds", [round(i, 3) for i in state.metrics['roc_auc'][2].tolist()])
+            def sigmoid_output_transform(output):
+                y_pred, y = output
+                y_pred = torch.sigmoid(y_pred)
+                return y_pred, y
+            roc_curve = RocCurve(sigmoid_output_transform)
 
-    .. testoutput::
+    Examples:
+        .. testcode::
 
-        FPR [0.0, 0.333, 0.333, 1.0]
-        TPR [0.0, 0.0, 1.0, 1.0]
-        Thresholds [2.0, 1.0, 0.711, 0.047]
+            roc_auc = RocCurve()
+            #The ``output_transform`` arg of the metric can be used to perform a sigmoid on the ``y_pred``.
+            roc_auc.attach(default_evaluator, 'roc_auc')
+            y_pred = torch.tensor([0.0474, 0.5987, 0.7109, 0.9997])
+            y_true = torch.tensor([0, 0, 1, 0])
+            state = default_evaluator.run([[y_pred, y_true]])
+            print("FPR", [round(i, 3) for i in state.metrics['roc_auc'][0].tolist()])
+            print("TPR", [round(i, 3) for i in state.metrics['roc_auc'][1].tolist()])
+            print("Thresholds", [round(i, 3) for i in state.metrics['roc_auc'][2].tolist()])
+
+        .. testoutput::
+
+            FPR [0.0, 0.333, 0.333, 1.0]
+            TPR [0.0, 0.0, 1.0, 1.0]
+            Thresholds [2.0, 1.0, 0.711, 0.047]
     """
 
     def __init__(self, output_transform: Callable = lambda x: x, check_compute_fn: bool = False) -> None:
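Note on the doctests above: every ``.. testcode::`` block relies on the ``default_evaluator``
engine created in the doctest setup string of ``docs/source/conf.py``, the same setup block
that patches 2-4 edit. As a minimal standalone sketch (assuming only that the setup engine
forwards each ``(y_pred, y)`` batch unchanged to the attached metric, and with scikit-learn
installed for the compute function), the ``ROC_AUC`` example can be reproduced like this:

.. code-block:: python

    import torch

    from ignite.contrib.metrics import ROC_AUC
    from ignite.engine import Engine

    # Minimal stand-in for the doctest setup's ``default_evaluator``:
    # the process function simply returns the (y_pred, y) batch, so
    # attached metrics receive it unchanged.
    def eval_step(engine, batch):
        return batch

    default_evaluator = Engine(eval_step)

    roc_auc = ROC_AUC()
    roc_auc.attach(default_evaluator, "roc_auc")

    y_pred = torch.tensor([[0.0474], [0.5987], [0.7109], [0.9997]])
    y_true = torch.tensor([[0], [0], [1], [0]])
    state = default_evaluator.run([[y_pred, y_true]])
    print(state.metrics["roc_auc"])  # 0.6666...

Because the metric is attached to the engine rather than hard-wired into the step function,
the same stand-in engine works for the ``AveragePrecision``, ``PrecisionRecallCurve`` and
``RocCurve`` examples as well.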