18 changes: 12 additions & 6 deletions ignite/contrib/metrics/average_precision.py
@@ -33,14 +33,20 @@ class AveragePrecision(EpochMetric):
     AveragePrecision expects y to be comprised of 0's and 1's. y_pred must either be probability estimates or
     confidence values. To apply an activation to y_pred, use output_transform as shown below:

-    .. code-block:: python
+    .. testcode::

-        def activated_output_transform(output):
-            y_pred, y = output
-            y_pred = torch.softmax(y_pred, dim=1)
-            return y_pred, y
+        y_pred = torch.Tensor([[0.79, 0.21], [0.30, 0.70], [0.46, 0.54], [0.16, 0.84]])
+        y_true = torch.tensor([[1, 1], [1, 1], [0, 1], [0, 1]])

-        avg_precision = AveragePrecision(activated_output_transform)
+        avg_precision = AveragePrecision()
+        # The ``output_transform`` arg of the metric can be used to perform a softmax on the ``y_pred``.
@sdesrozis (Contributor) commented on Dec 5, 2021

Thinking about it, it is worth keeping the current example to show how to respect the data format, maybe as a note or an example following the one you added. What do you think?

Note:
     AveragePrecision expects y to be comprised of 0's and 1's. y_pred must either be probability estimates or confidence values. To apply an activation to y_pred, use output_transform as shown below:

        .. code-block:: python

            def activated_output_transform(output):
                y_pred, y = output
                y_pred = torch.softmax(y_pred, dim=1)
                return y_pred, y

            avg_precision = AveragePrecision(activated_output_transform)

Contributor Author replied:

Yup, makes sense to have it in the docs somewhere, I have added it now!

+        avg_precision.attach(default_evaluator, 'average_precision')
+        state = default_evaluator.run([[y_pred, y_true]])
+        print(state.metrics['average_precision'])

+    .. testoutput::

+        0.9166...

     """

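As a point of reference for the thread above, the reviewer's output_transform note and the new testcode example can be combined. The following is only a minimal sketch, not part of the PR: it assumes ``default_evaluator`` is the plain forwarding Engine used by the documentation's test setup, and the logit values are made up for illustration.

    import torch
    from ignite.engine import Engine
    from ignite.contrib.metrics import AveragePrecision

    # Assumed stand-in for the docs' ``default_evaluator``: an Engine that
    # simply forwards each (y_pred, y) batch as its output.
    def eval_step(engine, batch):
        return batch

    default_evaluator = Engine(eval_step)

    # The output_transform from the original example: turn raw scores into probabilities.
    def activated_output_transform(output):
        y_pred, y = output
        return torch.softmax(y_pred, dim=1), y

    avg_precision = AveragePrecision(activated_output_transform)
    avg_precision.attach(default_evaluator, "average_precision")

    logits = torch.tensor([[2.0, -1.0], [0.5, 1.3], [-0.2, 0.1], [-1.0, 1.7]])  # made-up raw scores
    y_true = torch.tensor([[1, 1], [1, 1], [0, 1], [0, 1]])

    state = default_evaluator.run([[logits, y_true]])
    print(state.metrics["average_precision"])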
22 changes: 16 additions & 6 deletions ignite/contrib/metrics/precision_recall_curve.py
@@ -35,14 +35,24 @@ class PrecisionRecallCurve(EpochMetric):
     PrecisionRecallCurve expects y to be comprised of 0's and 1's. y_pred must either be probability estimates
     or confidence values. To apply an activation to y_pred, use output_transform as shown below:

-    .. code-block:: python
+    .. testcode::

-        def activated_output_transform(output):
-            y_pred, y = output
-            y_pred = torch.sigmoid(y_pred)
-            return y_pred, y
+        y_pred = torch.tensor([0.0474, 0.5987, 0.7109, 0.9997])
+        y_true = torch.tensor([0, 0, 1, 1])
+        prec_recall_curve = PrecisionRecallCurve()
+        # The ``output_transform`` arg of the metric can be used to perform a sigmoid on the ``y_pred``.
+        prec_recall_curve.attach(default_evaluator, 'prec_recall_curve')
+        state = default_evaluator.run([[y_pred, y_true]])

-        roc_auc = PrecisionRecallCurve(activated_output_transform)
+        print("Precision", [round(i, 4) for i in state.metrics['prec_recall_curve'][0].tolist()])
+        print("Recall", [round(i, 4) for i in state.metrics['prec_recall_curve'][1].tolist()])
+        print("Thresholds", [round(i, 4) for i in state.metrics['prec_recall_curve'][2].tolist()])

+    .. testoutput::

+        Precision [1.0, 1.0, 1.0]
+        Recall [1.0, 0.5, 0.0]
+        Thresholds [0.7109, 0.9997]

     """

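The same pattern applies to PrecisionRecallCurve when ``y_pred`` holds raw scores rather than probabilities. A minimal sketch, again assuming the forwarding ``default_evaluator`` described above; the logits are chosen so that their sigmoid roughly reproduces the probabilities used in the docstring example:

    import torch
    from ignite.engine import Engine
    from ignite.contrib.metrics import PrecisionRecallCurve

    def eval_step(engine, batch):
        return batch  # forward the (y_pred, y) batch unchanged

    default_evaluator = Engine(eval_step)

    def activated_output_transform(output):
        y_pred, y = output
        return torch.sigmoid(y_pred), y  # raw scores -> probabilities

    prec_recall_curve = PrecisionRecallCurve(activated_output_transform)
    prec_recall_curve.attach(default_evaluator, "prec_recall_curve")

    logits = torch.tensor([-3.0, 0.4, 0.9, 8.0])  # sigmoid(logits) ~= [0.0474, 0.5987, 0.7109, 0.9997]
    y_true = torch.tensor([0, 0, 1, 1])

    state = default_evaluator.run([[logits, y_true]])
    precision, recall, thresholds = state.metrics["prec_recall_curve"]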
36 changes: 24 additions & 12 deletions ignite/contrib/metrics/roc_auc.py
@@ -41,15 +41,19 @@ class ROC_AUC(EpochMetric):
     ROC_AUC expects y to be comprised of 0's and 1's. y_pred must either be probability estimates or confidence
     values. To apply an activation to y_pred, use output_transform as shown below:

-    .. code-block:: python
+    .. testcode::

-        def activated_output_transform(output):
-            y_pred, y = output
-            y_pred = torch.sigmoid(y_pred)
-            return y_pred, y
+        roc_auc = ROC_AUC()
+        # The ``output_transform`` arg of the metric can be used to perform a sigmoid on the ``y_pred``.
+        roc_auc.attach(default_evaluator, 'roc_auc')
+        y_pred = torch.tensor([[0.0474], [0.5987], [0.7109], [0.9997]])
+        y_true = torch.tensor([[0], [0], [1], [0]])
+        state = default_evaluator.run([[y_pred, y_true]])
+        print(state.metrics['roc_auc'])

-        roc_auc = ROC_AUC(activated_output_transform)
+    .. testoutput::

+        0.6666...
     """

def __init__(
@@ -88,15 +92,23 @@ class RocCurve(EpochMetric):
     RocCurve expects y to be comprised of 0's and 1's. y_pred must either be probability estimates or confidence
     values. To apply an activation to y_pred, use output_transform as shown below:

-    .. code-block:: python
+    .. testcode::

-        def activated_output_transform(output):
-            y_pred, y = output
-            y_pred = torch.sigmoid(y_pred)
-            return y_pred, y
+        roc_auc = RocCurve()
+        # The ``output_transform`` arg of the metric can be used to perform a sigmoid on the ``y_pred``.
+        roc_auc.attach(default_evaluator, 'roc_auc')
+        y_pred = torch.tensor([0.0474, 0.5987, 0.7109, 0.9997])
+        y_true = torch.tensor([0, 0, 1, 0])
+        state = default_evaluator.run([[y_pred, y_true]])
+        print("FPR", [round(i, 3) for i in state.metrics['roc_auc'][0].tolist()])
+        print("TPR", [round(i, 3) for i in state.metrics['roc_auc'][1].tolist()])
+        print("Thresholds", [round(i, 3) for i in state.metrics['roc_auc'][2].tolist()])

-        roc_auc = RocCurve(activated_output_transform)
+    .. testoutput::

+        FPR [0.0, 0.333, 0.333, 1.0]
+        TPR [0.0, 0.0, 1.0, 1.0]
+        Thresholds [2.0, 1.0, 0.711, 0.047]
     """

def __init__(self, output_transform: Callable = lambda x: x, check_compute_fn: bool = False) -> None:
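Likewise for ROC_AUC and RocCurve: both metrics can share one sigmoid output_transform and be attached to the same evaluator. A minimal sketch under the same assumptions as above (a forwarding ``default_evaluator`` and made-up logits):

    import torch
    from ignite.engine import Engine
    from ignite.contrib.metrics import ROC_AUC, RocCurve

    def eval_step(engine, batch):
        return batch

    default_evaluator = Engine(eval_step)

    def activated_output_transform(output):
        y_pred, y = output
        return torch.sigmoid(y_pred), y  # raw scores -> probabilities

    # Both metrics reuse the same transform and the same evaluator.
    ROC_AUC(activated_output_transform).attach(default_evaluator, "roc_auc")
    RocCurve(activated_output_transform).attach(default_evaluator, "roc_curve")

    logits = torch.tensor([-3.0, 0.4, 0.9, 8.0])  # made-up raw scores
    y_true = torch.tensor([0, 0, 1, 0])

    state = default_evaluator.run([[logits, y_true]])
    print(state.metrics["roc_auc"])                    # scalar AUC
    fpr, tpr, thresholds = state.metrics["roc_curve"]  # curve points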