18 changes: 16 additions & 2 deletions ignite/contrib/metrics/average_precision.py
@@ -29,7 +29,7 @@ class AveragePrecision(EpochMetric):
no issues. The user will be warned if there are any issues computing the function.
device: optional device specification for internal storage.

Examples:
Note:
AveragePrecision expects y to be composed of 0's and 1's. y_pred must either be probability estimates or
confidence values. To apply an activation to y_pred, use output_transform as shown below:

@@ -39,9 +39,23 @@ def activated_output_transform(output):
    y_pred, y = output
    y_pred = torch.softmax(y_pred, dim=1)
    return y_pred, y

avg_precision = AveragePrecision(activated_output_transform)

Examples:
.. testcode::

y_pred = torch.tensor([[0.79, 0.21], [0.30, 0.70], [0.46, 0.54], [0.16, 0.84]])
y_true = torch.tensor([[1, 1], [1, 1], [0, 1], [0, 1]])

avg_precision = AveragePrecision()
avg_precision.attach(default_evaluator, 'average_precision')
state = default_evaluator.run([[y_pred, y_true]])
print(state.metrics['average_precision'])

.. testoutput::

0.9166...
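
The examples above assume a ``default_evaluator`` engine is already in scope, as in
the rest of the documentation. A minimal sketch of such an engine (an ``Engine``
whose process function simply forwards each ``(y_pred, y)`` batch to the attached
metrics) could look like:

.. code-block:: python

    from ignite.engine import Engine

    def eval_step(engine, batch):
        # Forward the (y_pred, y) pair unchanged; attached metrics consume it.
        return batch

    default_evaluator = Engine(eval_step)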

"""

def __init__(
35 changes: 27 additions & 8 deletions ignite/contrib/metrics/precision_recall_curve.py
@@ -32,17 +32,36 @@ class PrecisionRecallCurve(EpochMetric):
#sklearn.metrics.precision_recall_curve>`_ is run on the first batch of data to ensure there are
no issues. The user will be warned if there are any issues computing the function.

PrecisionRecallCurve expects y to be comprised of 0's and 1's. y_pred must either be probability estimates
or confidence values. To apply an activation to y_pred, use output_transform as shown below:

.. code-block:: python

    def activated_output_transform(output):
        y_pred, y = output
        y_pred = torch.sigmoid(y_pred)
        return y_pred, y

    roc_auc = PrecisionRecallCurve(activated_output_transform)

Note:
    PrecisionRecallCurve expects y to be composed of 0's and 1's. y_pred must either be probability estimates
    or confidence values. To apply an activation to y_pred, use output_transform as shown below:

    .. code-block:: python

        def sigmoid_output_transform(output):
            y_pred, y = output
            y_pred = torch.sigmoid(y_pred)
            return y_pred, y

        prec_recall_curve = PrecisionRecallCurve(sigmoid_output_transform)

Examples:
.. testcode::

y_pred = torch.tensor([0.0474, 0.5987, 0.7109, 0.9997])
y_true = torch.tensor([0, 0, 1, 1])
prec_recall_curve = PrecisionRecallCurve()
prec_recall_curve.attach(default_evaluator, 'prec_recall_curve')
state = default_evaluator.run([[y_pred, y_true]])

print("Precision", [round(i, 4) for i in state.metrics['prec_recall_curve'][0].tolist()])
print("Recall", [round(i, 4) for i in state.metrics['prec_recall_curve'][1].tolist()])
print("Thresholds", [round(i, 4) for i in state.metrics['prec_recall_curve'][2].tolist()])

.. testoutput::

Precision [1.0, 1.0, 1.0]
Recall [1.0, 0.5, 0.0]
Thresholds [0.7109, 0.9997]
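
Note that ``precision`` and ``recall`` each contain one more entry than ``thresholds``:
scikit-learn appends a final (precision=1, recall=0) point that has no corresponding
threshold. A post-processing sketch (hypothetical, not part of the metric) that picks
the threshold maximizing F1 from the state above:

.. code-block:: python

    precision, recall, thresholds = state.metrics['prec_recall_curve']
    # Drop the final point so each (precision, recall) pair lines up with a threshold.
    pairs = zip(precision.tolist()[:-1], recall.tolist()[:-1], thresholds.tolist())
    best_p, best_r, best_t = max(pairs, key=lambda x: 2 * x[0] * x[1] / (x[0] + x[1] + 1e-12))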

"""

Expand Down
74 changes: 53 additions & 21 deletions ignite/contrib/metrics/roc_auc.py
@@ -38,18 +38,33 @@ class ROC_AUC(EpochMetric):
no issues. The user will be warned if there are any issues computing the function.
device: optional device specification for internal storage.

ROC_AUC expects y to be comprised of 0's and 1's. y_pred must either be probability estimates or confidence
values. To apply an activation to y_pred, use output_transform as shown below:

.. code-block:: python

    def activated_output_transform(output):
        y_pred, y = output
        y_pred = torch.sigmoid(y_pred)
        return y_pred, y

    roc_auc = ROC_AUC(activated_output_transform)

Note:
    ROC_AUC expects y to be composed of 0's and 1's. y_pred must either be probability estimates or confidence
    values. To apply an activation to y_pred, use output_transform as shown below:

    .. code-block:: python

        def sigmoid_output_transform(output):
            y_pred, y = output
            y_pred = torch.sigmoid(y_pred)
            return y_pred, y

        roc_auc = ROC_AUC(sigmoid_output_transform)

Examples:
.. testcode::

roc_auc = ROC_AUC()
# The ``output_transform`` arg of the metric can be used to apply a sigmoid to ``y_pred``.
roc_auc.attach(default_evaluator, 'roc_auc')
y_pred = torch.tensor([[0.0474], [0.5987], [0.7109], [0.9997]])
y_true = torch.tensor([[0], [0], [1], [0]])
state = default_evaluator.run([[y_pred, y_true]])
print(state.metrics['roc_auc'])

.. testoutput::

0.6666...
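
ROC_AUC delegates the computation to ``sklearn.metrics.roc_auc_score``, so the value
can be cross-checked directly with scikit-learn (a sketch reusing the tensors from the
example above):

.. code-block:: python

    from sklearn.metrics import roc_auc_score

    # The single positive outranks 2 of the 3 negatives, hence 2/3.
    print(roc_auc_score(y_true.numpy().ravel(), y_pred.numpy().ravel()))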
"""

def __init__(
@@ -84,19 +99,36 @@ class RocCurve(EpochMetric):
<https://scikit-learn.org/stable/modules/generated/sklearn.metrics.roc_curve.html#
sklearn.metrics.roc_curve>`_ is run on the first batch of data to ensure there are
no issues. The user will be warned if there are any issues computing the function.

RocCurve expects y to be comprised of 0's and 1's. y_pred must either be probability estimates or confidence
values. To apply an activation to y_pred, use output_transform as shown below:

.. code-block:: python

    def activated_output_transform(output):
        y_pred, y = output
        y_pred = torch.sigmoid(y_pred)
        return y_pred, y

    roc_auc = RocCurve(activated_output_transform)

Note:
    RocCurve expects y to be composed of 0's and 1's. y_pred must either be probability estimates or confidence
    values. To apply an activation to y_pred, use output_transform as shown below:

    .. code-block:: python

        def sigmoid_output_transform(output):
            y_pred, y = output
            y_pred = torch.sigmoid(y_pred)
            return y_pred, y

        roc_curve = RocCurve(sigmoid_output_transform)

Examples:
.. testcode::

roc_auc = RocCurve()
# The ``output_transform`` arg of the metric can be used to apply a sigmoid to ``y_pred``.
roc_auc.attach(default_evaluator, 'roc_auc')
y_pred = torch.tensor([0.0474, 0.5987, 0.7109, 0.9997])
y_true = torch.tensor([0, 0, 1, 0])
state = default_evaluator.run([[y_pred, y_true]])
print("FPR", [round(i, 3) for i in state.metrics['roc_auc'][0].tolist()])
print("TPR", [round(i, 3) for i in state.metrics['roc_auc'][1].tolist()])
print("Thresholds", [round(i, 3) for i in state.metrics['roc_auc'][2].tolist()])

.. testoutput::

FPR [0.0, 0.333, 0.333, 1.0]
TPR [0.0, 0.0, 1.0, 1.0]
Thresholds [2.0, 1.0, 0.711, 0.047]
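
The first threshold reported (2.0 here) is an artificial value that scikit-learn
prepends so the curve starts at (0, 0); it does not correspond to any prediction.
A plotting sketch (assuming matplotlib is available) for the state above:

.. code-block:: python

    import matplotlib.pyplot as plt

    fpr, tpr, _ = state.metrics['roc_auc']
    plt.plot(fpr.tolist(), tpr.tolist(), marker='o')
    plt.xlabel('False positive rate')
    plt.ylabel('True positive rate')
    plt.show()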
"""

def __init__(self, output_transform: Callable = lambda x: x, check_compute_fn: bool = False) -> None: