Merged
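For orientation (not part of the original diff): the change applied throughout is `torch.Tensor(...)` → `torch.tensor(...)`. A minimal sketch of the behavioral difference, assuming current PyTorch semantics:

```python
import torch

# torch.Tensor is the legacy float32 constructor: it always yields float32
# and interprets a bare integer argument as a size, not a value.
a = torch.Tensor([1, 2, 3])   # tensor([1., 2., 3.]), dtype=torch.float32
s = torch.Tensor(0)           # empty tensor of size 0 (uninitialized storage)

# torch.tensor is the recommended factory: it copies the data, infers the
# dtype from it, and treats a scalar argument as a value.
b = torch.tensor([1, 2, 3])   # tensor([1, 2, 3]), dtype=torch.int64
t = torch.tensor(0.0)         # 0-dim tensor holding the value 0.0
```

This is also why the logger tests below switch from `torch.Tensor(0)` (an empty, uninitialized tensor) to `torch.tensor(0.0)` (an actual scalar parameter to hand to the SGD optimizer).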
2 changes: 1 addition & 1 deletion ignite/contrib/metrics/average_precision.py
@@ -48,7 +48,7 @@ def activated_output_transform(output):

.. testcode::

- y_pred = torch.Tensor([[0.79, 0.21], [0.30, 0.70], [0.46, 0.54], [0.16, 0.84]])
+ y_pred = torch.tensor([[0.79, 0.21], [0.30, 0.70], [0.46, 0.54], [0.16, 0.84]])
y_true = torch.tensor([[1, 1], [1, 1], [0, 1], [0, 1]])

avg_precision = AveragePrecision()
4 changes: 2 additions & 2 deletions ignite/contrib/metrics/cohen_kappa.py
@@ -37,8 +37,8 @@ class CohenKappa(EpochMetric):

metric = CohenKappa()
metric.attach(default_evaluator, 'ck')
- y_true = torch.Tensor([2, 0, 2, 2, 0, 1])
- y_pred = torch.Tensor([0, 0, 2, 2, 0, 2])
+ y_true = torch.tensor([2, 0, 2, 2, 0, 1])
+ y_pred = torch.tensor([0, 0, 2, 2, 0, 2])
state = default_evaluator.run([[y_pred, y_true]])
print(state.metrics['ck'])

2 changes: 1 addition & 1 deletion ignite/contrib/metrics/regression/canberra_metric.py
@@ -49,7 +49,7 @@ class CanberraMetric(_BaseRegression):

metric = CanberraMetric()
metric.attach(default_evaluator, 'canberra')
- y_pred = torch.Tensor([[3.8], [9.9], [-5.4], [2.1]])
+ y_pred = torch.tensor([[3.8], [9.9], [-5.4], [2.1]])
y_true = y_pred * 1.5
state = default_evaluator.run([[y_pred, y_true]])
print(state.metrics['canberra'])
@@ -46,7 +46,7 @@ class FractionalAbsoluteError(_BaseRegression):

metric = FractionalAbsoluteError()
metric.attach(default_evaluator, 'fractional_abs_error')
- y_pred = torch.Tensor([[3.8], [9.9], [-5.4], [2.1]])
+ y_pred = torch.tensor([[3.8], [9.9], [-5.4], [2.1]])
y_true = y_pred * 0.8
state = default_evaluator.run([[y_pred, y_true]])
print(state.metrics['fractional_abs_error'])
2 changes: 1 addition & 1 deletion ignite/contrib/metrics/regression/fractional_bias.py
@@ -46,7 +46,7 @@ class FractionalBias(_BaseRegression):

metric = FractionalBias()
metric.attach(default_evaluator, 'fractional_bias')
- y_pred = torch.Tensor([[3.8], [9.9], [5.4], [2.1]])
+ y_pred = torch.tensor([[3.8], [9.9], [5.4], [2.1]])
y_true = y_pred * 1.5
state = default_evaluator.run([[y_pred, y_true]])
print(state.metrics['fractional_bias'])
@@ -46,7 +46,7 @@ class GeometricMeanAbsoluteError(_BaseRegression):

metric = GeometricMeanAbsoluteError()
metric.attach(default_evaluator, 'gmae')
- y_pred = torch.Tensor([[3.8], [9.9], [-5.4], [2.1]])
+ y_pred = torch.tensor([[3.8], [9.9], [-5.4], [2.1]])
y_true = y_pred * 1.5
state = default_evaluator.run([[y_pred, y_true]])
print(state.metrics['gmae'])
@@ -60,7 +60,7 @@ class GeometricMeanRelativeAbsoluteError(_BaseRegression):

metric = GeometricMeanRelativeAbsoluteError()
metric.attach(default_evaluator, 'gmare')
- y_true = torch.Tensor([0, 1, 2, 3, 4, 5])
+ y_true = torch.tensor([0, 1, 2, 3, 4, 5])
y_pred = y_true * 0.75
state = default_evaluator.run([[y_pred, y_true]])
print(state.metrics['gmare'])
2 changes: 1 addition & 1 deletion ignite/contrib/metrics/regression/manhattan_distance.py
@@ -45,7 +45,7 @@ class ManhattanDistance(_BaseRegression):

metric = ManhattanDistance()
metric.attach(default_evaluator, 'manhattan')
- y_true = torch.Tensor([0, 1, 2, 3, 4, 5])
+ y_true = torch.tensor([0, 1, 2, 3, 4, 5])
y_pred = y_true * 0.75
state = default_evaluator.run([[y_pred, y_true]])
print(state.metrics['manhattan'])
@@ -46,7 +46,7 @@ class MaximumAbsoluteError(_BaseRegression):

metric = MaximumAbsoluteError()
metric.attach(default_evaluator, 'mae')
- y_true = torch.Tensor([0, 1, 2, 3, 4, 5])
+ y_true = torch.tensor([0, 1, 2, 3, 4, 5])
y_pred = y_true * 0.75
state = default_evaluator.run([[y_pred, y_true]])
print(state.metrics['mae'])
@@ -46,7 +46,7 @@ class MeanAbsoluteRelativeError(_BaseRegression):

metric = MeanAbsoluteRelativeError()
metric.attach(default_evaluator, 'mare')
- y_true = torch.Tensor([1, 2, 3, 4, 5])
+ y_true = torch.tensor([1, 2, 3, 4, 5])
y_pred = y_true * 0.75
state = default_evaluator.run([[y_pred, y_true]])
print(state.metrics['mare'])
2 changes: 1 addition & 1 deletion ignite/contrib/metrics/regression/mean_error.py
@@ -46,7 +46,7 @@ class MeanError(_BaseRegression):

metric = MeanError()
metric.attach(default_evaluator, 'me')
- y_true = torch.Tensor([0, 1, 2, 3, 4, 5])
+ y_true = torch.tensor([0, 1, 2, 3, 4, 5])
y_pred = y_true * 0.75
state = default_evaluator.run([[y_pred, y_true]])
print(state.metrics['me'])
2 changes: 1 addition & 1 deletion ignite/contrib/metrics/regression/mean_normalized_bias.py
@@ -46,7 +46,7 @@ class MeanNormalizedBias(_BaseRegression):

metric = MeanNormalizedBias()
metric.attach(default_evaluator, 'mnb')
- y_true = torch.Tensor([1, 2, 3, 4, 5])
+ y_true = torch.tensor([1, 2, 3, 4, 5])
y_pred = y_true * 0.75
state = default_evaluator.run([[y_pred, y_true]])
print(state.metrics['mnb'])
2 changes: 1 addition & 1 deletion ignite/contrib/metrics/regression/median_absolute_error.py
@@ -52,7 +52,7 @@ class MedianAbsoluteError(EpochMetric):

metric = MedianAbsoluteError()
metric.attach(default_evaluator, 'mae')
- y_true = torch.Tensor([0, 1, 2, 3, 4, 5])
+ y_true = torch.tensor([0, 1, 2, 3, 4, 5])
y_pred = y_true * 0.75
state = default_evaluator.run([[y_pred, y_true]])
print(state.metrics['mae'])
@@ -50,7 +50,7 @@ class MedianAbsolutePercentageError(EpochMetric):

metric = MedianAbsolutePercentageError()
metric.attach(default_evaluator, 'mape')
- y_true = torch.Tensor([1, 2, 3, 4, 5])
+ y_true = torch.tensor([1, 2, 3, 4, 5])
y_pred = y_true * 0.75
state = default_evaluator.run([[y_pred, y_true]])
print(state.metrics['mape'])
@@ -50,7 +50,7 @@ class MedianRelativeAbsoluteError(EpochMetric):

metric = MedianRelativeAbsoluteError()
metric.attach(default_evaluator, 'mrae')
- y_true = torch.Tensor([0, 1, 2, 3, 4, 5])
+ y_true = torch.tensor([0, 1, 2, 3, 4, 5])
y_pred = y_true * 0.75
state = default_evaluator.run([[y_pred, y_true]])
print(state.metrics['mrae'])
2 changes: 1 addition & 1 deletion ignite/contrib/metrics/regression/r2_score.py
@@ -44,7 +44,7 @@ class R2Score(_BaseRegression):

metric = R2Score()
metric.attach(default_evaluator, 'r2')
- y_true = torch.Tensor([0, 1, 2, 3, 4, 5])
+ y_true = torch.tensor([0, 1, 2, 3, 4, 5])
y_pred = y_true * 0.75
state = default_evaluator.run([[y_pred, y_true]])
print(state.metrics['r2'])
2 changes: 1 addition & 1 deletion ignite/contrib/metrics/regression/wave_hedges_distance.py
@@ -45,7 +45,7 @@ class WaveHedgesDistance(_BaseRegression):

metric = WaveHedgesDistance()
metric.attach(default_evaluator, 'whd')
- y_true = torch.Tensor([0, 1, 2, 3, 4, 5])
+ y_true = torch.tensor([0, 1, 2, 3, 4, 5])
y_pred = y_true * 0.75
state = default_evaluator.run([[y_pred, y_true]])
print(state.metrics['whd'])
2 changes: 1 addition & 1 deletion ignite/metrics/accumulation.py
@@ -225,7 +225,7 @@ class GeometricAverage(VariableAccumulation):
metric = GeometricAverage()
metric.attach(default_evaluator, 'avg')
# Case 1. input is er
- data = torch.Tensor([1, 2, 3])
+ data = torch.tensor([1, 2, 3])
state = default_evaluator.run(data)
print(state.metrics['avg'])

2 changes: 1 addition & 1 deletion ignite/metrics/mean_pairwise_distance.py
@@ -44,7 +44,7 @@ class MeanPairwiseDistance(Metric):

metric = MeanPairwiseDistance(p=4)
metric.attach(default_evaluator, 'mpd')
- preds = torch.Tensor([
+ preds = torch.tensor([
[1, 2, 4, 1],
[2, 3, 1, 5],
[1, 3, 5, 1],
8 changes: 4 additions & 4 deletions ignite/metrics/precision.py
@@ -286,7 +286,7 @@ class Precision(_BasePrecisionRecall):
weighted_metric.attach(default_evaluator, "weighted precision")

y_true = torch.tensor([2, 0, 2, 1, 0])
- y_pred = torch.Tensor([
+ y_pred = torch.tensor([
[0.0266, 0.1719, 0.3055],
[0.6886, 0.3978, 0.8176],
[0.9230, 0.0197, 0.8395],
@@ -320,14 +320,14 @@ class Precision(_BasePrecisionRecall):
weighted_metric.attach(default_evaluator, "weighted precision")
samples_metric.attach(default_evaluator, "samples precision")

- y_true = torch.Tensor([
+ y_true = torch.tensor([
[0, 0, 1],
[0, 0, 0],
[0, 0, 0],
[1, 0, 0],
[0, 1, 1],
])
- y_pred = torch.Tensor([
+ y_pred = torch.tensor([
[1, 1, 0],
[1, 0, 1],
[1, 0, 0],
@@ -361,7 +361,7 @@ def thresholded_output_transform(output):
metric = Precision(output_transform=thresholded_output_transform)
metric.attach(default_evaluator, "precision")
y_true = torch.tensor([1, 0, 1, 1, 0, 1])
- y_pred = torch.Tensor([0.6, 0.2, 0.9, 0.4, 0.7, 0.65])
+ y_pred = torch.tensor([0.6, 0.2, 0.9, 0.4, 0.7, 0.65])
state = default_evaluator.run([[y_pred, y_true]])
print(state.metrics["precision"])

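The docstring examples above assume a `default_evaluator` engine is already defined. A minimal sketch of how such an evaluator can be built to run, for example, the multiclass Precision snippet; this is an assumption that mirrors ignite's doctest setup, not code from this diff:

```python
import torch
from ignite.engine import Engine
from ignite.metrics import Precision

def eval_step(engine, batch):
    # Each "batch" in the examples is already a (y_pred, y_true) pair,
    # so the evaluator just passes it through to the attached metrics.
    y_pred, y_true = batch
    return y_pred, y_true

default_evaluator = Engine(eval_step)

metric = Precision(average=False)
metric.attach(default_evaluator, "precision")

# First three rows of the example above, with matching targets.
y_true = torch.tensor([2, 0, 2])
y_pred = torch.tensor([
    [0.0266, 0.1719, 0.3055],
    [0.6886, 0.3978, 0.8176],
    [0.9230, 0.0197, 0.8395],
])
state = default_evaluator.run([[y_pred, y_true]])
print(state.metrics["precision"])  # per-class precision tensor
```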
2 changes: 1 addition & 1 deletion tests/ignite/contrib/handlers/test_clearml_logger.py
@@ -49,7 +49,7 @@ def test_optimizer_params_handler_wrong_setup():

def test_optimizer_params():

- optimizer = torch.optim.SGD([torch.Tensor(0)], lr=0.01)
+ optimizer = torch.optim.SGD([torch.tensor(0.0)], lr=0.01)
wrapper = OptimizerParamsHandler(optimizer=optimizer, param_name="lr")
mock_logger = MagicMock(spec=ClearMLLogger)
mock_logger.clearml_logger = MagicMock()
4 changes: 2 additions & 2 deletions tests/ignite/contrib/handlers/test_mlflow_logger.py
@@ -64,7 +64,7 @@ def test_output_handler_metric_names():
wrapper = OutputHandler("tag", metric_names=["a"])

mock_engine = MagicMock()
- mock_engine.state = State(metrics={"a": torch.Tensor([0.0, 1.0, 2.0, 3.0])})
+ mock_engine.state = State(metrics={"a": torch.tensor([0.0, 1.0, 2.0, 3.0])})
mock_engine.state.iteration = 5

mock_logger = MagicMock(spec=MLflowLogger)
@@ -216,7 +216,7 @@ def test_optimizer_params_handler_wrong_setup():

def test_optimizer_params():

- optimizer = torch.optim.SGD([torch.Tensor(0)], lr=0.01)
+ optimizer = torch.optim.SGD([torch.tensor(0.0)], lr=0.01)
wrapper = OptimizerParamsHandler(optimizer=optimizer, param_name="lr")
mock_logger = MagicMock(spec=MLflowLogger)
mock_logger.log_metrics = MagicMock()
4 changes: 2 additions & 2 deletions tests/ignite/contrib/handlers/test_neptune_logger.py
@@ -34,7 +34,7 @@ def test_optimizer_params_handler_wrong_setup():


def test_optimizer_params():
- optimizer = torch.optim.SGD([torch.Tensor(0)], lr=0.01)
+ optimizer = torch.optim.SGD([torch.tensor(0.0)], lr=0.01)
wrapper = OptimizerParamsHandler(optimizer=optimizer, param_name="lr")
mock_logger = MagicMock(spec=NeptuneLogger)
mock_logger.log_metric = MagicMock()
@@ -99,7 +99,7 @@ def test_output_handler_metric_names():

mock_engine = MagicMock()
mock_logger.log_metric = MagicMock()
- mock_engine.state = State(metrics={"a": torch.Tensor([0.0, 1.0, 2.0, 3.0])})
+ mock_engine.state = State(metrics={"a": torch.tensor([0.0, 1.0, 2.0, 3.0])})
mock_engine.state.iteration = 5

mock_logger = MagicMock(spec=NeptuneLogger)
4 changes: 2 additions & 2 deletions tests/ignite/contrib/handlers/test_polyaxon_logger.py
@@ -66,7 +66,7 @@ def test_output_handler_metric_names():
wrapper = OutputHandler("tag", metric_names=["a"])

mock_engine = MagicMock()
- mock_engine.state = State(metrics={"a": torch.Tensor([0.0, 1.0, 2.0, 3.0])})
+ mock_engine.state = State(metrics={"a": torch.tensor([0.0, 1.0, 2.0, 3.0])})
mock_engine.state.iteration = 5

mock_logger = MagicMock(spec=PolyaxonLogger)
@@ -232,7 +232,7 @@ def test_optimizer_params_handler_wrong_setup():

def test_optimizer_params():

- optimizer = torch.optim.SGD([torch.Tensor(0)], lr=0.01)
+ optimizer = torch.optim.SGD([torch.tensor(0.0)], lr=0.01)
wrapper = OptimizerParamsHandler(optimizer=optimizer, param_name="lr")
mock_logger = MagicMock(spec=PolyaxonLogger)
mock_logger.log_metrics = MagicMock()
4 changes: 2 additions & 2 deletions tests/ignite/contrib/handlers/test_tensorboard_logger.py
@@ -34,7 +34,7 @@ def test_optimizer_params_handler_wrong_setup():

def test_optimizer_params():

- optimizer = torch.optim.SGD([torch.Tensor(0)], lr=0.01)
+ optimizer = torch.optim.SGD([torch.tensor(0.0)], lr=0.01)
wrapper = OptimizerParamsHandler(optimizer=optimizer, param_name="lr")
mock_logger = MagicMock(spec=TensorboardLogger)
mock_logger.writer = MagicMock()
@@ -104,7 +104,7 @@ def test_output_handler_metric_names():
wrapper = OutputHandler("tag", metric_names=["a"])

mock_engine = MagicMock()
- mock_engine.state = State(metrics={"a": torch.Tensor([0.0, 1.0, 2.0, 3.0])})
+ mock_engine.state = State(metrics={"a": torch.tensor([0.0, 1.0, 2.0, 3.0])})
mock_engine.state.iteration = 5

mock_logger = MagicMock(spec=TensorboardLogger)
4 changes: 2 additions & 2 deletions tests/ignite/contrib/handlers/test_visdom_logger.py
@@ -32,7 +32,7 @@ def test_optimizer_params_handler_wrong_setup():

def test_optimizer_params():

- optimizer = torch.optim.SGD([torch.Tensor(0)], lr=0.01)
+ optimizer = torch.optim.SGD([torch.tensor(0.0)], lr=0.01)
wrapper = OptimizerParamsHandler(optimizer=optimizer, param_name="lr")
mock_logger = MagicMock(spec=VisdomLogger)
mock_logger.vis = MagicMock()
@@ -181,7 +181,7 @@ def test_output_handler_metric_names(dirname):
wrapper = OutputHandler("tag", metric_names=["a"])

mock_engine = MagicMock()
- mock_engine.state = State(metrics={"a": torch.Tensor([0.0, 1.0, 2.0, 3.0])})
+ mock_engine.state = State(metrics={"a": torch.tensor([0.0, 1.0, 2.0, 3.0])})
mock_engine.state.iteration = 5

mock_logger = MagicMock(spec=VisdomLogger)
4 changes: 2 additions & 2 deletions tests/ignite/contrib/handlers/test_wandb_logger.py
@@ -26,7 +26,7 @@ def test_optimizer_params_handler_wrong_setup():


def test_optimizer_params():
- optimizer = torch.optim.SGD([torch.Tensor(0)], lr=0.01)
+ optimizer = torch.optim.SGD([torch.tensor(0.0)], lr=0.01)
wrapper = OptimizerParamsHandler(optimizer=optimizer, param_name="lr")
mock_logger = MagicMock(spec=WandBLogger)
mock_logger.log = MagicMock()
@@ -272,7 +272,7 @@ def test_output_handler_state_attrs():


def test_wandb_close():
- optimizer = torch.optim.SGD([torch.Tensor(0)], lr=0.01)
+ optimizer = torch.optim.SGD([torch.tensor(0.0)], lr=0.01)
wrapper = OptimizerParamsHandler(optimizer=optimizer, param_name="lr")
mock_logger = MagicMock(spec=WandBLogger)
mock_logger.log = MagicMock()
2 changes: 1 addition & 1 deletion tests/ignite/metrics/gan/test_fid.py
@@ -110,7 +110,7 @@ def test_wrong_inputs():
FID(num_features=-1, feature_extractor=torch.nn.Identity())

with pytest.raises(ValueError, match=r"feature_extractor output must be a tensor of dim 2, got: 1"):
- FID(num_features=1, feature_extractor=torch.nn.Identity()).update(torch.Tensor([[], []]))
+ FID(num_features=1, feature_extractor=torch.nn.Identity()).update(torch.tensor([[], []]))

with pytest.raises(ValueError, match=r"Batch size should be greater than one, got: 0"):
FID(num_features=1, feature_extractor=torch.nn.Identity()).update(torch.rand(2, 0, 0))
4 changes: 2 additions & 2 deletions tests/ignite/metrics/test_loss.py
@@ -31,7 +31,7 @@ def update(self, output):


def test_output_as_mapping_without_criterion_kwargs():
- y_pred = torch.Tensor([[2.0], [-2.0]])
+ y_pred = torch.tensor([[2.0], [-2.0]])
y = torch.zeros(2)
criterion_kwargs = {}

@@ -42,7 +42,7 @@ def test_output_as_mapping_without_criterion_kwargs():


def test_output_as_mapping_with_criterion_kwargs():
- y_pred = torch.Tensor([[2.0], [-2.0]])
+ y_pred = torch.tensor([[2.0], [-2.0]])
y = torch.zeros(2)
criterion_kwargs = {"reduction": "sum"}

2 changes: 1 addition & 1 deletion tests/ignite/metrics/test_mean_pairwise_distance.py
@@ -117,7 +117,7 @@ def _test_distrib_accumulator_device(device):
for dev in [mpd._device, mpd._sum_of_distances.device]:
assert dev == metric_device, f"{type(dev)}:{dev} vs {type(metric_device)}:{metric_device}"

- y_pred = torch.Tensor([[3.0, 4.0], [-3.0, -4.0]])
+ y_pred = torch.tensor([[3.0, 4.0], [-3.0, -4.0]])
y = torch.zeros(2, 2)
mpd.update((y_pred, y))

6 changes: 3 additions & 3 deletions tests/ignite/metrics/test_metric.py
@@ -30,7 +30,7 @@ def update(self, output):


def test_no_transform():
- y_pred = torch.Tensor([[2.0], [-2.0]])
+ y_pred = torch.tensor([[2.0], [-2.0]])
y = torch.zeros(2)

metric = DummyMetric1(true_output=(y_pred, y))
@@ -40,7 +40,7 @@ def test_no_transform():


def test_transform():
- y_pred = torch.Tensor([[2.0], [-2.0]])
+ y_pred = torch.tensor([[2.0], [-2.0]])
y = torch.zeros(2)

def transform(output):
@@ -87,7 +87,7 @@ def update(self, output):


def test_output_as_mapping():
- y_pred = torch.Tensor([[2.0], [-2.0]])
+ y_pred = torch.tensor([[2.0], [-2.0]])
y = torch.zeros(2)

metric = DummyMetric1(true_output=(y_pred, y))