diff --git a/.github/workflows/trigger_circle_ci.py b/.github/workflows/trigger_circle_ci.py
index 28ad6675b0d3..ff8ce3dddd86 100644
--- a/.github/workflows/trigger_circle_ci.py
+++ b/.github/workflows/trigger_circle_ci.py
@@ -24,7 +24,7 @@ def trigger_new_pipeline(data, headers):
"https://circleci.com/api/v2/project/gh/pytorch/ignite/pipeline", data=json.dumps(data), headers=headers
)
assert_result(result, 201)
- output = get_output(result.text, ["id",])
+ output = get_output(result.text, ["id"])
return output["id"]
@@ -46,7 +46,7 @@ def get_workflow_id(pipeline_id, headers):
while True:
result = requests.get(f"https://circleci.com/api/v2/pipeline/{pipeline_id}/workflow", headers=headers)
assert_result(result, 200)
- output = get_output(result.text, ["items",])
+ output = get_output(result.text, ["items"])
items = output["items"]
if len(items) > 1:
raise RuntimeError(f"Incorrect number of workflow ids: {len(items)} != 1\n" f"items: {items}")
diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
index eeef4fb23814..bc889ea2189a 100644
--- a/.pre-commit-config.yaml
+++ b/.pre-commit-config.yaml
@@ -15,7 +15,7 @@ repos:
exclude_types: ["python", "jupyter", "shell", "gitignore"]
- repo: https://github.com/python/black
- rev: 19.10b0
+ rev: 21.12b0
hooks:
- id: black
language_version: python3.8
diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
index 54b23fcf202d..ae05a721cf00 100644
--- a/CONTRIBUTING.md
+++ b/CONTRIBUTING.md
@@ -109,11 +109,11 @@ If you modify the code, you will most probably also need to code some tests to e
- naming convention for files `test_*.py`, e.g. `test_precision.py`
- naming of testing functions `def test_*`, e.g. `def test_precision_on_random_data()`
- - if test function should run on GPU, please **make sure to add `cuda`** in the test name, e.g. `def test_something_on_cuda()`.
+ - if test function should run on GPU, please **make sure to add `cuda`** in the test name, e.g. `def test_something_on_cuda()`.
Additionally, we may want to decorate it with `@pytest.mark.skipif(not torch.cuda.is_available(), reason="Skip if no GPU")`.
For more examples, please see https://github.com/pytorch/ignite/blob/master/tests/ignite/engine/test_create_supervised.py
- - if test function checks distributed configuration, we have to mark the test as `@pytest.mark.distributed` and additional
- conditions depending on the intended checks. For example, please see
+ - if test function checks distributed configuration, we have to mark the test as `@pytest.mark.distributed` and additional
+ conditions depending on the intended checks. For example, please see
https://github.com/pytorch/ignite/blob/master/tests/ignite/metrics/test_accuracy.py
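
A minimal sketch of the test conventions listed above (GPU tests named with `cuda` and guarded by `skipif`, distributed tests marked with `@pytest.mark.distributed`); the test bodies and the second test's name are illustrative assumptions, not code taken from the ignite test suite:

```python
# Illustrative sketch only, assuming pytest and torch are installed.
import pytest
import torch
import torch.distributed as dist


@pytest.mark.skipif(not torch.cuda.is_available(), reason="Skip if no GPU")
def test_something_on_cuda():
    # "cuda" in the test name flags it as GPU-bound; the skipif guard
    # keeps it from failing on CPU-only machines.
    x = torch.rand(4, device="cuda")
    assert x.is_cuda


@pytest.mark.distributed
def test_some_distributed_check():
    # The custom "distributed" marker (registered in the project's pytest config)
    # restricts the test to distributed CI jobs; real tests add further skip
    # conditions depending on the backends being checked.
    assert dist.is_available()
```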
@@ -131,7 +131,7 @@ format and check codebase for compliance with PEP8.
If you choose not to use pre-commit, you can take advantage of IDE extensions configured to black format or invoke
black manually to format files and commit them.
-To install `flake8`, `black==19.10b0`, `isort==5.7.0` and `mypy`, please run
+To install `flake8`, `black==21.12b0`, `isort==5.7.0` and `mypy`, please run
```bash
bash ./tests/run_code_style.sh install
```
diff --git a/docker/test_image.py b/docker/test_image.py
index a0407599281c..c02cd1448558 100644
--- a/docker/test_image.py
+++ b/docker/test_image.py
@@ -33,7 +33,7 @@ def main():
print(traceback.format_exc())
"""
try:
- out = client.containers.run(args.image, f"python -c '{try_except_cmd}'", auto_remove=True, stderr=True,)
+ out = client.containers.run(args.image, f"python -c '{try_except_cmd}'", auto_remove=True, stderr=True)
assert isinstance(out, bytes), type(out)
out = out.decode("utf-8").strip()
diff --git a/examples/contrib/cifar10/main.py b/examples/contrib/cifar10/main.py
index a951eaf5d8e2..34c1ee59e5e5 100644
--- a/examples/contrib/cifar10/main.py
+++ b/examples/contrib/cifar10/main.py
@@ -223,11 +223,11 @@ def get_dataflow(config):
# Setup data loader also adapted to distributed config: nccl, gloo, xla-tpu
train_loader = idist.auto_dataloader(
- train_dataset, batch_size=config["batch_size"], num_workers=config["num_workers"], shuffle=True, drop_last=True,
+ train_dataset, batch_size=config["batch_size"], num_workers=config["num_workers"], shuffle=True, drop_last=True
)
test_loader = idist.auto_dataloader(
- test_dataset, batch_size=2 * config["batch_size"], num_workers=config["num_workers"], shuffle=False,
+ test_dataset, batch_size=2 * config["batch_size"], num_workers=config["num_workers"], shuffle=False
)
return train_loader, test_loader
diff --git a/examples/contrib/cifar10/utils.py b/examples/contrib/cifar10/utils.py
index ccf712c21299..09e265a7a17a 100644
--- a/examples/contrib/cifar10/utils.py
+++ b/examples/contrib/cifar10/utils.py
@@ -13,7 +13,7 @@
]
)
-test_transform = Compose([ToTensor(), Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225)),])
+test_transform = Compose([ToTensor(), Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225))])
def get_train_test_datasets(path):
diff --git a/examples/contrib/cifar10_qat/main.py b/examples/contrib/cifar10_qat/main.py
index e1ac9bbfb810..196f0c87226d 100644
--- a/examples/contrib/cifar10_qat/main.py
+++ b/examples/contrib/cifar10_qat/main.py
@@ -208,11 +208,11 @@ def get_dataflow(config):
# Setup data loader also adapted to distributed config: nccl, gloo, xla-tpu
train_loader = idist.auto_dataloader(
- train_dataset, batch_size=config["batch_size"], num_workers=config["num_workers"], shuffle=True, drop_last=True,
+ train_dataset, batch_size=config["batch_size"], num_workers=config["num_workers"], shuffle=True, drop_last=True
)
test_loader = idist.auto_dataloader(
- test_dataset, batch_size=2 * config["batch_size"], num_workers=config["num_workers"], shuffle=False,
+ test_dataset, batch_size=2 * config["batch_size"], num_workers=config["num_workers"], shuffle=False
)
return train_loader, test_loader
diff --git a/examples/contrib/cifar10_qat/utils.py b/examples/contrib/cifar10_qat/utils.py
index 7df754ade47e..efceeb0ddcea 100644
--- a/examples/contrib/cifar10_qat/utils.py
+++ b/examples/contrib/cifar10_qat/utils.py
@@ -17,7 +17,7 @@
]
)
-test_transform = Compose([ToTensor(), Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225)),])
+test_transform = Compose([ToTensor(), Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225))])
def get_train_test_datasets(path):
diff --git a/examples/contrib/transformers/main.py b/examples/contrib/transformers/main.py
index 17bd01777eb1..b671169c923f 100644
--- a/examples/contrib/transformers/main.py
+++ b/examples/contrib/transformers/main.py
@@ -228,11 +228,11 @@ def get_dataflow(config):
# Setup data loader also adapted to distributed config: nccl, gloo, xla-tpu
train_loader = idist.auto_dataloader(
- train_dataset, batch_size=config["batch_size"], num_workers=config["num_workers"], shuffle=True, drop_last=True,
+ train_dataset, batch_size=config["batch_size"], num_workers=config["num_workers"], shuffle=True, drop_last=True
)
test_loader = idist.auto_dataloader(
- test_dataset, batch_size=2 * config["batch_size"], num_workers=config["num_workers"], shuffle=False,
+ test_dataset, batch_size=2 * config["batch_size"], num_workers=config["num_workers"], shuffle=False
)
return train_loader, test_loader
@@ -246,7 +246,7 @@ def initialize(config):
# Adapt model for distributed settings if configured
model = idist.auto_model(model)
- optimizer = optim.AdamW(model.parameters(), lr=config["learning_rate"], weight_decay=config["weight_decay"],)
+ optimizer = optim.AdamW(model.parameters(), lr=config["learning_rate"], weight_decay=config["weight_decay"])
optimizer = idist.auto_optim(optimizer)
criterion = nn.BCEWithLogitsLoss()
diff --git a/examples/gan/dcgan.py b/examples/gan/dcgan.py
index 7f3d40e83cfc..274e291f40ca 100644
--- a/examples/gan/dcgan.py
+++ b/examples/gan/dcgan.py
@@ -35,7 +35,7 @@
class Net(nn.Module):
- """ A base class for both generator and the discriminator.
+ """A base class for both generator and the discriminator.
Provides a common weight initialization scheme.
"""
@@ -56,7 +56,7 @@ def forward(self, x):
class Generator(Net):
- """ Generator network.
+ """Generator network.
Args:
nf (int): Number of filters in the second-to-last deconv layer
@@ -95,7 +95,7 @@ def forward(self, x):
class Discriminator(Net):
- """ Discriminator network.
+ """Discriminator network.
Args:
nf (int): Number of filters in the first conv layer.
@@ -133,9 +133,7 @@ def forward(self, x):
def check_manual_seed(seed):
- """ If manual seed is not specified, choose a random one and communicate it to the user.
-
- """
+ """If manual seed is not specified, choose a random one and communicate it to the user."""
seed = seed or random.randint(1, 10000)
random.seed(seed)
@@ -311,8 +309,8 @@ def step(engine, batch):
@trainer.on(Events.ITERATION_COMPLETED(every=PRINT_FREQ))
def print_logs(engine):
fname = os.path.join(output_dir, LOGS_FNAME)
- columns = ["iteration",] + list(engine.state.metrics.keys())
- values = [str(engine.state.iteration),] + [str(round(value, 5)) for value in engine.state.metrics.values()]
+ columns = ["iteration"] + list(engine.state.metrics.keys())
+ values = [str(engine.state.iteration)] + [str(round(value, 5)) for value in engine.state.metrics.values()]
with open(fname, "a") as f:
if f.tell() == 0:
diff --git a/examples/mnist/mnist_save_resume_engine.py b/examples/mnist/mnist_save_resume_engine.py
index 7c7d403cd7c8..310a2e001652 100644
--- a/examples/mnist/mnist_save_resume_engine.py
+++ b/examples/mnist/mnist_save_resume_engine.py
@@ -50,8 +50,7 @@ def forward(self, x):
def get_data_loaders(train_batch_size, val_batch_size):
- """Method to setup data loaders: train_loader and val_loader
- """
+ """Method to setup data loaders: train_loader and val_loader"""
data_transform = Compose([ToTensor(), Normalize((0.1307,), (0.3081,))])
train_loader = DataLoader(
@@ -71,8 +70,7 @@ def get_data_loaders(train_batch_size, val_batch_size):
def log_model_weights(engine, model=None, fp=None, **kwargs):
- """Helper method to log norms of model weights: print and dump into a file
- """
+ """Helper method to log norms of model weights: print and dump into a file"""
assert model and fp
output = {"total": 0.0}
max_counter = 5
@@ -92,8 +90,7 @@ def log_model_weights(engine, model=None, fp=None, **kwargs):
def log_model_grads(engine, model=None, fp=None, **kwargs):
- """Helper method to log norms of model gradients: print and dump into a file
- """
+ """Helper method to log norms of model gradients: print and dump into a file"""
assert model and fp
output = {"grads/total": 0.0}
max_counter = 5
@@ -116,8 +113,7 @@ def log_model_grads(engine, model=None, fp=None, **kwargs):
def log_data_stats(engine, fp=None, **kwargs):
- """Helper method to log mean/std of input batch of images and median of batch of targets.
- """
+ """Helper method to log mean/std of input batch of images and median of batch of targets."""
assert fp
x, y = engine.state.batch
output = {
diff --git a/examples/mnist/mnist_with_tensorboard_on_tpu.py b/examples/mnist/mnist_with_tensorboard_on_tpu.py
index 38eaabe09a92..1aecc4efde0b 100644
--- a/examples/mnist/mnist_with_tensorboard_on_tpu.py
+++ b/examples/mnist/mnist_with_tensorboard_on_tpu.py
@@ -84,7 +84,7 @@ def run(train_batch_size, val_batch_size, epochs, lr, momentum, log_interval, lo
# Create trainer and evaluator
trainer = create_supervised_trainer(
- model, optimizer, criterion, device=device, output_transform=lambda x, y, y_pred, loss: [loss.item(),]
+ model, optimizer, criterion, device=device, output_transform=lambda x, y, y_pred, loss: [loss.item()]
)
val_metrics = {"accuracy": Accuracy(), "nll": Loss(criterion)}
diff --git a/examples/references/classification/imagenet/code/dataflow/dataloaders.py b/examples/references/classification/imagenet/code/dataflow/dataloaders.py
index 572272e76dfa..8097b724d105 100644
--- a/examples/references/classification/imagenet/code/dataflow/dataloaders.py
+++ b/examples/references/classification/imagenet/code/dataflow/dataloaders.py
@@ -52,16 +52,16 @@ def get_train_val_loaders(
train_eval_ds = train_ds
train_loader = idist.auto_dataloader(
- train_ds, shuffle=True, batch_size=batch_size, num_workers=num_workers, drop_last=True,
+ train_ds, shuffle=True, batch_size=batch_size, num_workers=num_workers, drop_last=True
)
val_batch_size = batch_size * 4 if val_batch_size is None else val_batch_size
val_loader = idist.auto_dataloader(
- val_ds, shuffle=False, batch_size=val_batch_size, num_workers=num_workers, drop_last=False,
+ val_ds, shuffle=False, batch_size=val_batch_size, num_workers=num_workers, drop_last=False
)
train_eval_loader = idist.auto_dataloader(
- train_eval_ds, shuffle=False, batch_size=val_batch_size, num_workers=num_workers, drop_last=False,
+ train_eval_ds, shuffle=False, batch_size=val_batch_size, num_workers=num_workers, drop_last=False
)
return train_loader, val_loader, train_eval_loader
diff --git a/examples/references/classification/imagenet/code/scripts/training.py b/examples/references/classification/imagenet/code/scripts/training.py
index 33d9f2ba3c0f..f66c978c8094 100644
--- a/examples/references/classification/imagenet/code/scripts/training.py
+++ b/examples/references/classification/imagenet/code/scripts/training.py
@@ -61,7 +61,7 @@ def train_update_function(engine, batch):
"supervised batch loss": loss.item(),
}
- output_names = getattr(config, "output_names", ["supervised batch loss",])
+ output_names = getattr(config, "output_names", ["supervised batch loss"])
lr_scheduler = config.lr_scheduler
trainer = Engine(train_update_function)
@@ -97,7 +97,7 @@ def create_evaluators(model, metrics, config):
device=config.device,
non_blocking=True,
prepare_batch=config.prepare_batch,
- output_transform=lambda x, y, y_pred: (model_output_transform(y_pred), y,),
+ output_transform=lambda x, y, y_pred: (model_output_transform(y_pred), y),
)
train_evaluator = create_supervised_evaluator(**evaluator_args)
evaluator = create_supervised_evaluator(**evaluator_args)
diff --git a/examples/references/classification/imagenet/code/utils/exp_tracking.py b/examples/references/classification/imagenet/code/utils/exp_tracking.py
index 36bbc4d37f69..b81f23a38d7e 100644
--- a/examples/references/classification/imagenet/code/utils/exp_tracking.py
+++ b/examples/references/classification/imagenet/code/utils/exp_tracking.py
@@ -69,9 +69,7 @@ def _plx_log_params(params_dict):
from polyaxon_client.tracking import Experiment
plx_exp = Experiment()
- plx_exp.log_params(
- **{"pytorch version": torch.__version__, "ignite version": ignite.__version__,}
- )
+ plx_exp.log_params(**{"pytorch version": torch.__version__, "ignite version": ignite.__version__})
plx_exp.log_params(**params_dict)
@@ -86,9 +84,7 @@ def _mlflow_log_artifact(fp):
@idist.one_rank_only()
def _mlflow_log_params(params_dict):
- mlflow.log_params(
- {"pytorch version": torch.__version__, "ignite version": ignite.__version__,}
- )
+ mlflow.log_params({"pytorch version": torch.__version__, "ignite version": ignite.__version__})
mlflow.log_params(params_dict)
diff --git a/examples/references/segmentation/pascal_voc2012/configs/baseline_dplv3_resnet101.py b/examples/references/segmentation/pascal_voc2012/configs/baseline_dplv3_resnet101.py
index 1dae8b2109bc..2746ca5cd244 100644
--- a/examples/references/segmentation/pascal_voc2012/configs/baseline_dplv3_resnet101.py
+++ b/examples/references/segmentation/pascal_voc2012/configs/baseline_dplv3_resnet101.py
@@ -106,7 +106,7 @@ def model_output_transform(output):
nesterov = False
optimizer = optim.SGD(
- [{"params": model.backbone.parameters()}, {"params": model.classifier.parameters()},],
+ [{"params": model.backbone.parameters()}, {"params": model.classifier.parameters()}],
lr=1.0,
momentum=momentum,
weight_decay=weight_decay,
diff --git a/examples/references/segmentation/pascal_voc2012/configs/baseline_dplv3_resnet101_sbd.py b/examples/references/segmentation/pascal_voc2012/configs/baseline_dplv3_resnet101_sbd.py
index c325e2e6e22d..46ec22116cff 100644
--- a/examples/references/segmentation/pascal_voc2012/configs/baseline_dplv3_resnet101_sbd.py
+++ b/examples/references/segmentation/pascal_voc2012/configs/baseline_dplv3_resnet101_sbd.py
@@ -110,7 +110,7 @@ def model_output_transform(output):
nesterov = False
optimizer = optim.SGD(
- [{"params": model.backbone.parameters()}, {"params": model.classifier.parameters()},],
+ [{"params": model.backbone.parameters()}, {"params": model.classifier.parameters()}],
lr=1.0,
momentum=momentum,
weight_decay=weight_decay,
diff --git a/examples/references/segmentation/pascal_voc2012/dataflow.py b/examples/references/segmentation/pascal_voc2012/dataflow.py
index 93c4a42cebae..6faf0c5a8948 100644
--- a/examples/references/segmentation/pascal_voc2012/dataflow.py
+++ b/examples/references/segmentation/pascal_voc2012/dataflow.py
@@ -66,7 +66,7 @@ def __getitem__(self, index):
return {
"image": img,
"mask": mask,
- "meta": {"index": index, "image_path": self.images[index], "mask_path": self.masks[index],},
+ "meta": {"index": index, "image_path": self.images[index], "mask_path": self.masks[index]},
}
return {"image": img, "mask": mask}
@@ -93,7 +93,7 @@ def __getitem__(self, index):
return {
"image": img,
"mask": mask,
- "meta": {"index": index, "image_path": self.images[index], "mask_path": self.masks[index],},
+ "meta": {"index": index, "image_path": self.images[index], "mask_path": self.masks[index]},
}
return {"image": img, "mask": mask}
@@ -101,12 +101,12 @@ def __getitem__(self, index):
def get_train_dataset(root_path, return_meta=False):
return VOCSegmentationOpencv(
- root=root_path, year="2012", image_set="train", download=False, return_meta=return_meta,
+ root=root_path, year="2012", image_set="train", download=False, return_meta=return_meta
)
def get_val_dataset(root_path, return_meta=False):
- return VOCSegmentationOpencv(root=root_path, year="2012", image_set="val", download=False, return_meta=return_meta,)
+ return VOCSegmentationOpencv(root=root_path, year="2012", image_set="val", download=False, return_meta=return_meta)
def get_train_noval_sbdataset(root_path, return_meta=False):
@@ -187,7 +187,7 @@ def get_train_val_loaders(
def get_inference_dataloader(
- root_path, mode, transforms, batch_size=16, num_workers=8, pin_memory=True, limit_num_samples=None,
+ root_path, mode, transforms, batch_size=16, num_workers=8, pin_memory=True, limit_num_samples=None
):
assert mode in ("train", "test"), "Mode should be 'train' or 'test'"
diff --git a/examples/references/segmentation/pascal_voc2012/main.py b/examples/references/segmentation/pascal_voc2012/main.py
index ef7cba0d3e71..e30c17054044 100644
--- a/examples/references/segmentation/pascal_voc2012/main.py
+++ b/examples/references/segmentation/pascal_voc2012/main.py
@@ -138,7 +138,7 @@ def custom_event_filter(_, val_iteration):
tb_logger.attach(
evaluator,
log_handler=vis.predictions_gt_images_handler(
- img_denormalize_fn=img_denormalize, n_images=15, another_engine=trainer, prefix_tag="validation",
+ img_denormalize_fn=img_denormalize, n_images=15, another_engine=trainer, prefix_tag="validation"
),
event_name=Events.ITERATION_COMPLETED(event_filter=custom_event_filter),
)
@@ -296,7 +296,7 @@ def setup_experiment_tracking(config, with_clearml, task_type="training"):
schema = TrainvalConfigSchema if task_type == "training" else InferenceConfigSchema
- task = Task.init("Pascal-VOC12 Training", config.config_filepath.stem, task_type=task_type,)
+ task = Task.init("Pascal-VOC12 Training", config.config_filepath.stem, task_type=task_type)
task.connect_configuration(config.config_filepath.as_posix())
task.upload_artifact(config.script_filepath.name, config.script_filepath.as_posix())
@@ -313,12 +313,8 @@ def setup_experiment_tracking(config, with_clearml, task_type="training"):
output_path = output_path / datetime.now().strftime("%Y%m%d-%H%M%S")
output_path.mkdir(parents=True, exist_ok=True)
- shutil.copyfile(
- config.script_filepath.as_posix(), output_path / config.script_filepath.name,
- )
- shutil.copyfile(
- config.config_filepath.as_posix(), output_path / config.config_filepath.name,
- )
+ shutil.copyfile(config.script_filepath.as_posix(), output_path / config.script_filepath.name)
+ shutil.copyfile(config.config_filepath.as_posix(), output_path / config.config_filepath.name)
output_path = output_path.as_posix()
return Path(idist.broadcast(output_path, src=0))
@@ -419,9 +415,7 @@ def evaluation(local_rank, config, logger, with_clearml):
# Setup Tensorboard logger
if rank == 0:
tb_logger = common.TensorboardLogger(log_dir=config.output_path.as_posix())
- tb_logger.attach_output_handler(
- evaluator, event_name=Events.COMPLETED, tag="validation", metric_names="all",
- )
+ tb_logger.attach_output_handler(evaluator, event_name=Events.COMPLETED, tag="validation", metric_names="all")
# Log confusion matrix to ClearML:
if with_clearml:
@@ -473,6 +467,4 @@ def run_evaluation(config_filepath, backend="nccl", with_clearml=True):
if __name__ == "__main__":
- fire.Fire(
- {"download": download_datasets, "training": run_training, "eval": run_evaluation,}
- )
+ fire.Fire({"download": download_datasets, "training": run_training, "eval": run_evaluation})
diff --git a/examples/references/segmentation/pascal_voc2012/utils.py b/examples/references/segmentation/pascal_voc2012/utils.py
index 99aada55795b..8fcb5060c01b 100644
--- a/examples/references/segmentation/pascal_voc2012/utils.py
+++ b/examples/references/segmentation/pascal_voc2012/utils.py
@@ -21,7 +21,7 @@ def initialize(config):
import horovod.torch as hvd
optimizer = hvd.DistributedOptimizer(
- optimizer, named_parameters=model.named_parameters(), backward_passes_per_step=accumulation_steps,
+ optimizer, named_parameters=model.named_parameters(), backward_passes_per_step=accumulation_steps
)
hvd.broadcast_optimizer_state(optimizer, root_rank=0)
if accumulation_steps > 1:
diff --git a/ignite/contrib/engines/common.py b/ignite/contrib/engines/common.py
index 53246d8e1605..31c241b2b181 100644
--- a/ignite/contrib/engines/common.py
+++ b/ignite/contrib/engines/common.py
@@ -562,8 +562,7 @@ def setup_trains_logging(
log_every_iters: int = 100,
**kwargs: Any,
) -> ClearMLLogger:
- """``setup_trains_logging`` was renamed to :func:`~ignite.contrib.engines.common.setup_clearml_logging`.
- """
+ """``setup_trains_logging`` was renamed to :func:`~ignite.contrib.engines.common.setup_clearml_logging`."""
warnings.warn("setup_trains_logging was renamed to setup_clearml_logging.")
return setup_clearml_logging(trainer, optimizers, evaluators, log_every_iters, **kwargs)
diff --git a/ignite/contrib/handlers/base_logger.py b/ignite/contrib/handlers/base_logger.py
index 48e9bf639fbd..4c19f77bbd49 100644
--- a/ignite/contrib/handlers/base_logger.py
+++ b/ignite/contrib/handlers/base_logger.py
@@ -84,8 +84,7 @@ def global_step_transform(engine: Engine, event_name: Union[str, Events]) -> int
def _setup_output_metrics_state_attrs(
self, engine: Engine, log_text: Optional[bool] = False, key_tuple: Optional[bool] = True
) -> Dict[Any, Any]:
- """Helper method to setup metrics and state attributes to log
- """
+ """Helper method to setup metrics and state attributes to log"""
metrics_state_attrs = OrderedDict()
if self.metric_names is not None:
if isinstance(self.metric_names, str) and self.metric_names == "all":
diff --git a/ignite/contrib/handlers/neptune_logger.py b/ignite/contrib/handlers/neptune_logger.py
index 27fb4f3158f2..1dd4c41795c3 100644
--- a/ignite/contrib/handlers/neptune_logger.py
+++ b/ignite/contrib/handlers/neptune_logger.py
@@ -463,7 +463,7 @@ def __call__(self, engine: Engine, logger: NeptuneLogger, event_name: Union[str,
name = name.replace(".", "/")
logger.log_metric(
- f"{tag_prefix}weights_{self.reduction.__name__}/{name}", x=global_step, y=self.reduction(p.data),
+ f"{tag_prefix}weights_{self.reduction.__name__}/{name}", x=global_step, y=self.reduction(p.data)
)
@@ -516,7 +516,7 @@ def __call__(self, engine: Engine, logger: NeptuneLogger, event_name: Union[str,
name = name.replace(".", "/")
logger.log_metric(
- f"{tag_prefix}grads_{self.reduction.__name__}/{name}", x=global_step, y=self.reduction(p.grad),
+ f"{tag_prefix}grads_{self.reduction.__name__}/{name}", x=global_step, y=self.reduction(p.grad)
)
diff --git a/ignite/contrib/handlers/tensorboard_logger.py b/ignite/contrib/handlers/tensorboard_logger.py
index 24dd6c9e9327..06a6dc6a5797 100644
--- a/ignite/contrib/handlers/tensorboard_logger.py
+++ b/ignite/contrib/handlers/tensorboard_logger.py
@@ -436,7 +436,7 @@ def __call__(self, engine: Engine, logger: TensorboardLogger, event_name: Union[
name = name.replace(".", "/")
logger.writer.add_histogram(
- tag=f"{tag_prefix}weights/{name}", values=p.data.detach().cpu().numpy(), global_step=global_step,
+ tag=f"{tag_prefix}weights/{name}", values=p.data.detach().cpu().numpy(), global_step=global_step
)
diff --git a/ignite/contrib/handlers/visdom_logger.py b/ignite/contrib/handlers/visdom_logger.py
index 0efeba817083..b5e2b4337f65 100644
--- a/ignite/contrib/handlers/visdom_logger.py
+++ b/ignite/contrib/handlers/visdom_logger.py
@@ -417,7 +417,7 @@ class OptimizerParamsHandler(BaseOptimizerParamsHandler, _BaseVisDrawer):
"""
def __init__(
- self, optimizer: Optimizer, param_name: str = "lr", tag: Optional[str] = None, show_legend: bool = False,
+ self, optimizer: Optimizer, param_name: str = "lr", tag: Optional[str] = None, show_legend: bool = False
):
super(OptimizerParamsHandler, self).__init__(optimizer, param_name, tag)
_BaseVisDrawer.__init__(self, show_legend=show_legend)
@@ -467,7 +467,7 @@ class WeightsScalarHandler(BaseWeightsScalarHandler, _BaseVisDrawer):
"""
def __init__(
- self, model: nn.Module, reduction: Callable = torch.norm, tag: Optional[str] = None, show_legend: bool = False,
+ self, model: nn.Module, reduction: Callable = torch.norm, tag: Optional[str] = None, show_legend: bool = False
):
super(WeightsScalarHandler, self).__init__(model, reduction, tag=tag)
_BaseVisDrawer.__init__(self, show_legend=show_legend)
@@ -516,7 +516,7 @@ class GradsScalarHandler(BaseWeightsScalarHandler, _BaseVisDrawer):
"""
def __init__(
- self, model: nn.Module, reduction: Callable = torch.norm, tag: Optional[str] = None, show_legend: bool = False,
+ self, model: nn.Module, reduction: Callable = torch.norm, tag: Optional[str] = None, show_legend: bool = False
):
super(GradsScalarHandler, self).__init__(model, reduction, tag)
_BaseVisDrawer.__init__(self, show_legend=show_legend)
diff --git a/ignite/contrib/handlers/wandb_logger.py b/ignite/contrib/handlers/wandb_logger.py
index c46808fac758..ffe677b3cf99 100644
--- a/ignite/contrib/handlers/wandb_logger.py
+++ b/ignite/contrib/handlers/wandb_logger.py
@@ -335,7 +335,7 @@ class OptimizerParamsHandler(BaseOptimizerParamsHandler):
"""
def __init__(
- self, optimizer: Optimizer, param_name: str = "lr", tag: Optional[str] = None, sync: Optional[bool] = None,
+ self, optimizer: Optimizer, param_name: str = "lr", tag: Optional[str] = None, sync: Optional[bool] = None
):
super(OptimizerParamsHandler, self).__init__(optimizer, param_name, tag)
self.sync = sync
diff --git a/ignite/contrib/metrics/regression/median_absolute_error.py b/ignite/contrib/metrics/regression/median_absolute_error.py
index 8ada851a1811..4fa8f9d84b25 100644
--- a/ignite/contrib/metrics/regression/median_absolute_error.py
+++ b/ignite/contrib/metrics/regression/median_absolute_error.py
@@ -60,9 +60,9 @@ class MedianAbsoluteError(EpochMetric):
"""
def __init__(
- self, output_transform: Callable = lambda x: x, device: Union[str, torch.device] = torch.device("cpu"),
+ self, output_transform: Callable = lambda x: x, device: Union[str, torch.device] = torch.device("cpu")
):
super(MedianAbsoluteError, self).__init__(
- median_absolute_error_compute_fn, output_transform=output_transform, device=device,
+ median_absolute_error_compute_fn, output_transform=output_transform, device=device
)
diff --git a/ignite/contrib/metrics/roc_auc.py b/ignite/contrib/metrics/roc_auc.py
index b6ff2d39f227..99706ddc3e0f 100644
--- a/ignite/contrib/metrics/roc_auc.py
+++ b/ignite/contrib/metrics/roc_auc.py
@@ -80,7 +80,7 @@ def __init__(
raise RuntimeError("This contrib module requires sklearn to be installed.")
super(ROC_AUC, self).__init__(
- roc_auc_compute_fn, output_transform=output_transform, check_compute_fn=check_compute_fn, device=device,
+ roc_auc_compute_fn, output_transform=output_transform, check_compute_fn=check_compute_fn, device=device
)
diff --git a/ignite/distributed/comp_models/base.py b/ignite/distributed/comp_models/base.py
index 2b083ecfbb6c..6b58d7069df4 100644
--- a/ignite/distributed/comp_models/base.py
+++ b/ignite/distributed/comp_models/base.py
@@ -95,7 +95,7 @@ def _encode_str(x: str, device: torch.device, size: int) -> torch.Tensor:
return padded_x.unsqueeze(0)
def _get_max_length(self, x: str, device: torch.device) -> int:
- size = torch.tensor([len(x),], device=device)
+ size = torch.tensor([len(x)], device=device)
size = self._do_all_reduce(size, "MAX")
return cast(int, size.item())
@@ -245,7 +245,7 @@ def broadcast(
if rank != src:
tensor = torch.empty(1, device=device, dtype=torch.float)
else:
- tensor = torch.tensor([tensor,], device=device, dtype=torch.float)
+ tensor = torch.tensor([tensor], device=device, dtype=torch.float)
elif isinstance(tensor, str):
tensor_to_str = True
max_length = self._get_max_length(tensor, device)
@@ -281,8 +281,7 @@ def barrier(self) -> None:
class _SerialModel(ComputationModel):
- """Private class defines non-distributed computation model for code compatibility with other distributed models.
- """
+ """Private class defines non-distributed computation model for code compatibility with other distributed models."""
name = "serial"
available_backends = ()
diff --git a/ignite/distributed/comp_models/horovod.py b/ignite/distributed/comp_models/horovod.py
index 0e313ed81ad1..dcfc227d1437 100644
--- a/ignite/distributed/comp_models/horovod.py
+++ b/ignite/distributed/comp_models/horovod.py
@@ -25,8 +25,7 @@
HOROVOD = "horovod"
class _HorovodDistModel(ComputationModel):
- """Private class for `Horovod `_ distributed computation model.
- """
+ """Private class for `Horovod `_ distributed computation model."""
name = "horovod-dist"
@@ -60,8 +59,7 @@ def create_from_backend(backend: str = HOROVOD, **kwargs: Any) -> "_HorovodDistM
return _HorovodDistModel(backend, **kwargs)
def __init__(self, backend: Optional[str] = None, **kwargs: Any) -> None:
- """This is a private method. Please, use `create_from_backend` or `create_from_context`
- """
+ """This is a private method. Please, use `create_from_backend` or `create_from_context`"""
super(_HorovodDistModel, self).__init__()
if backend is not None:
self._create_from_backend(backend, **kwargs)
diff --git a/ignite/distributed/comp_models/native.py b/ignite/distributed/comp_models/native.py
index 0b568497a5f3..b1fd40587cb5 100644
--- a/ignite/distributed/comp_models/native.py
+++ b/ignite/distributed/comp_models/native.py
@@ -82,8 +82,7 @@ def __init__(
rank: Optional[int] = None,
**kwargs: Any,
) -> None:
- """This is a private method. Please, use `create_from_backend` or `create_from_context`
- """
+ """This is a private method. Please, use `create_from_backend` or `create_from_context`"""
super(_NativeDistModel, self).__init__()
self._env_backup = None # type: Optional[Dict[str, str]]
self._local_rank = None # type: Optional[int]
@@ -177,7 +176,7 @@ def _compute_node_and_local_ranks(rank: int, hostnames: List[Tuple[str, ...]]) -
from collections import Counter
c = Counter(hostnames) # type: Counter
- sizes = torch.tensor([0,] + list(c.values()))
+ sizes = torch.tensor([0] + list(c.values()))
cumsum_sizes = torch.cumsum(sizes, dim=0)
node_rank = (rank // cumsum_sizes[1:]).clamp(0, 1).sum().item()
local_rank = rank - cumsum_sizes[node_rank].item()
@@ -503,8 +502,7 @@ def _expand_hostlist(nodelist: str) -> List[str]:
return result_hostlist
def _setup_ddp_vars_from_slurm_env(environ: Dict[str, str]) -> Dict[str, Union[str, int]]:
- """Method to setup DDP env vars required by PyTorch from SLURM env
- """
+ """Method to setup DDP env vars required by PyTorch from SLURM env"""
# 1) Tools like enroot can have hooks to translate slurm env vars to RANK, LOCAL_RANK, WORLD_SIZE etc
# See https://github.com/NVIDIA/enroot/blob/v3.1.0/conf/hooks/extra/50-slurm-pytorch.sh
# 2) User can use torch.distributed.launch tool to schedule on N local GPUs using 1 node, 1 task by SLURM
diff --git a/ignite/distributed/comp_models/xla.py b/ignite/distributed/comp_models/xla.py
index 58feca3d131f..ebf55240f4f3 100644
--- a/ignite/distributed/comp_models/xla.py
+++ b/ignite/distributed/comp_models/xla.py
@@ -45,8 +45,7 @@ def create_from_backend(backend: str = XLA_TPU, **kwargs: Any) -> "_XlaDistModel
return _XlaDistModel(backend=backend, **kwargs)
def __init__(self, backend: Optional[str] = None, **kwargs: Any):
- """This is a private method. Please, use `create_from_backend` or `create_from_context`
- """
+ """This is a private method. Please, use `create_from_backend` or `create_from_context`"""
super(_XlaDistModel, self).__init__()
if backend is not None:
self._create_from_backend(backend, **kwargs)
@@ -65,7 +64,7 @@ def _init_from_context(self) -> None:
def _compute_nproc_per_node(self) -> int:
tensor = torch.tensor([self.get_local_rank() + 1.0], dtype=torch.float).to(self.device())
- xm.all_reduce("max", [tensor,])
+ xm.all_reduce("max", [tensor])
return int(tensor.item())
def get_local_rank(self) -> int:
@@ -142,7 +141,7 @@ def _do_all_reduce(self, tensor: torch.Tensor, op: str = "SUM") -> torch.Tensor:
if op not in self._reduce_op_map:
raise ValueError(f"Unsupported reduction operation: '{op}'")
op = self._reduce_op_map[op]
- xm.all_reduce(op, [tensor,])
+ xm.all_reduce(op, [tensor])
return tensor
def _do_all_gather(self, tensor: torch.Tensor) -> torch.Tensor:
@@ -150,14 +149,14 @@ def _do_all_gather(self, tensor: torch.Tensor) -> torch.Tensor:
group_size = self.get_world_size()
output = torch.zeros((group_size,) + tensor.shape, dtype=tensor.dtype, device=tensor.device)
output[self.get_rank() % group_size] = tensor
- xm.all_reduce("sum", [output,])
+ xm.all_reduce("sum", [output])
return output.reshape(-1, *output.shape[2:])
def _do_broadcast(self, tensor: torch.Tensor, src: int) -> torch.Tensor:
# from https://github.com/jysohn23/xla/blob/model-parallel-colab/Gather_Scatter_Broadcast_PyTorch_XLA.ipynb
if src != self.get_rank():
tensor.fill_(0.0)
- xm.all_reduce("sum", [tensor,])
+ xm.all_reduce("sum", [tensor])
return tensor
def barrier(self) -> None:
diff --git a/ignite/distributed/launcher.py b/ignite/distributed/launcher.py
index 41dbe3b90e81..86b811f7b432 100644
--- a/ignite/distributed/launcher.py
+++ b/ignite/distributed/launcher.py
@@ -327,9 +327,7 @@ def __enter__(self) -> "Parallel":
f"Initialized distributed launcher with backend: '{self.backend}'"
)
msg = "\n\t".join([f"{k}: {v}" for k, v in self._spawn_params.items() if v is not None])
- self._logger.info( # type: ignore[attr-defined]
- f"- Parameters to spawn processes: \n\t{msg}"
- )
+ self._logger.info(f"- Parameters to spawn processes: \n\t{msg}") # type: ignore[attr-defined]
return self
diff --git a/ignite/distributed/utils.py b/ignite/distributed/utils.py
index 9505756ba6c2..04f8c86b6ef7 100644
--- a/ignite/distributed/utils.py
+++ b/ignite/distributed/utils.py
@@ -133,8 +133,7 @@ def model_name() -> str:
def get_world_size() -> int:
- """Returns world size of current distributed configuration. Returns 1 if no distributed configuration.
- """
+ """Returns world size of current distributed configuration. Returns 1 if no distributed configuration."""
if _need_to_sync and isinstance(_model, _SerialModel):
sync(temporary=True)
@@ -142,8 +141,7 @@ def get_world_size() -> int:
def get_rank() -> int:
- """Returns process rank within current distributed configuration. Returns 0 if no distributed configuration.
- """
+ """Returns process rank within current distributed configuration. Returns 0 if no distributed configuration."""
if _need_to_sync and isinstance(_model, _SerialModel):
sync(temporary=True)
@@ -151,8 +149,8 @@ def get_rank() -> int:
def get_local_rank() -> int:
- """Returns local process rank within current distributed configuration. Returns 0 if no distributed configuration.
- """
+ """Returns local process rank within current distributed configuration.
+ Returns 0 if no distributed configuration."""
if _need_to_sync and isinstance(_model, _SerialModel):
sync(temporary=True)
@@ -190,8 +188,7 @@ def get_node_rank() -> int:
def hostname() -> str:
- """Returns host name for current process within current distributed configuration.
- """
+ """Returns host name for current process within current distributed configuration."""
return socket.gethostname()
@@ -422,8 +419,7 @@ def broadcast(
def barrier() -> None:
- """Helper method to synchronize all processes.
- """
+ """Helper method to synchronize all processes."""
if _need_to_sync and isinstance(_model, _SerialModel):
sync(temporary=True)
@@ -543,8 +539,7 @@ def finalize() -> None:
def show_config() -> None:
- """Helper method to display distributed configuration via ``logging``.
- """
+ """Helper method to display distributed configuration via ``logging``."""
# setup parallel logger
logger = setup_logger(__name__)
diff --git a/ignite/engine/__init__.py b/ignite/engine/__init__.py
index 89e0a8c7ed48..c3c1a80c5621 100644
--- a/ignite/engine/__init__.py
+++ b/ignite/engine/__init__.py
@@ -33,9 +33,7 @@
def _prepare_batch(
batch: Sequence[torch.Tensor], device: Optional[Union[str, torch.device]] = None, non_blocking: bool = False
) -> Tuple[Union[torch.Tensor, Sequence, Mapping, str, bytes], ...]:
- """Prepare batch for training: pass to a device with options.
-
- """
+ """Prepare batch for training: pass to a device with options."""
x, y = batch
return (
convert_tensor(x, device=device, non_blocking=non_blocking),
diff --git a/ignite/engine/engine.py b/ignite/engine/engine.py
index 6006f530226c..dea2b158a6ab 100644
--- a/ignite/engine/engine.py
+++ b/ignite/engine/engine.py
@@ -446,14 +446,14 @@ def fire_event(self, event_name: Any) -> None:
return self._fire_event(event_name)
def terminate(self) -> None:
- """Sends terminate signal to the engine, so that it terminates completely the run after the current iteration.
- """
+ """Sends terminate signal to the engine, so that it terminates completely the run after
+ the current iteration."""
self.logger.info("Terminate signaled. Engine will stop after current iteration is finished.")
self.should_terminate = True
def terminate_epoch(self) -> None:
- """Sends terminate signal to the engine, so that it terminates the current epoch after the current iteration.
- """
+ """Sends terminate signal to the engine, so that it terminates the current epoch
+ after the current iteration."""
self.logger.info(
"Terminate current epoch is signaled. "
"Current epoch iteration will stop after current iteration is finished."
diff --git a/ignite/handlers/checkpoint.py b/ignite/handlers/checkpoint.py
index 2f7e6843149f..9a366c4f6ec5 100644
--- a/ignite/handlers/checkpoint.py
+++ b/ignite/handlers/checkpoint.py
@@ -451,7 +451,7 @@ def _setup_checkpoint(self) -> Dict[str, Dict[Any, Any]]:
@staticmethod
def setup_filename_pattern(
- with_prefix: bool = True, with_score: bool = True, with_score_name: bool = True, with_global_step: bool = True,
+ with_prefix: bool = True, with_score: bool = True, with_score_name: bool = True, with_global_step: bool = True
) -> str:
"""Helper method to get the default filename pattern for a checkpoint.
diff --git a/ignite/handlers/lr_finder.py b/ignite/handlers/lr_finder.py
index 665e73911597..cdde715faa03 100644
--- a/ignite/handlers/lr_finder.py
+++ b/ignite/handlers/lr_finder.py
@@ -288,7 +288,7 @@ def plot(
]
for lr in sug_lr:
ax.scatter(
- lr, corresponding_loss, color="red" if len(sug_lr) == 1 else None, s=75, marker="o", zorder=3,
+ lr, corresponding_loss, color="red" if len(sug_lr) == 1 else None, s=75, marker="o", zorder=3
)
# handle skip_end=0 properly
diff --git a/ignite/handlers/param_scheduler.py b/ignite/handlers/param_scheduler.py
index 615fe9bc40df..be0d50795262 100644
--- a/ignite/handlers/param_scheduler.py
+++ b/ignite/handlers/param_scheduler.py
@@ -28,9 +28,7 @@ class BaseParamScheduler(metaclass=ABCMeta):
"""
- def __init__(
- self, param_name: str, save_history: bool = False,
- ):
+ def __init__(self, param_name: str, save_history: bool = False):
self.param_name = param_name
self.event_index = 0
self._save_history = save_history
@@ -497,8 +495,7 @@ class CosineAnnealingScheduler(CyclicalScheduler):
"""
def get_param(self) -> float:
- """Method to get current optimizer's parameter value
- """
+ """Method to get current optimizer's parameter value"""
cycle_progress = self.event_index / self.cycle_size
return self.start_value + ((self.end_value - self.start_value) / 2) * (1 - math.cos(math.pi * cycle_progress))
@@ -782,8 +779,7 @@ def __call__(self, engine: Optional[Engine], name: Optional[str] = None) -> None
super(LRScheduler, self).__call__(engine, name)
def get_param(self) -> Union[float, List[float]]:
- """Method to get current optimizer's parameter value
- """
+ """Method to get current optimizer's parameter value"""
# Emulate context manager for pytorch>=1.4
self.lr_scheduler._get_lr_called_within_step = True # type: ignore[attr-defined]
lr_list = cast(List[float], self.lr_scheduler.get_lr())
diff --git a/ignite/handlers/state_param_scheduler.py b/ignite/handlers/state_param_scheduler.py
index ac4b72371a0b..bfee9726c55a 100644
--- a/ignite/handlers/state_param_scheduler.py
+++ b/ignite/handlers/state_param_scheduler.py
@@ -302,7 +302,7 @@ class ExpStateScheduler(StateParamScheduler):
"""
def __init__(
- self, initial_value: float, gamma: float, param_name: str, save_history: bool = False, create_new: bool = False,
+ self, initial_value: float, gamma: float, param_name: str, save_history: bool = False, create_new: bool = False
):
super(ExpStateScheduler, self).__init__(param_name, save_history, create_new)
self.initial_value = initial_value
diff --git a/ignite/handlers/time_profilers.py b/ignite/handlers/time_profilers.py
index 0d9e46d65e56..be0d8c6e8840 100644
--- a/ignite/handlers/time_profilers.py
+++ b/ignite/handlers/time_profilers.py
@@ -261,7 +261,7 @@ def get_results(self) -> Dict[str, Dict[str, Any]]:
[
("processing_stats", self._compute_basic_stats(self.processing_times)),
("dataflow_stats", self._compute_basic_stats(self.dataflow_times)),
- ("event_handlers_stats", event_handlers_stats,),
+ ("event_handlers_stats", event_handlers_stats),
(
"event_handlers_names",
{str(e.name).replace(".", "_") + "_names": v for e, v in self.event_handlers_names.items()},
@@ -601,7 +601,7 @@ def get_results(self) -> List[List[Union[str, float]]]:
for h in self.event_handlers_times[e]
]
)
- total_eh_time = round(float(total_eh_time), 5,)
+ total_eh_time = round(float(total_eh_time), 5)
def compute_basic_stats(
times: Union[Sequence, torch.Tensor]
@@ -681,9 +681,9 @@ def write_results(self, output_path: str) -> None:
# pad all tensors to have same length
cols = [torch.nn.functional.pad(x, pad=(0, max_len - x.numel()), mode="constant", value=0) for x in cols]
- results_dump = torch.stack(cols, dim=1,).numpy()
+ results_dump = torch.stack(cols, dim=1).numpy()
- results_df = pd.DataFrame(data=results_dump, columns=headers,)
+ results_df = pd.DataFrame(data=results_dump, columns=headers)
results_df.to_csv(output_path, index=False)
@staticmethod
diff --git a/ignite/handlers/timing.py b/ignite/handlers/timing.py
index 5f105ffdb587..16c65dc05705 100644
--- a/ignite/handlers/timing.py
+++ b/ignite/handlers/timing.py
@@ -7,7 +7,7 @@
class Timer:
- """ Timer object can be used to measure (average) time between events.
+ """Timer object can be used to measure (average) time between events.
Args:
average: if True, then when ``.value()`` method is called, the returned value
@@ -96,7 +96,7 @@ def attach(
resume: Optional[Events] = None,
step: Optional[Events] = None,
) -> "Timer":
- """ Register callbacks to control the timer.
+ """Register callbacks to control the timer.
Args:
engine: Engine that this timer will be attached to.
diff --git a/ignite/metrics/classification_report.py b/ignite/metrics/classification_report.py
index b2f04d797fb2..cd0265432da8 100644
--- a/ignite/metrics/classification_report.py
+++ b/ignite/metrics/classification_report.py
@@ -106,15 +106,15 @@ def ClassificationReport(
"""
# setup all the underlying metrics
- precision = Precision(average=False, is_multilabel=is_multilabel, output_transform=output_transform, device=device,)
- recall = Recall(average=False, is_multilabel=is_multilabel, output_transform=output_transform, device=device,)
+ precision = Precision(average=False, is_multilabel=is_multilabel, output_transform=output_transform, device=device)
+ recall = Recall(average=False, is_multilabel=is_multilabel, output_transform=output_transform, device=device)
fbeta = Fbeta(beta, average=False, precision=precision, recall=recall)
averaged_precision = precision.mean()
averaged_recall = recall.mean()
averaged_fbeta = fbeta.mean()
def _wrapper(
- recall_metric: Metric, precision_metric: Metric, f: Metric, a_recall: Metric, a_precision: Metric, a_f: Metric,
+ recall_metric: Metric, precision_metric: Metric, f: Metric, a_recall: Metric, a_precision: Metric, a_f: Metric
) -> Union[Collection[str], Dict]:
p_tensor, r_tensor, f_tensor = precision_metric, recall_metric, f
if p_tensor.shape != r_tensor.shape:
@@ -141,4 +141,4 @@ def _wrapper(
def _get_label_for_class(idx: int) -> str:
return labels[idx] if labels else str(idx)
- return MetricsLambda(_wrapper, recall, precision, fbeta, averaged_recall, averaged_precision, averaged_fbeta,)
+ return MetricsLambda(_wrapper, recall, precision, fbeta, averaged_recall, averaged_precision, averaged_fbeta)
diff --git a/ignite/metrics/metric.py b/ignite/metrics/metric.py
index 82059146c592..f48df9714d98 100644
--- a/ignite/metrics/metric.py
+++ b/ignite/metrics/metric.py
@@ -204,7 +204,7 @@ def compute(self):
_required_output_keys = required_output_keys
def __init__(
- self, output_transform: Callable = lambda x: x, device: Union[str, torch.device] = torch.device("cpu"),
+ self, output_transform: Callable = lambda x: x, device: Union[str, torch.device] = torch.device("cpu")
):
self._output_transform = output_transform
@@ -616,4 +616,4 @@ def _is_list_of_tensors_or_numbers(x: Sequence[Union[torch.Tensor, float]]) -> b
def _to_batched_tensor(x: Union[torch.Tensor, float], device: Optional[torch.device] = None) -> torch.Tensor:
if isinstance(x, torch.Tensor):
return x.unsqueeze(dim=0)
- return torch.tensor([x,], device=device)
+ return torch.tensor([x], device=device)
diff --git a/ignite/metrics/nlp/bleu.py b/ignite/metrics/nlp/bleu.py
index ab8dfb5cc37f..6f662046b8a9 100644
--- a/ignite/metrics/nlp/bleu.py
+++ b/ignite/metrics/nlp/bleu.py
@@ -181,7 +181,7 @@ def _n_gram_counter(
return hyp_lengths, ref_lengths
def _brevity_penalty_smoothing(
- self, p_numerators: torch.Tensor, p_denominators: torch.Tensor, hyp_length_sum: int, ref_length_sum: int,
+ self, p_numerators: torch.Tensor, p_denominators: torch.Tensor, hyp_length_sum: int, ref_length_sum: int
) -> float:
# Returns 0 if there's no matching n-grams
@@ -208,18 +208,16 @@ def _brevity_penalty_smoothing(
gm = bp * math.exp(math.fsum(s))
return gm
- def _sentence_bleu(self, references: Sequence[Sequence[Any]], candidates: Sequence[Any],) -> float:
+ def _sentence_bleu(self, references: Sequence[Sequence[Any]], candidates: Sequence[Any]) -> float:
return self._corpus_bleu([references], [candidates])
- def _corpus_bleu(
- self, references: Sequence[Sequence[Sequence[Any]]], candidates: Sequence[Sequence[Any]],
- ) -> float:
+ def _corpus_bleu(self, references: Sequence[Sequence[Sequence[Any]]], candidates: Sequence[Sequence[Any]]) -> float:
p_numerators: torch.Tensor = torch.zeros(self.ngrams_order + 1)
p_denominators: torch.Tensor = torch.zeros(self.ngrams_order + 1)
hyp_length_sum, ref_length_sum = self._n_gram_counter(
- references=references, candidates=candidates, p_numerators=p_numerators, p_denominators=p_denominators,
+ references=references, candidates=candidates, p_numerators=p_numerators, p_denominators=p_denominators
)
bleu_score = self._brevity_penalty_smoothing(
p_numerators=p_numerators,
diff --git a/ignite/metrics/nlp/rouge.py b/ignite/metrics/nlp/rouge.py
index 2784e51c9a5c..232663843160 100644
--- a/ignite/metrics/nlp/rouge.py
+++ b/ignite/metrics/nlp/rouge.py
@@ -152,7 +152,7 @@ def reset(self) -> None:
def update(self, output: Tuple[Sequence[Sequence[Any]], Sequence[Sequence[Sequence[Any]]]]) -> None:
candidates, references = output
for _candidate, _reference in zip(candidates, references):
- multiref_scores = [self._compute_score(candidate=_candidate, reference=_ref,) for _ref in _reference]
+ multiref_scores = [self._compute_score(candidate=_candidate, reference=_ref) for _ref in _reference]
score = self._mutliref_reducer(multiref_scores)
precision = score.precision()
recall = score.recall()
diff --git a/ignite/utils.py b/ignite/utils.py
index 5bc387e81b55..23995bde0d1b 100644
--- a/ignite/utils.py
+++ b/ignite/utils.py
@@ -285,7 +285,7 @@ def wrapper(*args: Any, **kwargs: Dict[str, Any]) -> Callable:
return decorator
-def hash_checkpoint(checkpoint_path: Union[str, Path], output_dir: Union[str, Path],) -> Tuple[Path, str]:
+def hash_checkpoint(checkpoint_path: Union[str, Path], output_dir: Union[str, Path]) -> Tuple[Path, str]:
"""
Hash the checkpoint file in the format of ``<filename>-<hash>.<ext>``
to be used with ``check_hash`` of :func:`torch.hub.load_state_dict_from_url`.
diff --git a/setup.py b/setup.py
index 9fc3ad74e7c1..cbfdc0a31edb 100644
--- a/setup.py
+++ b/setup.py
@@ -40,7 +40,7 @@ def find_version(*file_paths):
long_description=readme,
license="BSD",
# Package info
- packages=find_packages(exclude=("tests", "tests.*",)),
+ packages=find_packages(exclude=("tests", "tests.*")),
package_data={"ignite": ["py.typed"]},
zip_safe=False,
install_requires=requirements,
diff --git a/tests/ignite/conftest.py b/tests/ignite/conftest.py
index 5758bb892622..3f1e27cc3551 100644
--- a/tests/ignite/conftest.py
+++ b/tests/ignite/conftest.py
@@ -51,7 +51,7 @@ def func():
@pytest.fixture()
def local_rank(worker_id):
- """ use a different account in each xdist worker """
+ """use a different account in each xdist worker"""
if "gw" in worker_id:
lrank = int(worker_id.replace("gw", ""))
diff --git a/tests/ignite/contrib/engines/test_common.py b/tests/ignite/contrib/engines/test_common.py
index aed310d66bd6..5ad323a862fa 100644
--- a/tests/ignite/contrib/engines/test_common.py
+++ b/tests/ignite/contrib/engines/test_common.py
@@ -57,7 +57,7 @@ def _test_setup_common_training_handlers(
model = DummyModel().to(device)
if distributed and "cuda" in torch.device(device).type:
- model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[local_rank,], output_device=local_rank)
+ model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[local_rank], output_device=local_rank)
optimizer = torch.optim.SGD(model.parameters(), lr=lr)
if lr_scheduler is None:
@@ -98,7 +98,7 @@ def update_fn(engine, batch):
save_handler=save_handler,
lr_scheduler=lr_scheduler,
with_gpu_stats=False,
- output_names=["batch_loss",],
+ output_names=["batch_loss"],
with_pbars=True,
with_pbar_on_iters=True,
log_every_iters=50,
@@ -358,8 +358,8 @@ def set_eval_metric(engine):
evaluators = evaluators["validation"]
if with_optim:
- t = torch.tensor([0,])
- optimizers = {"optimizer": torch.optim.SGD([t,], lr=0.01)}
+ t = torch.tensor([0])
+ optimizers = {"optimizer": torch.optim.SGD([t], lr=0.01)}
if as_class:
optimizers = optimizers["optimizer"]
diff --git a/tests/ignite/contrib/handlers/test_mlflow_logger.py b/tests/ignite/contrib/handlers/test_mlflow_logger.py
index d907b9a873c8..949f1661b628 100644
--- a/tests/ignite/contrib/handlers/test_mlflow_logger.py
+++ b/tests/ignite/contrib/handlers/test_mlflow_logger.py
@@ -44,9 +44,7 @@ def test_output_handler_output_transform():
mock_logger.log_metrics = MagicMock()
wrapper(mock_engine, mock_logger, Events.ITERATION_STARTED)
- mock_logger.log_metrics.assert_called_once_with(
- {"another_tag loss": 12345}, step=123,
- )
+ mock_logger.log_metrics.assert_called_once_with({"another_tag loss": 12345}, step=123)
def test_output_handler_metric_names():
@@ -62,11 +60,9 @@ def test_output_handler_metric_names():
wrapper(mock_engine, mock_logger, Events.ITERATION_STARTED)
assert mock_logger.log_metrics.call_count == 1
- mock_logger.log_metrics.assert_called_once_with(
- {"tag a": 12.23, "tag b": 23.45, "tag c": 10.0}, step=5,
- )
+ mock_logger.log_metrics.assert_called_once_with({"tag a": 12.23, "tag b": 23.45, "tag c": 10.0}, step=5)
- wrapper = OutputHandler("tag", metric_names=["a",])
+ wrapper = OutputHandler("tag", metric_names=["a"])
mock_engine = MagicMock()
mock_engine.state = State(metrics={"a": torch.Tensor([0.0, 1.0, 2.0, 3.0])})
@@ -79,7 +75,7 @@ def test_output_handler_metric_names():
assert mock_logger.log_metrics.call_count == 1
mock_logger.log_metrics.assert_has_calls(
- [call({"tag a 0": 0.0, "tag a 1": 1.0, "tag a 2": 2.0, "tag a 3": 3.0}, step=5),], any_order=True
+ [call({"tag a 0": 0.0, "tag a 1": 1.0, "tag a 2": 2.0, "tag a 3": 3.0}, step=5)], any_order=True
)
wrapper = OutputHandler("tag", metric_names=["a", "c"])
@@ -112,9 +108,7 @@ def test_output_handler_both():
wrapper(mock_engine, mock_logger, Events.EPOCH_STARTED)
assert mock_logger.log_metrics.call_count == 1
- mock_logger.log_metrics.assert_called_once_with(
- {"tag a": 12.23, "tag b": 23.45, "tag loss": 12345}, step=5,
- )
+ mock_logger.log_metrics.assert_called_once_with({"tag a": 12.23, "tag b": 23.45, "tag loss": 12345}, step=5)
def test_output_handler_with_wrong_global_step_transform_output():
@@ -203,7 +197,7 @@ def test_output_handler_state_attrs():
wrapper(mock_engine, mock_logger, Events.ITERATION_STARTED)
mock_logger.log_metrics.assert_called_once_with(
- {"tag alpha": 3.899, "tag beta": torch.tensor(12.21).item(), "tag gamma 0": 21.0, "tag gamma 1": 6.0,}, step=5,
+ {"tag alpha": 3.899, "tag beta": torch.tensor(12.21).item(), "tag gamma 0": 21.0, "tag gamma 1": 6.0}, step=5
)
@@ -336,7 +330,7 @@ def test_mlflow_bad_metric_name_handling(dirname):
handler = OutputHandler(tag="training", metric_names="all")
engine = Engine(lambda e, b: None)
- engine.state = State(metrics={"metric:0 in %": 123.0, "metric 0": 1000.0,})
+ engine.state = State(metrics={"metric:0 in %": 123.0, "metric 0": 1000.0})
with pytest.warns(UserWarning, match=r"MLflowLogger output_handler encountered an invalid metric name"):
@@ -353,7 +347,7 @@ def test_mlflow_bad_metric_name_handling(dirname):
client = MlflowClient(tracking_uri=os.path.join(dirname, "mlruns"))
stored_values = client.get_metric_history(active_run.info.run_id, "training metric 0")
- for t, s in zip([1000.0,] + true_values, stored_values):
+ for t, s in zip([1000.0] + true_values, stored_values):
assert t == s.value
diff --git a/tests/ignite/contrib/handlers/test_neptune_logger.py b/tests/ignite/contrib/handlers/test_neptune_logger.py
index 11186422b485..8224459be73b 100644
--- a/tests/ignite/contrib/handlers/test_neptune_logger.py
+++ b/tests/ignite/contrib/handlers/test_neptune_logger.py
@@ -94,11 +94,9 @@ def test_output_handler_metric_names():
wrapper(mock_engine, mock_logger, Events.ITERATION_STARTED)
assert mock_logger.log_metric.call_count == 2
- mock_logger.log_metric.assert_has_calls(
- [call("tag/a", y=12.23, x=5), call("tag/b", y=23.45, x=5)], any_order=True,
- )
+ mock_logger.log_metric.assert_has_calls([call("tag/a", y=12.23, x=5), call("tag/b", y=23.45, x=5)], any_order=True)
- wrapper = OutputHandler("tag", metric_names=["a",],)
+ wrapper = OutputHandler("tag", metric_names=["a"])
mock_engine = MagicMock()
mock_logger.log_metric = MagicMock()
@@ -134,9 +132,7 @@ def test_output_handler_metric_names():
wrapper(mock_engine, mock_logger, Events.ITERATION_STARTED)
assert mock_logger.log_metric.call_count == 1
- mock_logger.log_metric.assert_has_calls(
- [call("tag/a", y=55.56, x=7),], any_order=True,
- )
+ mock_logger.log_metric.assert_has_calls([call("tag/a", y=55.56, x=7)], any_order=True)
# all metrics
wrapper = OutputHandler("tag", metric_names="all")
@@ -149,9 +145,7 @@ def test_output_handler_metric_names():
wrapper(mock_engine, mock_logger, Events.ITERATION_STARTED)
assert mock_logger.log_metric.call_count == 2
- mock_logger.log_metric.assert_has_calls(
- [call("tag/a", y=12.23, x=5), call("tag/b", y=23.45, x=5),], any_order=True,
- )
+ mock_logger.log_metric.assert_has_calls([call("tag/a", y=12.23, x=5), call("tag/b", y=23.45, x=5)], any_order=True)
# log a torch tensor (ndimension = 0)
wrapper = OutputHandler("tag", metric_names="all")
@@ -165,7 +159,7 @@ def test_output_handler_metric_names():
assert mock_logger.log_metric.call_count == 2
mock_logger.log_metric.assert_has_calls(
- [call("tag/a", y=torch.tensor(12.23).item(), x=5), call("tag/b", y=torch.tensor(23.45).item(), x=5),],
+ [call("tag/a", y=torch.tensor(12.23).item(), x=5), call("tag/b", y=torch.tensor(23.45).item(), x=5)],
any_order=True,
)
@@ -342,13 +336,13 @@ def test_weights_scalar_handler_frozen_layers(dummy_model_factory):
wrapper(mock_engine, mock_logger, Events.EPOCH_STARTED)
mock_logger.log_metric.assert_has_calls(
- [call("weights_norm/fc2/weight", y=12.0, x=5), call("weights_norm/fc2/bias", y=math.sqrt(12.0), x=5),],
+ [call("weights_norm/fc2/weight", y=12.0, x=5), call("weights_norm/fc2/bias", y=math.sqrt(12.0), x=5)],
any_order=True,
)
with pytest.raises(AssertionError):
mock_logger.log_metric.assert_has_calls(
- [call("weights_norm/fc1/weight", y=12.0, x=5), call("weights_norm/fc1/bias", y=math.sqrt(12.0), x=5),],
+ [call("weights_norm/fc1/weight", y=12.0, x=5), call("weights_norm/fc1/bias", y=math.sqrt(12.0), x=5)],
any_order=True,
)
@@ -417,12 +411,12 @@ def test_grads_scalar_handler_frozen_layers(dummy_model_factory, norm_mock):
wrapper(mock_engine, mock_logger, Events.EPOCH_STARTED)
mock_logger.log_metric.assert_has_calls(
- [call("grads_norm/fc2/weight", y=ANY, x=5), call("grads_norm/fc2/bias", y=ANY, x=5),], any_order=True,
+ [call("grads_norm/fc2/weight", y=ANY, x=5), call("grads_norm/fc2/bias", y=ANY, x=5)], any_order=True
)
with pytest.raises(AssertionError):
mock_logger.log_metric.assert_has_calls(
- [call("grads_norm/fc1/weight", y=ANY, x=5), call("grads_norm/fc1/bias", y=ANY, x=5),], any_order=True,
+ [call("grads_norm/fc1/weight", y=ANY, x=5), call("grads_norm/fc1/bias", y=ANY, x=5)], any_order=True
)
assert mock_logger.log_metric.call_count == 2
assert norm_mock.call_count == 2
diff --git a/tests/ignite/contrib/handlers/test_polyaxon_logger.py b/tests/ignite/contrib/handlers/test_polyaxon_logger.py
index 660ebbfb1f2a..a97c7709784d 100644
--- a/tests/ignite/contrib/handlers/test_polyaxon_logger.py
+++ b/tests/ignite/contrib/handlers/test_polyaxon_logger.py
@@ -63,7 +63,7 @@ def test_output_handler_metric_names():
assert mock_logger.log_metrics.call_count == 1
mock_logger.log_metrics.assert_called_once_with(step=5, **{"tag/a": 12.23, "tag/b": 23.45, "tag/c": 10.0})
- wrapper = OutputHandler("tag", metric_names=["a",])
+ wrapper = OutputHandler("tag", metric_names=["a"])
mock_engine = MagicMock()
mock_engine.state = State(metrics={"a": torch.Tensor([0.0, 1.0, 2.0, 3.0])})
@@ -76,7 +76,7 @@ def test_output_handler_metric_names():
assert mock_logger.log_metrics.call_count == 1
mock_logger.log_metrics.assert_has_calls(
- [call(step=5, **{"tag/a/0": 0.0, "tag/a/1": 1.0, "tag/a/2": 2.0, "tag/a/3": 3.0}),], any_order=True
+ [call(step=5, **{"tag/a/0": 0.0, "tag/a/1": 1.0, "tag/a/2": 2.0, "tag/a/3": 3.0})], any_order=True
)
wrapper = OutputHandler("tag", metric_names=["a", "c"])
@@ -212,8 +212,7 @@ def test_output_handler_state_attrs():
wrapper(mock_engine, mock_logger, Events.ITERATION_STARTED)
mock_logger.log_metrics.assert_called_once_with(
- **{"tag/alpha": 3.899, "tag/beta": torch.tensor(12.21).item(), "tag/gamma/0": 21.0, "tag/gamma/1": 6.0,},
- step=5,
+ **{"tag/alpha": 3.899, "tag/beta": torch.tensor(12.21).item(), "tag/gamma/0": 21.0, "tag/gamma/1": 6.0}, step=5
)
@@ -322,7 +321,5 @@ def no_site_packages():
def test_no_polyaxon_client(no_site_packages):
- with pytest.raises(
- RuntimeError, match=r"This contrib module requires polyaxon",
- ):
+ with pytest.raises(RuntimeError, match=r"This contrib module requires polyaxon"):
PolyaxonLogger()
diff --git a/tests/ignite/contrib/handlers/test_tensorboard_logger.py b/tests/ignite/contrib/handlers/test_tensorboard_logger.py
index 2b06cce9e169..eaba3237480b 100644
--- a/tests/ignite/contrib/handlers/test_tensorboard_logger.py
+++ b/tests/ignite/contrib/handlers/test_tensorboard_logger.py
@@ -99,11 +99,9 @@ def test_output_handler_metric_names():
wrapper(mock_engine, mock_logger, Events.ITERATION_STARTED)
assert mock_logger.writer.add_scalar.call_count == 2
- mock_logger.writer.add_scalar.assert_has_calls(
- [call("tag/a", 12.23, 5), call("tag/b", 23.45, 5),], any_order=True,
- )
+ mock_logger.writer.add_scalar.assert_has_calls([call("tag/a", 12.23, 5), call("tag/b", 23.45, 5)], any_order=True)
- wrapper = OutputHandler("tag", metric_names=["a",],)
+ wrapper = OutputHandler("tag", metric_names=["a"])
mock_engine = MagicMock()
mock_engine.state = State(metrics={"a": torch.Tensor([0.0, 1.0, 2.0, 3.0])})
@@ -116,7 +114,7 @@ def test_output_handler_metric_names():
assert mock_logger.writer.add_scalar.call_count == 4
mock_logger.writer.add_scalar.assert_has_calls(
- [call("tag/a/0", 0.0, 5), call("tag/a/1", 1.0, 5), call("tag/a/2", 2.0, 5), call("tag/a/3", 3.0, 5),],
+ [call("tag/a/0", 0.0, 5), call("tag/a/1", 1.0, 5), call("tag/a/2", 2.0, 5), call("tag/a/3", 3.0, 5)],
any_order=True,
)
@@ -133,9 +131,7 @@ def test_output_handler_metric_names():
wrapper(mock_engine, mock_logger, Events.ITERATION_STARTED)
assert mock_logger.writer.add_scalar.call_count == 1
- mock_logger.writer.add_scalar.assert_has_calls(
- [call("tag/a", 55.56, 7),], any_order=True,
- )
+ mock_logger.writer.add_scalar.assert_has_calls([call("tag/a", 55.56, 7)], any_order=True)
# all metrics
wrapper = OutputHandler("tag", metric_names="all")
@@ -149,9 +145,7 @@ def test_output_handler_metric_names():
wrapper(mock_engine, mock_logger, Events.ITERATION_STARTED)
assert mock_logger.writer.add_scalar.call_count == 2
- mock_logger.writer.add_scalar.assert_has_calls(
- [call("tag/a", 12.23, 5), call("tag/b", 23.45, 5),], any_order=True,
- )
+ mock_logger.writer.add_scalar.assert_has_calls([call("tag/a", 12.23, 5), call("tag/b", 23.45, 5)], any_order=True)
# log a torch tensor (ndimension = 0)
wrapper = OutputHandler("tag", metric_names="all")
@@ -166,7 +160,7 @@ def test_output_handler_metric_names():
assert mock_logger.writer.add_scalar.call_count == 2
mock_logger.writer.add_scalar.assert_has_calls(
- [call("tag/a", torch.tensor(12.23).item(), 5), call("tag/b", torch.tensor(23.45).item(), 5),], any_order=True,
+ [call("tag/a", torch.tensor(12.23).item(), 5), call("tag/b", torch.tensor(23.45).item(), 5)], any_order=True
)
@@ -328,12 +322,12 @@ def test_weights_scalar_handler_frozen_layers(dummy_model_factory):
wrapper(mock_engine, mock_logger, Events.EPOCH_STARTED)
mock_logger.writer.add_scalar.assert_has_calls(
- [call("weights_norm/fc2/weight", 12.0, 5), call("weights_norm/fc2/bias", math.sqrt(12.0), 5),], any_order=True,
+ [call("weights_norm/fc2/weight", 12.0, 5), call("weights_norm/fc2/bias", math.sqrt(12.0), 5)], any_order=True
)
with pytest.raises(AssertionError):
mock_logger.writer.add_scalar.assert_has_calls(
- [call("weights_norm/fc1/weight", 12.0, 5), call("weights_norm/fc1/bias", math.sqrt(12.0), 5),],
+ [call("weights_norm/fc1/weight", 12.0, 5), call("weights_norm/fc1/bias", math.sqrt(12.0), 5)],
any_order=True,
)
@@ -484,12 +478,12 @@ def test_grads_scalar_handler_frozen_layers(dummy_model_factory, norm_mock):
wrapper(mock_engine, mock_logger, Events.EPOCH_STARTED)
mock_logger.writer.add_scalar.assert_has_calls(
- [call("grads_norm/fc2/weight", ANY, 5), call("grads_norm/fc2/bias", ANY, 5),], any_order=True,
+ [call("grads_norm/fc2/weight", ANY, 5), call("grads_norm/fc2/bias", ANY, 5)], any_order=True
)
with pytest.raises(AssertionError):
mock_logger.writer.add_scalar.assert_has_calls(
- [call("grads_norm/fc1/weight", ANY, 5), call("grads_norm/fc1/bias", ANY, 5),], any_order=True,
+ [call("grads_norm/fc1/weight", ANY, 5), call("grads_norm/fc1/bias", ANY, 5)], any_order=True
)
assert mock_logger.writer.add_scalar.call_count == 2
assert norm_mock.call_count == 2
diff --git a/tests/ignite/contrib/handlers/test_tqdm_logger.py b/tests/ignite/contrib/handlers/test_tqdm_logger.py
index 41bcc40d6b58..89d4447428c5 100644
--- a/tests/ignite/contrib/handlers/test_tqdm_logger.py
+++ b/tests/ignite/contrib/handlers/test_tqdm_logger.py
@@ -157,9 +157,7 @@ def step(engine, batch):
RunningAverage(alpha=0.5, output_transform=lambda x: x).attach(trainer, "batchloss")
pbar = ProgressBar()
- pbar.attach(
- trainer, metric_names=["batchloss",],
- )
+ pbar.attach(trainer, metric_names=["batchloss"])
trainer.run(data=data, max_epochs=1)
@@ -227,7 +225,7 @@ def step(engine, batch):
RunningAverage(alpha=0.5, output_transform=lambda x: x).attach(trainer, "batchloss")
pbar = ProgressBar()
- pbar.attach(trainer, metric_names=["batchloss",], state_attributes=["alpha", "beta", "gamma"])
+ pbar.attach(trainer, metric_names=["batchloss"], state_attributes=["alpha", "beta", "gamma"])
trainer.run(data=data, max_epochs=1)
diff --git a/tests/ignite/contrib/handlers/test_visdom_logger.py b/tests/ignite/contrib/handlers/test_visdom_logger.py
index 465ea5eb2a59..3faca55c47ec 100644
--- a/tests/ignite/contrib/handlers/test_visdom_logger.py
+++ b/tests/ignite/contrib/handlers/test_visdom_logger.py
@@ -48,8 +48,8 @@ def test_optimizer_params():
assert wrapper.windows["lr/group_0"]["win"] is not None
mock_logger.vis.line.assert_called_once_with(
- X=[123,],
- Y=[0.01,],
+ X=[123],
+ Y=[0.01],
env=mock_logger.vis.env,
win=None,
update=None,
@@ -68,8 +68,8 @@ def test_optimizer_params():
assert wrapper.windows["generator/lr/group_0"]["win"] is not None
mock_logger.vis.line.assert_called_once_with(
- X=[123,],
- Y=[0.01,],
+ X=[123],
+ Y=[0.01],
env=mock_logger.vis.env,
win=None,
update=None,
@@ -106,8 +106,8 @@ def test_output_handler_output_transform(dirname):
assert wrapper.windows["tag/output"]["win"] is not None
mock_logger.vis.line.assert_called_once_with(
- X=[123,],
- Y=[12345,],
+ X=[123],
+ Y=[12345],
env=mock_logger.vis.env,
win=None,
update=None,
@@ -126,8 +126,8 @@ def test_output_handler_output_transform(dirname):
assert wrapper.windows["another_tag/loss"]["win"] is not None
mock_logger.vis.line.assert_called_once_with(
- X=[123,],
- Y=[12345,],
+ X=[123],
+ Y=[12345],
env=mock_logger.vis.env,
win=None,
update=None,
@@ -157,8 +157,8 @@ def test_output_handler_metric_names(dirname):
mock_logger.vis.line.assert_has_calls(
[
call(
- X=[5,],
- Y=[12.23,],
+ X=[5],
+ Y=[12.23],
env=mock_logger.vis.env,
win=None,
update=None,
@@ -166,8 +166,8 @@ def test_output_handler_metric_names(dirname):
name="tag/a",
),
call(
- X=[5,],
- Y=[23.45,],
+ X=[5],
+ Y=[23.45],
env=mock_logger.vis.env,
win=None,
update=None,
@@ -178,7 +178,7 @@ def test_output_handler_metric_names(dirname):
any_order=True,
)
- wrapper = OutputHandler("tag", metric_names=["a",])
+ wrapper = OutputHandler("tag", metric_names=["a"])
mock_engine = MagicMock()
mock_engine.state = State(metrics={"a": torch.Tensor([0.0, 1.0, 2.0, 3.0])})
@@ -200,8 +200,8 @@ def test_output_handler_metric_names(dirname):
mock_logger.vis.line.assert_has_calls(
[
call(
- X=[5,],
- Y=[0.0,],
+ X=[5],
+ Y=[0.0],
env=mock_logger.vis.env,
win=None,
update=None,
@@ -209,8 +209,8 @@ def test_output_handler_metric_names(dirname):
name="tag/a/0",
),
call(
- X=[5,],
- Y=[1.0,],
+ X=[5],
+ Y=[1.0],
env=mock_logger.vis.env,
win=None,
update=None,
@@ -218,8 +218,8 @@ def test_output_handler_metric_names(dirname):
name="tag/a/1",
),
call(
- X=[5,],
- Y=[2.0,],
+ X=[5],
+ Y=[2.0],
env=mock_logger.vis.env,
win=None,
update=None,
@@ -227,8 +227,8 @@ def test_output_handler_metric_names(dirname):
name="tag/a/2",
),
call(
- X=[5,],
- Y=[3.0,],
+ X=[5],
+ Y=[3.0],
env=mock_logger.vis.env,
win=None,
update=None,
@@ -259,8 +259,8 @@ def test_output_handler_metric_names(dirname):
mock_logger.vis.line.assert_has_calls(
[
call(
- X=[7,],
- Y=[55.56,],
+ X=[7],
+ Y=[55.56],
env=mock_logger.vis.env,
win=None,
update=None,
@@ -291,8 +291,8 @@ def test_output_handler_metric_names(dirname):
mock_logger.vis.line.assert_has_calls(
[
call(
- X=[5,],
- Y=[12.23,],
+ X=[5],
+ Y=[12.23],
env=mock_logger.vis.env,
win=None,
update=None,
@@ -300,8 +300,8 @@ def test_output_handler_metric_names(dirname):
name="tag/a",
),
call(
- X=[5,],
- Y=[23.45,],
+ X=[5],
+ Y=[23.45],
env=mock_logger.vis.env,
win=None,
update=None,
@@ -341,8 +341,8 @@ def test_output_handler_both(dirname):
mock_logger.vis.line.assert_has_calls(
[
call(
- X=[5,],
- Y=[12.23,],
+ X=[5],
+ Y=[12.23],
env=mock_logger.vis.env,
win=None,
update=None,
@@ -350,8 +350,8 @@ def test_output_handler_both(dirname):
name="tag/a",
),
call(
- X=[5,],
- Y=[23.45,],
+ X=[5],
+ Y=[23.45],
env=mock_logger.vis.env,
win=None,
update=None,
@@ -359,8 +359,8 @@ def test_output_handler_both(dirname):
name="tag/b",
),
call(
- X=[5,],
- Y=[12345,],
+ X=[5],
+ Y=[12345],
env=mock_logger.vis.env,
win=None,
update=None,
@@ -388,8 +388,8 @@ def test_output_handler_both(dirname):
mock_logger.vis.line.assert_has_calls(
[
call(
- X=[6,],
- Y=[12.23,],
+ X=[6],
+ Y=[12.23],
env=mock_logger.vis.env,
win=wrapper.windows["tag/a"]["win"],
update="append",
@@ -397,8 +397,8 @@ def test_output_handler_both(dirname):
name="tag/a",
),
call(
- X=[6,],
- Y=[23.45,],
+ X=[6],
+ Y=[23.45],
env=mock_logger.vis.env,
win=wrapper.windows["tag/b"]["win"],
update="append",
@@ -406,8 +406,8 @@ def test_output_handler_both(dirname):
name="tag/b",
),
call(
- X=[6,],
- Y=[12345,],
+ X=[6],
+ Y=[12345],
env=mock_logger.vis.env,
win=wrapper.windows["tag/loss"]["win"],
update="append",
@@ -450,8 +450,8 @@ def test_output_handler_state_attrs():
mock_logger.vis.line.assert_has_calls(
[
call(
- X=[5,],
- Y=[3.899,],
+ X=[5],
+ Y=[3.899],
env=mock_logger.vis.env,
win=None,
update=None,
@@ -459,8 +459,8 @@ def test_output_handler_state_attrs():
name="tag/alpha",
),
call(
- X=[5,],
- Y=[12.0,],
+ X=[5],
+ Y=[12.0],
env=mock_logger.vis.env,
win=None,
update=None,
@@ -468,8 +468,8 @@ def test_output_handler_state_attrs():
name="tag/beta",
),
call(
- X=[5,],
- Y=[21.0,],
+ X=[5],
+ Y=[21.0],
env=mock_logger.vis.env,
win=None,
update=None,
@@ -477,8 +477,8 @@ def test_output_handler_state_attrs():
name="tag/gamma/0",
),
call(
- X=[5,],
- Y=[6.0,],
+ X=[5],
+ Y=[6.0],
env=mock_logger.vis.env,
win=None,
update=None,
@@ -530,8 +530,8 @@ def global_step_transform(*args, **kwargs):
mock_logger.vis.line.assert_has_calls(
[
call(
- X=[10,],
- Y=[12345,],
+ X=[10],
+ Y=[12345],
env=mock_logger.vis.env,
win=None,
update=None,
@@ -571,8 +571,8 @@ def test_output_handler_with_global_step_from_engine():
mock_logger.vis.line.assert_has_calls(
[
call(
- X=[mock_another_engine.state.epoch,],
- Y=[mock_engine.state.output,],
+ X=[mock_another_engine.state.epoch],
+ Y=[mock_engine.state.output],
env=mock_logger.vis.env,
win=None,
update=None,
@@ -592,8 +592,8 @@ def test_output_handler_with_global_step_from_engine():
mock_logger.vis.line.assert_has_calls(
[
call(
- X=[mock_another_engine.state.epoch,],
- Y=[mock_engine.state.output,],
+ X=[mock_another_engine.state.epoch],
+ Y=[mock_engine.state.output],
env=mock_logger.vis.env,
win=wrapper.windows["tag/loss"]["win"],
update="append",
@@ -655,8 +655,8 @@ def _test(tag=None):
mock_logger.vis.line.assert_has_calls(
[
call(
- X=[5,],
- Y=[0.0,],
+ X=[5],
+ Y=[0.0],
env=mock_logger.vis.env,
win=None,
update=None,
@@ -664,8 +664,8 @@ def _test(tag=None):
name=tag_prefix + "weights_norm/fc1/weight",
),
call(
- X=[5,],
- Y=[0.0,],
+ X=[5],
+ Y=[0.0],
env=mock_logger.vis.env,
win=None,
update=None,
@@ -673,8 +673,8 @@ def _test(tag=None):
name=tag_prefix + "weights_norm/fc1/bias",
),
call(
- X=[5,],
- Y=[12.0,],
+ X=[5],
+ Y=[12.0],
env=mock_logger.vis.env,
win=None,
update=None,
@@ -682,7 +682,7 @@ def _test(tag=None):
name=tag_prefix + "weights_norm/fc2/weight",
),
call(
- X=[5,],
+ X=[5],
Y=ANY,
env=mock_logger.vis.env,
win=None,
@@ -729,8 +729,8 @@ def norm(x):
mock_logger.vis.line.assert_has_calls(
[
call(
- X=[5,],
- Y=[12.34,],
+ X=[5],
+ Y=[12.34],
env=mock_logger.vis.env,
win=None,
update=None,
@@ -738,8 +738,8 @@ def norm(x):
name="weights_norm/fc1/weight",
),
call(
- X=[5,],
- Y=[12.34,],
+ X=[5],
+ Y=[12.34],
env=mock_logger.vis.env,
win=None,
update=None,
@@ -747,8 +747,8 @@ def norm(x):
name="weights_norm/fc1/bias",
),
call(
- X=[5,],
- Y=[12.34,],
+ X=[5],
+ Y=[12.34],
env=mock_logger.vis.env,
win=None,
update=None,
@@ -756,8 +756,8 @@ def norm(x):
name="weights_norm/fc2/weight",
),
call(
- X=[5,],
- Y=[12.34,],
+ X=[5],
+ Y=[12.34],
env=mock_logger.vis.env,
win=None,
update=None,
@@ -820,7 +820,7 @@ def _test(tag=None):
mock_logger.vis.line.assert_has_calls(
[
call(
- X=[5,],
+ X=[5],
Y=ANY,
env=mock_logger.vis.env,
win=None,
@@ -829,7 +829,7 @@ def _test(tag=None):
name=tag_prefix + "grads_norm/fc1/weight",
),
call(
- X=[5,],
+ X=[5],
Y=ANY,
env=mock_logger.vis.env,
win=None,
@@ -838,7 +838,7 @@ def _test(tag=None):
name=tag_prefix + "grads_norm/fc1/bias",
),
call(
- X=[5,],
+ X=[5],
Y=ANY,
env=mock_logger.vis.env,
win=None,
@@ -847,7 +847,7 @@ def _test(tag=None):
name=tag_prefix + "grads_norm/fc2/weight",
),
call(
- X=[5,],
+ X=[5],
Y=ANY,
env=mock_logger.vis.env,
win=None,
diff --git a/tests/ignite/contrib/metrics/regression/test_canberra_metric.py b/tests/ignite/contrib/metrics/regression/test_canberra_metric.py
index b30a9c402e68..2b220727e580 100644
--- a/tests/ignite/contrib/metrics/regression/test_canberra_metric.py
+++ b/tests/ignite/contrib/metrics/regression/test_canberra_metric.py
@@ -17,7 +17,7 @@ def test_wrong_input_shapes():
m.update((torch.rand(4), torch.rand(4, 1)))
with pytest.raises(ValueError, match=r"Input data shapes should be the same, but given"):
- m.update((torch.rand(4, 1), torch.rand(4,)))
+ m.update((torch.rand(4, 1), torch.rand(4)))
def test_compute():
diff --git a/tests/ignite/contrib/metrics/regression/test_fractional_absolute_error.py b/tests/ignite/contrib/metrics/regression/test_fractional_absolute_error.py
index dc45bd6d0a98..59d72e4faf5f 100644
--- a/tests/ignite/contrib/metrics/regression/test_fractional_absolute_error.py
+++ b/tests/ignite/contrib/metrics/regression/test_fractional_absolute_error.py
@@ -25,7 +25,7 @@ def test_wrong_input_shapes():
m.update((torch.rand(4), torch.rand(4, 1)))
with pytest.raises(ValueError, match=r"Input data shapes should be the same, but given"):
- m.update((torch.rand(4, 1), torch.rand(4,)))
+ m.update((torch.rand(4, 1), torch.rand(4)))
def test_compute():
diff --git a/tests/ignite/contrib/metrics/regression/test_fractional_bias.py b/tests/ignite/contrib/metrics/regression/test_fractional_bias.py
index 7079b2baa438..45cd238abe3d 100644
--- a/tests/ignite/contrib/metrics/regression/test_fractional_bias.py
+++ b/tests/ignite/contrib/metrics/regression/test_fractional_bias.py
@@ -25,7 +25,7 @@ def test_wrong_input_shapes():
m.update((torch.rand(4), torch.rand(4, 1)))
with pytest.raises(ValueError, match=r"Input data shapes should be the same, but given"):
- m.update((torch.rand(4, 1), torch.rand(4,)))
+ m.update((torch.rand(4, 1), torch.rand(4)))
def test_fractional_bias():
diff --git a/tests/ignite/contrib/metrics/regression/test_geometric_mean_absolute_error.py b/tests/ignite/contrib/metrics/regression/test_geometric_mean_absolute_error.py
index eae47c7fa71a..50b7f1db77d3 100644
--- a/tests/ignite/contrib/metrics/regression/test_geometric_mean_absolute_error.py
+++ b/tests/ignite/contrib/metrics/regression/test_geometric_mean_absolute_error.py
@@ -25,7 +25,7 @@ def test_wrong_input_shapes():
m.update((torch.rand(4), torch.rand(4, 1)))
with pytest.raises(ValueError, match=r"Input data shapes should be the same, but given"):
- m.update((torch.rand(4, 1), torch.rand(4,)))
+ m.update((torch.rand(4, 1), torch.rand(4)))
def test_compute():
diff --git a/tests/ignite/contrib/metrics/regression/test_geometric_mean_relative_absolute_error.py b/tests/ignite/contrib/metrics/regression/test_geometric_mean_relative_absolute_error.py
index b1d6a8776f07..5009d6bc8f05 100644
--- a/tests/ignite/contrib/metrics/regression/test_geometric_mean_relative_absolute_error.py
+++ b/tests/ignite/contrib/metrics/regression/test_geometric_mean_relative_absolute_error.py
@@ -26,13 +26,13 @@ def test_wrong_input_shapes():
m.update((torch.rand(4), torch.rand(4, 1)))
with pytest.raises(ValueError, match=r"Input data shapes should be the same, but given"):
- m.update((torch.rand(4, 1), torch.rand(4,)))
+ m.update((torch.rand(4, 1), torch.rand(4)))
def test_compute():
size = 51
- np_y_pred = np.random.rand(size,)
- np_y = np.random.rand(size,)
+ np_y_pred = np.random.rand(size)
+ np_y = np.random.rand(size)
np_gmrae = np.exp(np.log(np.abs(np_y - np_y_pred) / np.abs(np_y - np_y.mean())).mean())
m = GeometricMeanRelativeAbsoluteError()
diff --git a/tests/ignite/contrib/metrics/regression/test_manhattan_distance.py b/tests/ignite/contrib/metrics/regression/test_manhattan_distance.py
index 2ed6726db4a6..5e794d52de86 100644
--- a/tests/ignite/contrib/metrics/regression/test_manhattan_distance.py
+++ b/tests/ignite/contrib/metrics/regression/test_manhattan_distance.py
@@ -17,7 +17,7 @@ def test_wrong_input_shapes():
m.update((torch.rand(4), torch.rand(4, 1)))
with pytest.raises(ValueError, match=r"Input data shapes should be the same, but given"):
- m.update((torch.rand(4, 1), torch.rand(4,)))
+ m.update((torch.rand(4, 1), torch.rand(4)))
def test_mahattan_distance():
diff --git a/tests/ignite/contrib/metrics/regression/test_maximum_absolute_error.py b/tests/ignite/contrib/metrics/regression/test_maximum_absolute_error.py
index 828a9dcc1ee4..cf1436ae0251 100644
--- a/tests/ignite/contrib/metrics/regression/test_maximum_absolute_error.py
+++ b/tests/ignite/contrib/metrics/regression/test_maximum_absolute_error.py
@@ -25,7 +25,7 @@ def test_wrong_input_shapes():
m.update((torch.rand(4), torch.rand(4, 1)))
with pytest.raises(ValueError, match=r"Input data shapes should be the same, but given"):
- m.update((torch.rand(4, 1), torch.rand(4,)))
+ m.update((torch.rand(4, 1), torch.rand(4)))
def test_maximum_absolute_error():
diff --git a/tests/ignite/contrib/metrics/regression/test_mean_absolute_relative_error.py b/tests/ignite/contrib/metrics/regression/test_mean_absolute_relative_error.py
index 90e8baceb497..0b9754acf614 100644
--- a/tests/ignite/contrib/metrics/regression/test_mean_absolute_relative_error.py
+++ b/tests/ignite/contrib/metrics/regression/test_mean_absolute_relative_error.py
@@ -18,7 +18,7 @@ def test_wrong_input_shapes():
m.update((torch.rand(4), torch.rand(4, 1)))
with raises(ValueError, match=r"Input data shapes should be the same, but given"):
- m.update((torch.rand(4, 1), torch.rand(4,)))
+ m.update((torch.rand(4, 1), torch.rand(4)))
def test_mean_absolute_relative_error():
diff --git a/tests/ignite/contrib/metrics/regression/test_mean_error.py b/tests/ignite/contrib/metrics/regression/test_mean_error.py
index f66c82e1236b..b7659f810dab 100644
--- a/tests/ignite/contrib/metrics/regression/test_mean_error.py
+++ b/tests/ignite/contrib/metrics/regression/test_mean_error.py
@@ -23,7 +23,7 @@ def test_wrong_input_shapes():
m.update((torch.rand(4), torch.rand(4, 1)))
with pytest.raises(ValueError, match=r"Input data shapes should be the same, but given"):
- m.update((torch.rand(4, 1), torch.rand(4,)))
+ m.update((torch.rand(4, 1), torch.rand(4)))
def test_mean_error():
diff --git a/tests/ignite/contrib/metrics/regression/test_mean_normalized_bias.py b/tests/ignite/contrib/metrics/regression/test_mean_normalized_bias.py
index b9036287ff1c..480d6552d5c4 100644
--- a/tests/ignite/contrib/metrics/regression/test_mean_normalized_bias.py
+++ b/tests/ignite/contrib/metrics/regression/test_mean_normalized_bias.py
@@ -35,7 +35,7 @@ def test_wrong_input_shapes():
m.update((torch.rand(4), torch.rand(4, 1)))
with pytest.raises(ValueError, match=r"Input data shapes should be the same, but given"):
- m.update((torch.rand(4, 1), torch.rand(4,)))
+ m.update((torch.rand(4, 1), torch.rand(4)))
def test_mean_error():
diff --git a/tests/ignite/contrib/metrics/regression/test_median_absolute_error.py b/tests/ignite/contrib/metrics/regression/test_median_absolute_error.py
index ebe063293626..2fd3e5fe3597 100644
--- a/tests/ignite/contrib/metrics/regression/test_median_absolute_error.py
+++ b/tests/ignite/contrib/metrics/regression/test_median_absolute_error.py
@@ -28,10 +28,10 @@ def test_wrong_input_shapes():
m.update((torch.rand(4, 1), torch.rand(4, 1, 2)))
with pytest.raises(ValueError, match=r"Predictions should be of shape"):
- m.update((torch.rand(4, 1, 2), torch.rand(4,),))
+ m.update((torch.rand(4, 1, 2), torch.rand(4)))
with pytest.raises(ValueError, match=r"Targets should be of shape"):
- m.update((torch.rand(4,), torch.rand(4, 1, 2),))
+ m.update((torch.rand(4), torch.rand(4, 1, 2)))
def test_median_absolute_error():
@@ -42,8 +42,8 @@ def test_median_absolute_error():
# Size of dataset will be odd for these tests
size = 51
- np_y_pred = np.random.rand(size,)
- np_y = np.random.rand(size,)
+ np_y_pred = np.random.rand(size)
+ np_y = np.random.rand(size)
np_median_absolute_error = np.median(np.abs(np_y - np_y_pred))
m = MedianAbsoluteError()
diff --git a/tests/ignite/contrib/metrics/regression/test_median_absolute_percentage_error.py b/tests/ignite/contrib/metrics/regression/test_median_absolute_percentage_error.py
index a463b6406e05..af28afdc1770 100644
--- a/tests/ignite/contrib/metrics/regression/test_median_absolute_percentage_error.py
+++ b/tests/ignite/contrib/metrics/regression/test_median_absolute_percentage_error.py
@@ -28,10 +28,10 @@ def test_wrong_input_shapes():
m.update((torch.rand(4, 1), torch.rand(4, 1, 2)))
with pytest.raises(ValueError, match=r"Predictions should be of shape"):
- m.update((torch.rand(4, 1, 2), torch.rand(4,),))
+ m.update((torch.rand(4, 1, 2), torch.rand(4)))
with pytest.raises(ValueError, match=r"Targets should be of shape"):
- m.update((torch.rand(4,), torch.rand(4, 1, 2),))
+ m.update((torch.rand(4), torch.rand(4, 1, 2)))
def test_median_absolute_percentage_error():
@@ -42,8 +42,8 @@ def test_median_absolute_percentage_error():
# Size of dataset will be odd for these tests
size = 51
- np_y_pred = np.random.rand(size,)
- np_y = np.random.rand(size,)
+ np_y_pred = np.random.rand(size)
+ np_y = np.random.rand(size)
np_median_absolute_percentage_error = 100.0 * np.median(np.abs(np_y - np_y_pred) / np.abs(np_y))
m = MedianAbsolutePercentageError()
diff --git a/tests/ignite/contrib/metrics/regression/test_median_relative_absolute_error.py b/tests/ignite/contrib/metrics/regression/test_median_relative_absolute_error.py
index 06c5ab2eea53..de43a173704d 100644
--- a/tests/ignite/contrib/metrics/regression/test_median_relative_absolute_error.py
+++ b/tests/ignite/contrib/metrics/regression/test_median_relative_absolute_error.py
@@ -28,10 +28,10 @@ def test_wrong_input_shapes():
m.update((torch.rand(4, 1), torch.rand(4, 1, 2)))
with pytest.raises(ValueError, match=r"Predictions should be of shape"):
- m.update((torch.rand(4, 1, 2), torch.rand(4,),))
+ m.update((torch.rand(4, 1, 2), torch.rand(4)))
with pytest.raises(ValueError, match=r"Targets should be of shape"):
- m.update((torch.rand(4,), torch.rand(4, 1, 2),))
+ m.update((torch.rand(4), torch.rand(4, 1, 2)))
def test_median_relative_absolute_error():
@@ -42,8 +42,8 @@ def test_median_relative_absolute_error():
# Size of dataset will be odd for these tests
size = 51
- np_y_pred = np.random.rand(size,)
- np_y = np.random.rand(size,)
+ np_y_pred = np.random.rand(size)
+ np_y = np.random.rand(size)
np_median_absolute_relative_error = np.median(np.abs(np_y - np_y_pred) / np.abs(np_y - np_y.mean()))
m = MedianRelativeAbsoluteError()
diff --git a/tests/ignite/contrib/metrics/regression/test_r2_score.py b/tests/ignite/contrib/metrics/regression/test_r2_score.py
index 4a87089f3304..89fe468c22d9 100644
--- a/tests/ignite/contrib/metrics/regression/test_r2_score.py
+++ b/tests/ignite/contrib/metrics/regression/test_r2_score.py
@@ -24,14 +24,14 @@ def test_wrong_input_shapes():
m.update((torch.rand(4), torch.rand(4, 1)))
with pytest.raises(ValueError, match=r"Input data shapes should be the same, but given"):
- m.update((torch.rand(4, 1), torch.rand(4,)))
+ m.update((torch.rand(4, 1), torch.rand(4)))
def test_r2_score():
size = 51
- np_y_pred = np.random.rand(size,)
- np_y = np.random.rand(size,)
+ np_y_pred = np.random.rand(size)
+ np_y = np.random.rand(size)
m = R2Score()
y_pred = torch.from_numpy(np_y_pred)
diff --git a/tests/ignite/contrib/metrics/regression/test_wave_hedges_distance.py b/tests/ignite/contrib/metrics/regression/test_wave_hedges_distance.py
index da8ba88f7f20..1c1af7fb38d6 100644
--- a/tests/ignite/contrib/metrics/regression/test_wave_hedges_distance.py
+++ b/tests/ignite/contrib/metrics/regression/test_wave_hedges_distance.py
@@ -16,7 +16,7 @@ def test_wrong_input_shapes():
m.update((torch.rand(4), torch.rand(4, 1)))
with pytest.raises(ValueError, match=r"Input data shapes should be the same, but given"):
- m.update((torch.rand(4, 1), torch.rand(4,)))
+ m.update((torch.rand(4, 1), torch.rand(4)))
def test_compute():
diff --git a/tests/ignite/contrib/metrics/test_average_precision.py b/tests/ignite/contrib/metrics/test_average_precision.py
index 7b7f55aaca0e..1627e04c40e4 100644
--- a/tests/ignite/contrib/metrics/test_average_precision.py
+++ b/tests/ignite/contrib/metrics/test_average_precision.py
@@ -240,7 +240,7 @@ def _test(y_preds, y_true, n_epochs, metric_device, update_fn):
def get_tests(is_N):
if is_N:
y_true = torch.randint(0, n_classes, size=(offset * idist.get_world_size(),)).to(device)
- y_preds = torch.rand(offset * idist.get_world_size(),).to(device)
+ y_preds = torch.rand(offset * idist.get_world_size()).to(device)
def update_fn(engine, i):
return (
diff --git a/tests/ignite/contrib/metrics/test_cohen_kappa.py b/tests/ignite/contrib/metrics/test_cohen_kappa.py
index 32f9bbf9a1f3..c2c004f8eddb 100644
--- a/tests/ignite/contrib/metrics/test_cohen_kappa.py
+++ b/tests/ignite/contrib/metrics/test_cohen_kappa.py
@@ -297,12 +297,8 @@ def test_distrib_hvd(gloo_hvd_executor):
device = torch.device("cpu" if not torch.cuda.is_available() else "cuda")
nproc = 4 if not torch.cuda.is_available() else torch.cuda.device_count()
- gloo_hvd_executor(
- _test_distrib_binary_input, (device,), np=nproc, do_init=True,
- )
- gloo_hvd_executor(
- _test_distrib_integration_binary_input, (device,), np=nproc, do_init=True,
- )
+ gloo_hvd_executor(_test_distrib_binary_input, (device,), np=nproc, do_init=True)
+ gloo_hvd_executor(_test_distrib_integration_binary_input, (device,), np=nproc, do_init=True)
@pytest.mark.multinode_distributed
diff --git a/tests/ignite/contrib/metrics/test_gpu_info.py b/tests/ignite/contrib/metrics/test_gpu_info.py
index 97ee72b83905..a63823ad9fef 100644
--- a/tests/ignite/contrib/metrics/test_gpu_info.py
+++ b/tests/ignite/contrib/metrics/test_gpu_info.py
@@ -139,9 +139,9 @@ def getInstance():
_test_with_custom_query(resp={}, warn_msg=r"No GPU information available", check_compute=True)
# No GPU memory info
- _test_with_custom_query(resp={"gpu": [{"utilization": {}},]}, warn_msg=r"No GPU memory usage information available")
+ _test_with_custom_query(resp={"gpu": [{"utilization": {}}]}, warn_msg=r"No GPU memory usage information available")
# No GPU utilization info
_test_with_custom_query(
- resp={"gpu": [{"fb_memory_usage": {}},]}, warn_msg=r"No GPU utilization information available"
+ resp={"gpu": [{"fb_memory_usage": {}}]}, warn_msg=r"No GPU utilization information available"
)
diff --git a/tests/ignite/contrib/metrics/test_roc_auc.py b/tests/ignite/contrib/metrics/test_roc_auc.py
index aa34089cdbc2..9daf384939b1 100644
--- a/tests/ignite/contrib/metrics/test_roc_auc.py
+++ b/tests/ignite/contrib/metrics/test_roc_auc.py
@@ -253,7 +253,7 @@ def _test(y_preds, y_true, n_epochs, metric_device, update_fn):
def get_tests(is_N):
if is_N:
y_true = torch.randint(0, n_classes, size=(offset * idist.get_world_size(),)).to(device)
- y_preds = torch.rand(offset * idist.get_world_size(),).to(device)
+ y_preds = torch.rand(offset * idist.get_world_size()).to(device)
def update_fn(engine, i):
return (
diff --git a/tests/ignite/distributed/comp_models/test_base.py b/tests/ignite/distributed/comp_models/test_base.py
index 6a7ca20d35dd..34bb1f68f9a7 100644
--- a/tests/ignite/distributed/comp_models/test_base.py
+++ b/tests/ignite/distributed/comp_models/test_base.py
@@ -50,10 +50,10 @@ def test__encode_input_data():
assert encoded_msg == [-1] * 512
encoded_msg = ComputationModel._encode_input_data(12.0, is_src=True)
- assert encoded_msg == [1,] + [-1] * 511
+ assert encoded_msg == [1] + [-1] * 511
encoded_msg = ComputationModel._encode_input_data("abc", is_src=True)
- assert encoded_msg == [2,] + [-1] * 511
+ assert encoded_msg == [2] + [-1] * 511
t = torch.rand(2, 512, 32, 32, 64)
encoded_msg = ComputationModel._encode_input_data(t, is_src=True)
diff --git a/tests/ignite/distributed/comp_models/test_xla.py b/tests/ignite/distributed/comp_models/test_xla.py
index 798b50c04e7c..7fa92c0088b4 100644
--- a/tests/ignite/distributed/comp_models/test_xla.py
+++ b/tests/ignite/distributed/comp_models/test_xla.py
@@ -42,9 +42,7 @@ def _test_xla_spawn_fn(local_rank, world_size, device):
@pytest.mark.skipif(not has_xla_support, reason="Skip if no PyTorch XLA package")
def test__xla_dist_model_spawn_one_proc():
try:
- _XlaDistModel.spawn(
- _test_xla_spawn_fn, args=(1, "xla"), kwargs_dict={}, nproc_per_node=1,
- )
+ _XlaDistModel.spawn(_test_xla_spawn_fn, args=(1, "xla"), kwargs_dict={}, nproc_per_node=1)
except SystemExit:
pass
@@ -55,9 +53,7 @@ def test__xla_dist_model_spawn_one_proc():
def test__xla_dist_model_spawn_n_procs():
n = int(os.environ["NUM_TPU_WORKERS"])
try:
- _XlaDistModel.spawn(
- _test_xla_spawn_fn, args=(n, "xla"), kwargs_dict={}, nproc_per_node=n,
- )
+ _XlaDistModel.spawn(_test_xla_spawn_fn, args=(n, "xla"), kwargs_dict={}, nproc_per_node=n)
except SystemExit:
pass
@@ -187,7 +183,7 @@ def training_step(engine, _):
# THIS CAN BE A CAUSE OF CRASH if DEVICE is OTHER THAN device
tensor = torch.tensor([fold + 1.0], dtype=torch.float).to(comp_model.device())
- xm.all_reduce("max", [tensor,])
+ xm.all_reduce("max", [tensor])
time.sleep(0.01 * fold)
diff --git a/tests/ignite/distributed/utils/__init__.py b/tests/ignite/distributed/utils/__init__.py
index 91f15958431b..10c7eef7b69a 100644
--- a/tests/ignite/distributed/utils/__init__.py
+++ b/tests/ignite/distributed/utils/__init__.py
@@ -122,7 +122,7 @@ def _test_distrib_all_reduce(device):
def _test_distrib_all_gather(device):
res = torch.tensor(idist.all_gather(10), device=device)
- true_res = torch.tensor([10,] * idist.get_world_size(), device=device)
+ true_res = torch.tensor([10] * idist.get_world_size(), device=device)
assert (res == true_res).all()
t = torch.tensor(idist.get_rank(), device=device)
@@ -134,7 +134,7 @@ def _test_distrib_all_gather(device):
if idist.get_rank() == 0:
x = "abc"
res = idist.all_gather(x)
- true_res = ["abc",] + ["test-test"] * (idist.get_world_size() - 1)
+ true_res = ["abc"] + ["test-test"] * (idist.get_world_size() - 1)
assert res == true_res
base_x = "tests/ignite/distributed/utils/test_native.py" * 2000
@@ -143,7 +143,7 @@ def _test_distrib_all_gather(device):
x = "abc"
res = idist.all_gather(x)
- true_res = ["abc",] + [base_x] * (idist.get_world_size() - 1)
+ true_res = ["abc"] + [base_x] * (idist.get_world_size() - 1)
assert res == true_res
t = torch.arange(100, device=device).reshape(4, 25) * (idist.get_rank() + 1)
diff --git a/tests/ignite/engine/test_create_supervised.py b/tests/ignite/engine/test_create_supervised.py
index 9d5d65c51914..611b6a6c8d56 100644
--- a/tests/ignite/engine/test_create_supervised.py
+++ b/tests/ignite/engine/test_create_supervised.py
@@ -426,10 +426,10 @@ def test_create_supervised_trainer_on_cuda_amp():
model_device=model_device, trainer_device=trainer_device, amp_mode="amp"
)
_test_create_supervised_trainer(
- gradient_accumulation_steps=1, model_device=model_device, trainer_device=trainer_device, amp_mode="amp",
+ gradient_accumulation_steps=1, model_device=model_device, trainer_device=trainer_device, amp_mode="amp"
)
_test_create_supervised_trainer(
- gradient_accumulation_steps=3, model_device=model_device, trainer_device=trainer_device, amp_mode="amp",
+ gradient_accumulation_steps=3, model_device=model_device, trainer_device=trainer_device, amp_mode="amp"
)
_test_create_mocked_supervised_trainer(model_device=model_device, trainer_device=trainer_device, amp_mode="amp")
@@ -487,10 +487,10 @@ def test_create_supervised_trainer_on_cuda_apex():
model_device=model_device, trainer_device=trainer_device, amp_mode="apex"
)
_test_create_supervised_trainer(
- gradient_accumulation_steps=1, model_device=model_device, trainer_device=trainer_device, amp_mode="apex",
+ gradient_accumulation_steps=1, model_device=model_device, trainer_device=trainer_device, amp_mode="apex"
)
_test_create_supervised_trainer(
- gradient_accumulation_steps=3, model_device=model_device, trainer_device=trainer_device, amp_mode="apex",
+ gradient_accumulation_steps=3, model_device=model_device, trainer_device=trainer_device, amp_mode="apex"
)
_test_create_mocked_supervised_trainer(model_device=model_device, trainer_device=trainer_device, amp_mode="apex")
diff --git a/tests/ignite/engine/test_deterministic.py b/tests/ignite/engine/test_deterministic.py
index 42b24bada023..ac618ba969b9 100644
--- a/tests/ignite/engine/test_deterministic.py
+++ b/tests/ignite/engine/test_deterministic.py
@@ -283,9 +283,7 @@ def _(engine):
sampler.set_epoch(engine.state.epoch - 1)
torch.manual_seed(87)
- engine.run(
- orig_dataloader, max_epochs=max_epochs, epoch_length=epoch_length,
- )
+ engine.run(orig_dataloader, max_epochs=max_epochs, epoch_length=epoch_length)
batch_checker = BatchChecker(seen_batchs, init_counter=resume_epoch * epoch_length)
@@ -390,9 +388,7 @@ def _(engine):
sampler.set_epoch(engine.state.epoch)
torch.manual_seed(12)
- engine.run(
- orig_dataloader, max_epochs=max_epochs, epoch_length=epoch_length,
- )
+ engine.run(orig_dataloader, max_epochs=max_epochs, epoch_length=epoch_length)
batch_checker = BatchChecker(seen_batchs, init_counter=resume_iteration)
@@ -471,9 +467,7 @@ def update_fn(_, batch):
engine = DeterministicEngine(update_fn)
torch.manual_seed(121)
- engine.run(
- infinite_data_iterator(), max_epochs=max_epochs, epoch_length=epoch_length,
- )
+ engine.run(infinite_data_iterator(), max_epochs=max_epochs, epoch_length=epoch_length)
batch_checker = BatchChecker(seen_batchs, init_counter=resume_epoch * epoch_length)
@@ -527,9 +521,7 @@ def update_fn(_, batch):
engine = DeterministicEngine(update_fn)
torch.manual_seed(24)
- engine.run(
- infinite_data_iterator(), max_epochs=max_epochs, epoch_length=epoch_length,
- )
+ engine.run(infinite_data_iterator(), max_epochs=max_epochs, epoch_length=epoch_length)
batch_checker = BatchChecker(seen_batchs, init_counter=resume_iteration)
diff --git a/tests/ignite/engine/test_engine.py b/tests/ignite/engine/test_engine.py
index 3dff845121d3..672d6eadf231 100644
--- a/tests/ignite/engine/test_engine.py
+++ b/tests/ignite/engine/test_engine.py
@@ -590,9 +590,7 @@ def train_fn(_, batch):
torch.manual_seed(1)
epoch_length = 6
trainer = Engine(train_fn)
- trainer.run(
- random_train_data_generator(size), max_epochs=4, epoch_length=epoch_length,
- )
+ trainer.run(random_train_data_generator(size), max_epochs=4, epoch_length=epoch_length)
def val_fn(_1, _2):
pass
@@ -611,9 +609,7 @@ def run_evaluation(_):
evaluator.run(random_val_data_generator(size), epoch_length=4)
torch.manual_seed(1)
- trainer.run(
- random_train_data_generator(size), max_epochs=4, epoch_length=epoch_length,
- )
+ trainer.run(random_train_data_generator(size), max_epochs=4, epoch_length=epoch_length)
for i in range(epoch_length):
assert train_batches[epoch_length + i] != train_batches[2 * epoch_length + i]
diff --git a/tests/ignite/engine/test_event_handlers.py b/tests/ignite/engine/test_event_handlers.py
index f3e0f209c79a..be47e15decd2 100644
--- a/tests/ignite/engine/test_event_handlers.py
+++ b/tests/ignite/engine/test_event_handlers.py
@@ -426,7 +426,7 @@ def handle_iteration_completed(engine, completed_counter):
def test_returns_state():
engine = Engine(MagicMock(return_value=1))
- state = engine.run([0,])
+ state = engine.run([0])
assert isinstance(state, State)
diff --git a/tests/ignite/handlers/test_checkpoint.py b/tests/ignite/handlers/test_checkpoint.py
index 7897da355f0b..04cae4186348 100644
--- a/tests/ignite/handlers/test_checkpoint.py
+++ b/tests/ignite/handlers/test_checkpoint.py
@@ -526,7 +526,7 @@ def save_handler(c, f):
to_save = {"model": DummyModel()}
- checkpointer = Checkpoint(to_save, save_handler=save_handler,)
+ checkpointer = Checkpoint(to_save, save_handler=save_handler)
trainer = Engine(lambda e, b: None)
@@ -797,7 +797,7 @@ def update_fn(_1, _2):
model = DummyModel()
to_save = {"model": model}
engine.add_event_handler(Events.EPOCH_COMPLETED, handler, to_save)
- engine.run([0, 1,], max_epochs=4)
+ engine.run([0, 1], max_epochs=4)
expected = sorted([f"{_PREFIX}_{name}_{i}.pt" for i in [3 * 2, 4 * 2]])
@@ -1203,9 +1203,7 @@ def _test_checkpoint_with_ddp(device):
torch.manual_seed(0)
model = DummyModel().to(device)
- device_ids = (
- None if "cpu" in device.type else [device,]
- )
+ device_ids = None if "cpu" in device.type else [device]
ddp_model = nn.parallel.DistributedDataParallel(model, device_ids=device_ids)
to_save = {"model": ddp_model}
@@ -1223,9 +1221,7 @@ def _test_checkpoint_with_ddp(device):
def _test_checkpoint_load_objects_ddp(device):
model = DummyModel().to(device)
- device_ids = (
- None if "cpu" in device.type else [device,]
- )
+ device_ids = None if "cpu" in device.type else [device]
ddp_model = nn.parallel.DistributedDataParallel(model, device_ids=device_ids)
opt = torch.optim.SGD(ddp_model.parameters(), lr=0.01)
@@ -1598,7 +1594,7 @@ def test_checkpoint_reset():
assert save_handler.call_count == 3
assert checkpointer.last_checkpoint == "model_124.pt"
assert len(checkpointer._saved) == 1
- assert sorted([item.filename for item in checkpointer._saved]) == sorted(["model_124.pt",])
+ assert sorted([item.filename for item in checkpointer._saved]) == sorted(["model_124.pt"])
def test_checkpoint_reset_with_engine(dirname):
@@ -1609,7 +1605,7 @@ def test_checkpoint_reset_with_engine(dirname):
model = DummyModel()
to_save = {"model": model}
engine.add_event_handler(Events.EPOCH_COMPLETED, handler, to_save)
- engine.run([0, 1,], max_epochs=10)
+ engine.run([0, 1], max_epochs=10)
expected = sorted([f"{_PREFIX}_{name}_{i}.pt" for i in [9 * 2, 10 * 2]])
assert sorted(os.listdir(dirname)) == expected
@@ -1617,7 +1613,7 @@ def test_checkpoint_reset_with_engine(dirname):
handler.reset()
engine.state.max_epochs = None
- engine.run([0, 1,], max_epochs=2)
+ engine.run([0, 1], max_epochs=2)
expected += [f"{_PREFIX}_{name}_{i}.pt" for i in [1 * 2, 2 * 2]]
assert sorted(os.listdir(dirname)) == sorted(expected)
diff --git a/tests/ignite/handlers/test_param_scheduler.py b/tests/ignite/handlers/test_param_scheduler.py
index e27c266d2c75..4b3fb2446412 100644
--- a/tests/ignite/handlers/test_param_scheduler.py
+++ b/tests/ignite/handlers/test_param_scheduler.py
@@ -337,15 +337,15 @@ def test_concat_scheduler_asserts():
scheduler_3 = CosineAnnealingScheduler(optimizer_2, "lr", start_value=0.0, end_value=1.0, cycle_size=10)
with pytest.raises(ValueError, match=r"schedulers should be related to same optimizer"):
- ConcatScheduler([scheduler_1, scheduler_3], durations=[30,])
+ ConcatScheduler([scheduler_1, scheduler_3], durations=[30])
scheduler_4 = CosineAnnealingScheduler(optimizer, "lr2", start_value=0.0, end_value=1.0, cycle_size=10)
with pytest.raises(ValueError, match=r"schedulers should be related to same param_name"):
- ConcatScheduler([scheduler_1, scheduler_4], durations=[30,])
+ ConcatScheduler([scheduler_1, scheduler_4], durations=[30])
with pytest.raises(ValueError, match=r"schedulers should be related to same optimizer"):
- ConcatScheduler.simulate_values(3, [scheduler_1, scheduler_3], durations=[30,])
+ ConcatScheduler.simulate_values(3, [scheduler_1, scheduler_3], durations=[30])
def test_concat_scheduler_state_dict():
@@ -1300,5 +1300,5 @@ def save_lr(engine):
trainer.run([0] * 15, max_epochs=1)
assert lrs == list(
- map(pytest.approx, [0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.55, 0.6, 0.65, 0.7, 0.75, 0.8, 0.85, 0.9, 0.95,],)
+ map(pytest.approx, [0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.55, 0.6, 0.65, 0.7, 0.75, 0.8, 0.85, 0.9, 0.95])
)
diff --git a/tests/ignite/handlers/test_state_param_scheduler.py b/tests/ignite/handlers/test_state_param_scheduler.py
index 2e5e8a3f7fe6..97d04555f3d3 100644
--- a/tests/ignite/handlers/test_state_param_scheduler.py
+++ b/tests/ignite/handlers/test_state_param_scheduler.py
@@ -61,9 +61,7 @@ def __call__(self, event_index):
)
-@pytest.mark.parametrize(
- "max_epochs, milestones_values, save_history, expected_param_history", [config1, config2],
-)
+@pytest.mark.parametrize("max_epochs, milestones_values, save_history, expected_param_history", [config1, config2])
def test_pwlinear_scheduler_linear_increase_history(
max_epochs, milestones_values, save_history, expected_param_history
):
@@ -87,9 +85,7 @@ def test_pwlinear_scheduler_linear_increase_history(
pw_linear_step_parameter_scheduler.load_state_dict(state_dict)
-@pytest.mark.parametrize(
- "max_epochs, milestones_values", [(3, [(3, 12), (5, 10)]), (5, [(10, 12), (20, 10)]),],
-)
+@pytest.mark.parametrize("max_epochs, milestones_values", [(3, [(3, 12), (5, 10)]), (5, [(10, 12), (20, 10)])])
def test_pwlinear_scheduler_step_constant(max_epochs, milestones_values):
# Testing step_constant
engine = Engine(lambda e, b: None)
@@ -106,7 +102,7 @@ def test_pwlinear_scheduler_step_constant(max_epochs, milestones_values):
@pytest.mark.parametrize(
"max_epochs, milestones_values, expected_val",
- [(2, [(0, 0), (3, 10)], 6.666666666666667), (10, [(0, 0), (20, 10)], 5.0),],
+ [(2, [(0, 0), (3, 10)], 6.666666666666667), (10, [(0, 0), (20, 10)], 5.0)],
)
def test_pwlinear_scheduler_linear_increase(max_epochs, milestones_values, expected_val):
# Testing linear increase
@@ -122,12 +118,8 @@ def test_pwlinear_scheduler_linear_increase(max_epochs, milestones_values, expec
linear_state_parameter_scheduler.load_state_dict(state_dict)
-@pytest.mark.parametrize(
- "max_epochs, milestones_values,", [(3, [(0, 0), (3, 10)]), (40, [(0, 0), (20, 10)])],
-)
-def test_pwlinear_scheduler_max_value(
- max_epochs, milestones_values,
-):
+@pytest.mark.parametrize("max_epochs, milestones_values,", [(3, [(0, 0), (3, 10)]), (40, [(0, 0), (20, 10)])])
+def test_pwlinear_scheduler_max_value(max_epochs, milestones_values):
# Testing max_value
engine = Engine(lambda e, b: None)
linear_state_parameter_scheduler = PiecewiseLinearStateScheduler(
@@ -162,9 +154,7 @@ def test_piecewiselinear_asserts():
PiecewiseLinearStateScheduler(param_name="linear_scheduled_param", milestones_values=[(0.5, 1)])
-@pytest.mark.parametrize(
- "max_epochs, initial_value, gamma", [(3, 10, 0.99), (40, 5, 0.98)],
-)
+@pytest.mark.parametrize("max_epochs, initial_value, gamma", [(3, 10, 0.99), (40, 5, 0.98)])
def test_exponential_scheduler(max_epochs, initial_value, gamma):
engine = Engine(lambda e, b: None)
exp_state_parameter_scheduler = ExpStateScheduler(
@@ -178,12 +168,8 @@ def test_exponential_scheduler(max_epochs, initial_value, gamma):
exp_state_parameter_scheduler.load_state_dict(state_dict)
-@pytest.mark.parametrize(
- "max_epochs, initial_value, gamma, step_size", [(3, 10, 0.99, 5), (40, 5, 0.98, 22)],
-)
-def test_step_scheduler(
- max_epochs, initial_value, gamma, step_size,
-):
+@pytest.mark.parametrize("max_epochs, initial_value, gamma, step_size", [(3, 10, 0.99, 5), (40, 5, 0.98, 22)])
+def test_step_scheduler(max_epochs, initial_value, gamma, step_size):
engine = Engine(lambda e, b: None)
step_state_parameter_scheduler = StepStateScheduler(
param_name="step_scheduled_param",
@@ -206,11 +192,9 @@ def test_step_scheduler(
@pytest.mark.parametrize(
- "max_epochs, initial_value, gamma, milestones", [(3, 10, 0.99, [3, 6]), (40, 5, 0.98, [3, 6, 9, 10, 11])],
+ "max_epochs, initial_value, gamma, milestones", [(3, 10, 0.99, [3, 6]), (40, 5, 0.98, [3, 6, 9, 10, 11])]
)
-def test_multistep_scheduler(
- max_epochs, initial_value, gamma, milestones,
-):
+def test_multistep_scheduler(max_epochs, initial_value, gamma, milestones):
engine = Engine(lambda e, b: None)
multi_step_state_parameter_scheduler = MultiStepStateScheduler(
param_name="multistep_scheduled_param",
@@ -368,7 +352,7 @@ def test_multiple_scheduler_with_save_history():
if "save_history" in config:
del config["save_history"]
_scheduler = scheduler(**config, save_history=True)
- _scheduler.attach(engine_multiple_schedulers,)
+ _scheduler.attach(engine_multiple_schedulers)
engine_multiple_schedulers.run([0] * 8, max_epochs=2)
@@ -507,7 +491,7 @@ def test_param_scheduler_with_ema_handler():
ema_handler.attach(trainer, name=param_name, event=Events.ITERATION_COMPLETED)
ema_decay_scheduler = PiecewiseLinearStateScheduler(
- param_name=param_name, milestones_values=[(0, 0.0), (10, 0.999),], save_history=True
+ param_name=param_name, milestones_values=[(0, 0.0), (10, 0.999)], save_history=True
)
ema_decay_scheduler.attach(trainer, Events.ITERATION_COMPLETED)
trainer.run(data, max_epochs=20)
diff --git a/tests/ignite/metrics/gan/test_fid.py b/tests/ignite/metrics/gan/test_fid.py
index fdf2ae2cea2d..6244a636f070 100644
--- a/tests/ignite/metrics/gan/test_fid.py
+++ b/tests/ignite/metrics/gan/test_fid.py
@@ -121,9 +121,7 @@ def test_wrong_inputs():
err_str = (
"Number of Training Features and Testing Features should be equal (torch.Size([9, 2]) != torch.Size([5, 2]))"
)
- with pytest.raises(
- ValueError, match=re.escape(err_str),
- ):
+ with pytest.raises(ValueError, match=re.escape(err_str)):
FID(num_features=2, feature_extractor=torch.nn.Identity()).update((torch.rand(9, 2), torch.rand(5, 2)))
with pytest.raises(TypeError, match=r"Argument feature_extractor must be of type torch.nn.Module"):
diff --git a/tests/ignite/metrics/nlp/test_rouge.py b/tests/ignite/metrics/nlp/test_rouge.py
index ee69708e93b0..2f1e24fcb892 100644
--- a/tests/ignite/metrics/nlp/test_rouge.py
+++ b/tests/ignite/metrics/nlp/test_rouge.py
@@ -85,7 +85,7 @@ def test_rouge_n_alpha(ngram, candidate, reference, expected):
@pytest.mark.parametrize(
- "candidates, references", [corpus.sample_1, corpus.sample_2, corpus.sample_3, corpus.sample_4, corpus.sample_5,],
+ "candidates, references", [corpus.sample_1, corpus.sample_2, corpus.sample_3, corpus.sample_4, corpus.sample_5]
)
def test_rouge_metrics(candidates, references):
for multiref in ["average", "best"]:
diff --git a/tests/ignite/metrics/nlp/test_utils.py b/tests/ignite/metrics/nlp/test_utils.py
index 8cf267a68bdc..5c6dbb446934 100644
--- a/tests/ignite/metrics/nlp/test_utils.py
+++ b/tests/ignite/metrics/nlp/test_utils.py
@@ -8,7 +8,7 @@
[
([], 1, [], []),
([0, 1, 2], 1, [(0,), (1,), (2,)], [1, 1, 1]),
- ([0, 1, 2], 2, [(0, 1,), (1, 2,),], [1, 1],),
+ ([0, 1, 2], 2, [(0, 1), (1, 2)], [1, 1]),
([0, 1, 2], 3, [(0, 1, 2)], [1]),
([0, 0, 0], 1, [(0,)], [3]),
([0, 0, 0], 2, [(0, 0)], [2]),
@@ -23,7 +23,7 @@ def test_ngrams(sequence, n, expected_keys, expected_values):
@pytest.mark.parametrize(
"seq_a, seq_b, expected",
- [([], [], 0), ([0, 1, 2], [0, 1, 2], 3), ([0, 1, 2], [0, 3, 2], 2), ("academy", "abracadabra", 4),],
+ [([], [], 0), ([0, 1, 2], [0, 1, 2], 3), ([0, 1, 2], [0, 3, 2], 2), ("academy", "abracadabra", 4)],
)
def test_lcs(seq_a, seq_b, expected):
assert lcs(seq_a, seq_b) == expected
diff --git a/tests/ignite/metrics/test_accumulation.py b/tests/ignite/metrics/test_accumulation.py
index 5f721aa16759..6777ba9f1caa 100644
--- a/tests/ignite/metrics/test_accumulation.py
+++ b/tests/ignite/metrics/test_accumulation.py
@@ -355,7 +355,7 @@ def update_fn(engine, batch):
assert len(true_val) == shape[-1]
np.testing.assert_almost_equal(
- state.metrics["agg_custom_var"].cpu().numpy(), true_val, decimal=int(np.log10(1.0 / tol)),
+ state.metrics["agg_custom_var"].cpu().numpy(), true_val, decimal=int(np.log10(1.0 / tol))
)
size = 100
diff --git a/tests/ignite/metrics/test_accuracy.py b/tests/ignite/metrics/test_accuracy.py
index ba38afb8f699..da6d2a7511fc 100644
--- a/tests/ignite/metrics/test_accuracy.py
+++ b/tests/ignite/metrics/test_accuracy.py
@@ -47,7 +47,7 @@ def test_binary_wrong_inputs():
with pytest.raises(ValueError, match=r"For binary cases, y_pred must be comprised of 0's and 1's"):
# y_pred values are not thresholded to 0, 1 values
- acc.update((torch.rand(10,), torch.randint(0, 2, size=(10,)).long(),))
+ acc.update((torch.rand(10), torch.randint(0, 2, size=(10,)).long()))
with pytest.raises(ValueError, match=r"y must have shape of "):
# incompatible shapes
diff --git a/tests/ignite/metrics/test_loss.py b/tests/ignite/metrics/test_loss.py
index d425c017ca3f..a3ae8ce1053a 100644
--- a/tests/ignite/metrics/test_loss.py
+++ b/tests/ignite/metrics/test_loss.py
@@ -123,7 +123,7 @@ def test_gradient_based_loss():
def loss_fn(y_pred, x):
gradients = torch.autograd.grad(
- outputs=y_pred, inputs=x, grad_outputs=torch.ones_like(y_pred), create_graph=True,
+ outputs=y_pred, inputs=x, grad_outputs=torch.ones_like(y_pred), create_graph=True
)[0]
gradients = gradients.flatten(start_dim=1)
diff --git a/tests/ignite/metrics/test_metric.py b/tests/ignite/metrics/test_metric.py
index abf7193321be..6be8e111684d 100644
--- a/tests/ignite/metrics/test_metric.py
+++ b/tests/ignite/metrics/test_metric.py
@@ -437,7 +437,7 @@ def data(y_pred, y):
d = data(y_pred, y)
state = validator.run(d, max_epochs=1, epoch_length=y_pred.shape[0])
- assert set(state.metrics.keys()) == set([metric_name,])
+ assert set(state.metrics.keys()) == set([metric_name])
np_y_pred = np.argmax(y_pred.numpy(), axis=-1).ravel()
np_y = y.numpy().ravel()
assert state.metrics[metric_name] == approx(compute_true_value_fn(np_y_pred, np_y))
@@ -961,7 +961,7 @@ def compute(self):
evaluator.run(data)
-@pytest.mark.parametrize("shapes", [[(10,), ()], [(5, 32, 32), (5, 32, 32)],])
+@pytest.mark.parametrize("shapes", [[(10,), ()], [(5, 32, 32), (5, 32, 32)]])
def test_list_of_tensors_and_numbers(shapes):
def check_fn(output):
assert len(output) == 2
diff --git a/tests/ignite/metrics/test_metrics_lambda.py b/tests/ignite/metrics/test_metrics_lambda.py
index 84d9bcce428c..72fce39885c6 100644
--- a/tests/ignite/metrics/test_metrics_lambda.py
+++ b/tests/ignite/metrics/test_metrics_lambda.py
@@ -395,7 +395,7 @@ def data(y_pred, y):
d = data(y_pred, y)
state = validator.run(d, max_epochs=1, epoch_length=y_pred.shape[0])
- assert set(state.metrics.keys()) == set([metric_name,])
+ assert set(state.metrics.keys()) == set([metric_name])
np_y_pred = y_pred.numpy().ravel()
np_y = y.numpy().ravel()
assert state.metrics[metric_name] == approx(compute_true_value_fn(np_y_pred, np_y))
diff --git a/tests/ignite/metrics/test_precision.py b/tests/ignite/metrics/test_precision.py
index 74003a0724b0..5f07d5413526 100644
--- a/tests/ignite/metrics/test_precision.py
+++ b/tests/ignite/metrics/test_precision.py
@@ -38,7 +38,7 @@ def test_binary_wrong_inputs():
with pytest.raises(ValueError, match=r"For binary cases, y_pred must be comprised of 0's and 1's"):
# y_pred values are not thresholded to 0, 1 values
- pr.update((torch.rand(10,), torch.randint(0, 2, size=(10,)).long(),))
+ pr.update((torch.rand(10), torch.randint(0, 2, size=(10,)).long()))
assert pr._updated is False
with pytest.raises(ValueError, match=r"y must have shape of"):
diff --git a/tests/ignite/metrics/test_recall.py b/tests/ignite/metrics/test_recall.py
index 4d973dee16cf..12c47e9f27e2 100644
--- a/tests/ignite/metrics/test_recall.py
+++ b/tests/ignite/metrics/test_recall.py
@@ -357,7 +357,7 @@ def _test(average):
re.update((y_pred, y))
assert re._updated is True
- y_pred = torch.zeros(4,)
+ y_pred = torch.zeros(4)
y = torch.ones(4).long()
with pytest.raises(RuntimeError):
diff --git a/tests/ignite/metrics/test_root_mean_squared_error.py b/tests/ignite/metrics/test_root_mean_squared_error.py
index 7c0ccee4d60b..a92e5908deb4 100644
--- a/tests/ignite/metrics/test_root_mean_squared_error.py
+++ b/tests/ignite/metrics/test_root_mean_squared_error.py
@@ -43,10 +43,10 @@ def _test(y_pred, y, batch_size):
def get_test_cases():
test_cases = [
- (torch.empty(10,).uniform_(0, 10), torch.empty(10,).uniform_(0, 10), 1,),
+ (torch.empty(10).uniform_(0, 10), torch.empty(10).uniform_(0, 10), 1),
(torch.empty(10, 1).uniform_(-10, 10), torch.empty(10, 1).uniform_(-10, 10), 1),
# updated batches
- (torch.empty(50,).uniform_(0, 10), torch.empty(50).uniform_(0, 10), 16,),
+ (torch.empty(50).uniform_(0, 10), torch.empty(50).uniform_(0, 10), 16),
(torch.empty(50, 1).uniform_(-10, 10), torch.empty(50, 1).uniform_(-10, 10), 16),
]
diff --git a/tests/ignite/metrics/test_running_average.py b/tests/ignite/metrics/test_running_average.py
index 857e929c8509..9d034c1c781b 100644
--- a/tests/ignite/metrics/test_running_average.py
+++ b/tests/ignite/metrics/test_running_average.py
@@ -192,11 +192,11 @@ def assert_equal_running_avg_output_values(engine):
def test_multiple_attach():
n_iters = 100
- errD_values = iter(np.random.rand(n_iters,))
- errG_values = iter(np.random.rand(n_iters,))
- D_x_values = iter(np.random.rand(n_iters,))
- D_G_z1 = iter(np.random.rand(n_iters,))
- D_G_z2 = iter(np.random.rand(n_iters,))
+ errD_values = iter(np.random.rand(n_iters))
+ errG_values = iter(np.random.rand(n_iters))
+ D_x_values = iter(np.random.rand(n_iters))
+ D_G_z1 = iter(np.random.rand(n_iters))
+ D_G_z2 = iter(np.random.rand(n_iters))
def update_fn(engine, batch):
return {
diff --git a/tests/ignite/test_utils.py b/tests/ignite/test_utils.py
index 542f06e28cc5..81e30de97c20 100644
--- a/tests/ignite/test_utils.py
+++ b/tests/ignite/test_utils.py
@@ -191,29 +191,26 @@ def func_no_docs():
# Test on function with docs, @deprecated without reasons
@deprecated("0.4.2", "0.6.0")
def func_no_reasons():
- """Docs are cool
- """
+ """Docs are cool"""
return 24
- assert func_no_reasons.__doc__ == "**Deprecated function**.\n\n Docs are cool\n .. deprecated:: 0.4.2"
+ assert func_no_reasons.__doc__ == "**Deprecated function**.\n\n Docs are cool.. deprecated:: 0.4.2"
# Test on function with docs, @deprecated with reasons
@deprecated("0.4.2", "0.6.0", reasons=("r1", "r2"))
def func_no_warnings():
- """Docs are very cool
- """
+ """Docs are very cool"""
return 24
assert (
func_no_warnings.__doc__
- == "**Deprecated function**.\n\n Docs are very cool\n .. deprecated:: 0.4.2\n\n\t\n\t- r1\n\t- r2"
+ == "**Deprecated function**.\n\n Docs are very cool.. deprecated:: 0.4.2\n\n\t\n\t- r1\n\t- r2"
)
# Tests that the function emits DeprecationWarning
@deprecated("0.4.2", "0.6.0", reasons=("r1", "r2"))
def func_check_warning():
- """Docs are very ...
- """
+ """Docs are very ..."""
return 24
with pytest.deprecated_call():
@@ -252,7 +249,7 @@ def test_hash_checkpoint(tmp_path):
model = squeezenet1_0()
torch.hub.download_url_to_file(
- "https://download.pytorch.org/models/squeezenet1_0-b66bff10.pth", f"{tmp_path}/squeezenet1_0.pt",
+ "https://download.pytorch.org/models/squeezenet1_0-b66bff10.pth", f"{tmp_path}/squeezenet1_0.pt"
)
hash_checkpoint_path, sha_hash = hash_checkpoint(f"{tmp_path}/squeezenet1_0.pt", str(tmp_path))
model.load_state_dict(torch.load(str(hash_checkpoint_path), "cpu"), True)
diff --git a/tests/run_code_style.bat b/tests/run_code_style.bat
index 22b84b1e5eb0..c5dea37aba9f 100644
--- a/tests/run_code_style.bat
+++ b/tests/run_code_style.bat
@@ -22,7 +22,7 @@ mypy --config-file mypy.ini
goto end
:install
-pip install flake8 "black==19.10b0" "isort==5.7.0" "mypy==0.910"
+pip install flake8 "black==21.12b0" "isort==5.7.0" "mypy==0.910"
goto end
:end
diff --git a/tests/run_code_style.sh b/tests/run_code_style.sh
index 8899678cc6d1..4e0ec75beb4b 100755
--- a/tests/run_code_style.sh
+++ b/tests/run_code_style.sh
@@ -12,5 +12,5 @@ elif [ $1 = "fmt" ]; then
elif [ $1 = "mypy" ]; then
mypy --config-file mypy.ini
elif [ $1 = "install" ]; then
- pip install flake8 "black==19.10b0" "isort==5.7.0" "mypy==0.910"
+ pip install flake8 "black==21.12b0" "isort==5.7.0" "mypy==0.910"
fi