4 changes: 2 additions & 2 deletions .github/workflows/trigger_circle_ci.py
@@ -24,7 +24,7 @@ def trigger_new_pipeline(data, headers):
         "https://circleci.com/api/v2/project/gh/pytorch/ignite/pipeline", data=json.dumps(data), headers=headers
     )
     assert_result(result, 201)
-    output = get_output(result.text, ["id",])
+    output = get_output(result.text, ["id"])
     return output["id"]


@@ -46,7 +46,7 @@ def get_workflow_id(pipeline_id, headers):
     while True:
        result = requests.get(f"https://circleci.com/api/v2/pipeline/{pipeline_id}/workflow", headers=headers)
         assert_result(result, 200)
-        output = get_output(result.text, ["items",])
+        output = get_output(result.text, ["items"])
         items = output["items"]
         if len(items) > 1:
             raise RuntimeError(f"Incorrect number of workflow ids: {len(items)} != 1\n" f"items: {items}")
2 changes: 1 addition & 1 deletion .pre-commit-config.yaml
@@ -15,7 +15,7 @@ repos:
         exclude_types: ["python", "jupyter", "shell", "gitignore"]

   - repo: https://github.com/python/black
-    rev: 19.10b0
+    rev: 21.12b0
     hooks:
       - id: black
         language_version: python3.8
8 changes: 4 additions & 4 deletions CONTRIBUTING.md
@@ -109,11 +109,11 @@ If you modify the code, you will most probably also need to code some tests to e

 - naming convention for files `test_*.py`, e.g. `test_precision.py`
 - naming of testing functions `def test_*`, e.g. `def test_precision_on_random_data()`
-- if test function should run on GPU, please **make sure to add `cuda`** in the test name, e.g. `def test_something_on_cuda()`. 
+- if test function should run on GPU, please **make sure to add `cuda`** in the test name, e.g. `def test_something_on_cuda()`.
   Additionally, we may want to decorate it with `@pytest.mark.skipif(not torch.cuda.is_available(), reason="Skip if no GPU")`.
   For more examples, please see https://github.com/pytorch/ignite/blob/master/tests/ignite/engine/test_create_supervised.py
-- if test function checks distributed configuration, we have to mark the test as `@pytest.mark.distributed` and additional 
-  conditions depending on the intended checks. For example, please see
+- if test function checks distributed configuration, we have to mark the test as `@pytest.mark.distributed` and additional
+  conditions depending on the intended checks. For example, please see
   https://github.com/pytorch/ignite/blob/master/tests/ignite/metrics/test_accuracy.py
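For orientation, here is a minimal sketch of a test module that follows the conventions listed above (the file and function names are hypothetical, not taken from the ignite test suite):

```python
# test_example.py -- hypothetical module illustrating the conventions above
import pytest
import torch


def test_precision_on_random_data():
    # Plain test: the file matches test_*.py and the function matches def test_*.
    assert torch.rand(4).shape == (4,)


@pytest.mark.skipif(not torch.cuda.is_available(), reason="Skip if no GPU")
def test_something_on_cuda():
    # GPU test: "cuda" appears in the name and the skipif guard keeps it from
    # failing on CPU-only machines.
    assert torch.ones(2, device="cuda").is_cuda


@pytest.mark.distributed
def test_something_distributed():
    # Distributed test: the marker lets CI select it; real tests add further
    # conditions depending on the intended checks (backend, world size, ...).
    pass
```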


@@ -131,7 +131,7 @@ format and check codebase for compliance with PEP8.
 If you choose not to use pre-commit, you can take advantage of IDE extensions configured to black format or invoke
 black manually to format files and commit them.

-To install `flake8`, `black==19.10b0`, `isort==5.7.0` and `mypy`, please run
+To install `flake8`, `black==21.12b0`, `isort==5.7.0` and `mypy`, please run
 ```bash
 bash ./tests/run_code_style.sh install
 ```
2 changes: 1 addition & 1 deletion docker/test_image.py
@@ -33,7 +33,7 @@ def main():
             print(traceback.format_exc())
     """
     try:
-        out = client.containers.run(args.image, f"python -c '{try_except_cmd}'", auto_remove=True, stderr=True,)
+        out = client.containers.run(args.image, f"python -c '{try_except_cmd}'", auto_remove=True, stderr=True)
         assert isinstance(out, bytes), type(out)
         out = out.decode("utf-8").strip()

4 changes: 2 additions & 2 deletions examples/contrib/cifar10/main.py
@@ -223,11 +223,11 @@ def get_dataflow(config):

     # Setup data loader also adapted to distributed config: nccl, gloo, xla-tpu
     train_loader = idist.auto_dataloader(
-        train_dataset, batch_size=config["batch_size"], num_workers=config["num_workers"], shuffle=True, drop_last=True,
+        train_dataset, batch_size=config["batch_size"], num_workers=config["num_workers"], shuffle=True, drop_last=True
     )

     test_loader = idist.auto_dataloader(
-        test_dataset, batch_size=2 * config["batch_size"], num_workers=config["num_workers"], shuffle=False,
+        test_dataset, batch_size=2 * config["batch_size"], num_workers=config["num_workers"], shuffle=False
     )
     return train_loader, test_loader

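As an aside for readers of this diff: the `# Setup data loader also adapted to distributed config` comment above refers to ignite's `idist.auto_*` helpers, which adapt an object to whichever backend (nccl, gloo, xla-tpu) is active and fall back to plain single-process behavior otherwise. A minimal sketch of the pattern, with toy stand-ins for the real dataset and model:

```python
import torch
import torch.nn as nn
import ignite.distributed as idist

# Toy stand-ins; the real examples build CIFAR10 datasets and ResNets.
dataset = torch.utils.data.TensorDataset(torch.randn(64, 3), torch.randint(0, 2, (64,)))
model = nn.Linear(3, 2)
optimizer = torch.optim.SGD(model.parameters(), lr=0.1)

# With no distributed backend initialized these return ordinary objects;
# under a launched distributed configuration they add a DistributedSampler,
# wrap the model for the backend, and so on.
loader = idist.auto_dataloader(dataset, batch_size=8, num_workers=0, shuffle=True, drop_last=True)
model = idist.auto_model(model)
optimizer = idist.auto_optim(optimizer)
```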
2 changes: 1 addition & 1 deletion examples/contrib/cifar10/utils.py
@@ -13,7 +13,7 @@
     ]
 )

-test_transform = Compose([ToTensor(), Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225)),])
+test_transform = Compose([ToTensor(), Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225))])


 def get_train_test_datasets(path):
4 changes: 2 additions & 2 deletions examples/contrib/cifar10_qat/main.py
@@ -208,11 +208,11 @@ def get_dataflow(config):

     # Setup data loader also adapted to distributed config: nccl, gloo, xla-tpu
     train_loader = idist.auto_dataloader(
-        train_dataset, batch_size=config["batch_size"], num_workers=config["num_workers"], shuffle=True, drop_last=True,
+        train_dataset, batch_size=config["batch_size"], num_workers=config["num_workers"], shuffle=True, drop_last=True
     )

     test_loader = idist.auto_dataloader(
-        test_dataset, batch_size=2 * config["batch_size"], num_workers=config["num_workers"], shuffle=False,
+        test_dataset, batch_size=2 * config["batch_size"], num_workers=config["num_workers"], shuffle=False
     )
     return train_loader, test_loader

2 changes: 1 addition & 1 deletion examples/contrib/cifar10_qat/utils.py
@@ -17,7 +17,7 @@
     ]
 )

-test_transform = Compose([ToTensor(), Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225)),])
+test_transform = Compose([ToTensor(), Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225))])


 def get_train_test_datasets(path):
6 changes: 3 additions & 3 deletions examples/contrib/transformers/main.py
@@ -228,11 +228,11 @@ def get_dataflow(config):

     # Setup data loader also adapted to distributed config: nccl, gloo, xla-tpu
     train_loader = idist.auto_dataloader(
-        train_dataset, batch_size=config["batch_size"], num_workers=config["num_workers"], shuffle=True, drop_last=True,
+        train_dataset, batch_size=config["batch_size"], num_workers=config["num_workers"], shuffle=True, drop_last=True
     )

     test_loader = idist.auto_dataloader(
-        test_dataset, batch_size=2 * config["batch_size"], num_workers=config["num_workers"], shuffle=False,
+        test_dataset, batch_size=2 * config["batch_size"], num_workers=config["num_workers"], shuffle=False
     )
     return train_loader, test_loader

@@ -246,7 +246,7 @@ def initialize(config):
     # Adapt model for distributed settings if configured
     model = idist.auto_model(model)

-    optimizer = optim.AdamW(model.parameters(), lr=config["learning_rate"], weight_decay=config["weight_decay"],)
+    optimizer = optim.AdamW(model.parameters(), lr=config["learning_rate"], weight_decay=config["weight_decay"])
     optimizer = idist.auto_optim(optimizer)
     criterion = nn.BCEWithLogitsLoss()

14 changes: 6 additions & 8 deletions examples/gan/dcgan.py
@@ -35,7 +35,7 @@


 class Net(nn.Module):
-    """ A base class for both generator and the discriminator.
+    """A base class for both generator and the discriminator.
     Provides a common weight initialization scheme.

     """
@@ -56,7 +56,7 @@ def forward(self, x):


 class Generator(Net):
-    """ Generator network.
+    """Generator network.

     Args:
         nf (int): Number of filters in the second-to-last deconv layer
@@ -95,7 +95,7 @@ def forward(self, x):


 class Discriminator(Net):
-    """ Discriminator network.
+    """Discriminator network.

     Args:
         nf (int): Number of filters in the first conv layer.
@@ -133,9 +131,7 @@ def forward(self, x):


 def check_manual_seed(seed):
-    """ If manual seed is not specified, choose a random one and communicate it to the user.
-
-    """
+    """If manual seed is not specified, choose a random one and communicate it to the user."""

     seed = seed or random.randint(1, 10000)
     random.seed(seed)
@@ -311,8 +309,8 @@ def step(engine, batch):
     @trainer.on(Events.ITERATION_COMPLETED(every=PRINT_FREQ))
     def print_logs(engine):
         fname = os.path.join(output_dir, LOGS_FNAME)
-        columns = ["iteration",] + list(engine.state.metrics.keys())
-        values = [str(engine.state.iteration),] + [str(round(value, 5)) for value in engine.state.metrics.values()]
+        columns = ["iteration"] + list(engine.state.metrics.keys())
+        values = [str(engine.state.iteration)] + [str(round(value, 5)) for value in engine.state.metrics.values()]

         with open(fname, "a") as f:
             if f.tell() == 0:
12 changes: 4 additions & 8 deletions examples/mnist/mnist_save_resume_engine.py
@@ -50,8 +50,7 @@ def forward(self, x):


 def get_data_loaders(train_batch_size, val_batch_size):
-    """Method to setup data loaders: train_loader and val_loader
-    """
+    """Method to setup data loaders: train_loader and val_loader"""
     data_transform = Compose([ToTensor(), Normalize((0.1307,), (0.3081,))])

     train_loader = DataLoader(
@@ -71,8 +70,7 @@ def get_data_loaders(train_batch_size, val_batch_size):


 def log_model_weights(engine, model=None, fp=None, **kwargs):
-    """Helper method to log norms of model weights: print and dump into a file
-    """
+    """Helper method to log norms of model weights: print and dump into a file"""
     assert model and fp
     output = {"total": 0.0}
     max_counter = 5
@@ -92,8 +90,7 @@ def log_model_weights(engine, model=None, fp=None, **kwargs):


 def log_model_grads(engine, model=None, fp=None, **kwargs):
-    """Helper method to log norms of model gradients: print and dump into a file
-    """
+    """Helper method to log norms of model gradients: print and dump into a file"""
     assert model and fp
     output = {"grads/total": 0.0}
     max_counter = 5
@@ -116,8 +113,7 @@ def log_model_grads(engine, model=None, fp=None, **kwargs):


 def log_data_stats(engine, fp=None, **kwargs):
-    """Helper method to log mean/std of input batch of images and median of batch of targets.
-    """
+    """Helper method to log mean/std of input batch of images and median of batch of targets."""
     assert fp
     x, y = engine.state.batch
     output = {
2 changes: 1 addition & 1 deletion examples/mnist/mnist_with_tensorboard_on_tpu.py
@@ -84,7 +84,7 @@ def run(train_batch_size, val_batch_size, epochs, lr, momentum, log_interval, lo

     # Create trainer and evaluator
     trainer = create_supervised_trainer(
-        model, optimizer, criterion, device=device, output_transform=lambda x, y, y_pred, loss: [loss.item(),]
+        model, optimizer, criterion, device=device, output_transform=lambda x, y, y_pred, loss: [loss.item()]
     )

     val_metrics = {"accuracy": Accuracy(), "nll": Loss(criterion)}
Another changed file (path not shown):
@@ -52,16 +52,16 @@ def get_train_val_loaders(
     train_eval_ds = train_ds

     train_loader = idist.auto_dataloader(
-        train_ds, shuffle=True, batch_size=batch_size, num_workers=num_workers, drop_last=True,
+        train_ds, shuffle=True, batch_size=batch_size, num_workers=num_workers, drop_last=True
     )

     val_batch_size = batch_size * 4 if val_batch_size is None else val_batch_size
     val_loader = idist.auto_dataloader(
-        val_ds, shuffle=False, batch_size=val_batch_size, num_workers=num_workers, drop_last=False,
+        val_ds, shuffle=False, batch_size=val_batch_size, num_workers=num_workers, drop_last=False
     )

     train_eval_loader = idist.auto_dataloader(
-        train_eval_ds, shuffle=False, batch_size=val_batch_size, num_workers=num_workers, drop_last=False,
+        train_eval_ds, shuffle=False, batch_size=val_batch_size, num_workers=num_workers, drop_last=False
     )

     return train_loader, val_loader, train_eval_loader
Another changed file (path not shown):
@@ -61,7 +61,7 @@ def train_update_function(engine, batch):
         "supervised batch loss": loss.item(),
     }

-    output_names = getattr(config, "output_names", ["supervised batch loss",])
+    output_names = getattr(config, "output_names", ["supervised batch loss"])
     lr_scheduler = config.lr_scheduler

     trainer = Engine(train_update_function)
@@ -97,7 +97,7 @@ def create_evaluators(model, metrics, config):
         device=config.device,
         non_blocking=True,
         prepare_batch=config.prepare_batch,
-        output_transform=lambda x, y, y_pred: (model_output_transform(y_pred), y,),
+        output_transform=lambda x, y, y_pred: (model_output_transform(y_pred), y),
     )
     train_evaluator = create_supervised_evaluator(**evaluator_args)
     evaluator = create_supervised_evaluator(**evaluator_args)
Another changed file (path not shown):
@@ -69,9 +67,7 @@ def _plx_log_params(params_dict):
     from polyaxon_client.tracking import Experiment

     plx_exp = Experiment()
-    plx_exp.log_params(
-        **{"pytorch version": torch.__version__, "ignite version": ignite.__version__,}
-    )
+    plx_exp.log_params(**{"pytorch version": torch.__version__, "ignite version": ignite.__version__})
     plx_exp.log_params(**params_dict)

@@ -86,9 +84,7 @@ def _mlflow_log_artifact(fp):

 @idist.one_rank_only()
 def _mlflow_log_params(params_dict):
-    mlflow.log_params(
-        {"pytorch version": torch.__version__, "ignite version": ignite.__version__,}
-    )
+    mlflow.log_params({"pytorch version": torch.__version__, "ignite version": ignite.__version__})
     mlflow.log_params(params_dict)

Another changed file (path not shown):
@@ -106,7 +106,7 @@ def model_output_transform(output):
 nesterov = False

 optimizer = optim.SGD(
-    [{"params": model.backbone.parameters()}, {"params": model.classifier.parameters()},],
+    [{"params": model.backbone.parameters()}, {"params": model.classifier.parameters()}],
     lr=1.0,
     momentum=momentum,
     weight_decay=weight_decay,
Another changed file (path not shown):
@@ -110,7 +110,7 @@ def model_output_transform(output):
 nesterov = False

 optimizer = optim.SGD(
-    [{"params": model.backbone.parameters()}, {"params": model.classifier.parameters()},],
+    [{"params": model.backbone.parameters()}, {"params": model.classifier.parameters()}],
     lr=1.0,
     momentum=momentum,
     weight_decay=weight_decay,
10 changes: 5 additions & 5 deletions examples/references/segmentation/pascal_voc2012/dataflow.py
@@ -66,7 +66,7 @@ def __getitem__(self, index):
             return {
                 "image": img,
                 "mask": mask,
-                "meta": {"index": index, "image_path": self.images[index], "mask_path": self.masks[index],},
+                "meta": {"index": index, "image_path": self.images[index], "mask_path": self.masks[index]},
             }

         return {"image": img, "mask": mask}
@@ -93,20 +93,20 @@ def __getitem__(self, index):
             return {
                 "image": img,
                 "mask": mask,
-                "meta": {"index": index, "image_path": self.images[index], "mask_path": self.masks[index],},
+                "meta": {"index": index, "image_path": self.images[index], "mask_path": self.masks[index]},
             }

         return {"image": img, "mask": mask}


 def get_train_dataset(root_path, return_meta=False):
     return VOCSegmentationOpencv(
-        root=root_path, year="2012", image_set="train", download=False, return_meta=return_meta,
+        root=root_path, year="2012", image_set="train", download=False, return_meta=return_meta
     )


 def get_val_dataset(root_path, return_meta=False):
-    return VOCSegmentationOpencv(root=root_path, year="2012", image_set="val", download=False, return_meta=return_meta,)
+    return VOCSegmentationOpencv(root=root_path, year="2012", image_set="val", download=False, return_meta=return_meta)


 def get_train_noval_sbdataset(root_path, return_meta=False):
@@ -187,7 +187,7 @@ def get_train_val_loaders(


 def get_inference_dataloader(
-    root_path, mode, transforms, batch_size=16, num_workers=8, pin_memory=True, limit_num_samples=None,
+    root_path, mode, transforms, batch_size=16, num_workers=8, pin_memory=True, limit_num_samples=None
 ):
     assert mode in ("train", "test"), "Mode should be 'train' or 'test'"

20 changes: 6 additions & 14 deletions examples/references/segmentation/pascal_voc2012/main.py
@@ -138,7 +138,7 @@ def custom_event_filter(_, val_iteration):
         tb_logger.attach(
             evaluator,
             log_handler=vis.predictions_gt_images_handler(
-                img_denormalize_fn=img_denormalize, n_images=15, another_engine=trainer, prefix_tag="validation",
+                img_denormalize_fn=img_denormalize, n_images=15, another_engine=trainer, prefix_tag="validation"
             ),
             event_name=Events.ITERATION_COMPLETED(event_filter=custom_event_filter),
         )
@@ -296,7 +296,7 @@ def setup_experiment_tracking(config, with_clearml, task_type="training"):

     schema = TrainvalConfigSchema if task_type == "training" else InferenceConfigSchema

-    task = Task.init("Pascal-VOC12 Training", config.config_filepath.stem, task_type=task_type,)
+    task = Task.init("Pascal-VOC12 Training", config.config_filepath.stem, task_type=task_type)
     task.connect_configuration(config.config_filepath.as_posix())

     task.upload_artifact(config.script_filepath.name, config.script_filepath.as_posix())
@@ -313,12 +313,8 @@ def setup_experiment_tracking(config, with_clearml, task_type="training"):
         output_path = output_path / datetime.now().strftime("%Y%m%d-%H%M%S")
         output_path.mkdir(parents=True, exist_ok=True)

-        shutil.copyfile(
-            config.script_filepath.as_posix(), output_path / config.script_filepath.name,
-        )
-        shutil.copyfile(
-            config.config_filepath.as_posix(), output_path / config.config_filepath.name,
-        )
+        shutil.copyfile(config.script_filepath.as_posix(), output_path / config.script_filepath.name)
+        shutil.copyfile(config.config_filepath.as_posix(), output_path / config.config_filepath.name)

         output_path = output_path.as_posix()
     return Path(idist.broadcast(output_path, src=0))
@@ -419,9 +415,7 @@ def evaluation(local_rank, config, logger, with_clearml):
     # Setup Tensorboard logger
     if rank == 0:
         tb_logger = common.TensorboardLogger(log_dir=config.output_path.as_posix())
-        tb_logger.attach_output_handler(
-            evaluator, event_name=Events.COMPLETED, tag="validation", metric_names="all",
-        )
+        tb_logger.attach_output_handler(evaluator, event_name=Events.COMPLETED, tag="validation", metric_names="all")

         # Log confusion matrix to ClearML:
         if with_clearml:
@@ -473,6 +467,4 @@ def run_evaluation(config_filepath, backend="nccl", with_clearml=True):

 if __name__ == "__main__":

-    fire.Fire(
-        {"download": download_datasets, "training": run_training, "eval": run_evaluation,}
-    )
+    fire.Fire({"download": download_datasets, "training": run_training, "eval": run_evaluation})