1 change: 1 addition & 0 deletions CHANGELOG.md
@@ -8,6 +8,7 @@ All notable changes to this project will be documented in this file.

- Enable configurable confidence threshold for otx eval and export(<https://github.com/openvinotoolkit/training_extensions/pull/2388>)
- Add YOLOX variants as new object detector models (<https://github.com/openvinotoolkit/training_extensions/pull/2402>)
- Enable FeatureVectorHook to support action tasks(<https://github.com/openvinotoolkit/training_extensions/pull/2408>)

### Enhancements

42 changes: 22 additions & 20 deletions src/otx/algorithms/action/adapters/mmaction/task.py
@@ -17,6 +17,7 @@
import glob
import os
import time
from contextlib import nullcontext
from copy import deepcopy
from functools import partial
from typing import Dict, Optional, Union
@@ -35,6 +36,10 @@
Exporter,
)
from otx.algorithms.action.task import OTXActionTask
from otx.algorithms.common.adapters.mmcv.hooks.recording_forward_hook import (
BaseRecordingForwardHook,
FeatureVectorHook,
)
from otx.algorithms.common.adapters.mmcv.utils import (
adapt_batch_size,
build_data_parallel,
@@ -363,8 +368,7 @@ def _infer_model(
)
)

dump_features = False
dump_saliency_map = False
dump_features = True

self._init_task()

Expand Down Expand Up @@ -400,6 +404,7 @@ def _infer_model(
model = self.build_model(cfg, fp16=cfg.get("fp16", False))
model.CLASSES = target_classes
model.eval()
feature_model = model
model = build_data_parallel(model, cfg, distributed=False)

# InferenceProgressCallback (Time Monitor enable into Infer task)
@@ -423,32 +428,29 @@ def hook(module, inp, outp):
feature_vectors = []
saliency_maps = []

def dump_features_hook():
raise NotImplementedError("get_feature_vector function for mmaction is not implemented")

# pylint: disable=unused-argument
def dummy_dump_features_hook(model, inp, out):
feature_vectors.append(None)

def dump_saliency_hook():
raise NotImplementedError("get_saliency_map for mmaction is not implemented")

# pylint: disable=unused-argument
def dummy_dump_saliency_hook(model, inp, out):
saliency_maps.append(None)

feature_vector_hook = dump_features_hook if dump_features else dummy_dump_features_hook
saliency_map_hook = dump_saliency_hook if dump_saliency_map else dummy_dump_saliency_hook
if not dump_features:
feature_vector_hook: Union[nullcontext, BaseRecordingForwardHook] = nullcontext()
else:
feature_vector_hook = FeatureVectorHook(feature_model)
saliency_hook = nullcontext()

prog_bar = ProgressBar(len(dataloader))
with model.module.backbone.register_forward_hook(feature_vector_hook):
with model.module.backbone.register_forward_hook(saliency_map_hook):
with feature_vector_hook:
with saliency_hook:
for data in dataloader:
with torch.no_grad():
result = model(return_loss=False, **data)
eval_predictions.extend(result)
for _ in range(videos_per_gpu):
prog_bar.update()
if isinstance(feature_vector_hook, nullcontext):
feature_vectors = [None] * len(mm_dataset)
else:
feature_vectors = feature_vector_hook.records
if isinstance(saliency_hook, nullcontext):
saliency_maps = [None] * len(mm_dataset)
else:
saliency_maps = saliency_hook.records
prog_bar.file.write("\n")

for key in ["interval", "tmpdir", "start", "gpu_collect", "save_best", "rule", "dynamic_intervals"]:
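The substance of this change is that the unimplemented feature/saliency hook stubs are replaced by objects that double as context managers: `nullcontext()` when features are not requested, or `FeatureVectorHook(feature_model)` when they are, with the collected vectors read back from `.records` after the inference loop. Below is a minimal sketch of that recording-hook pattern; `RecordingForwardHook`, its pooling step, and the toy backbone are illustrative assumptions, not the actual OTX `FeatureVectorHook` implementation.

```python
# Minimal sketch of the recording-hook pattern; this is NOT the actual OTX
# FeatureVectorHook -- the class name, pooling step, and toy backbone are
# illustrative assumptions.
from contextlib import nullcontext
from typing import List, Optional, Union

import torch
import torch.nn as nn


class RecordingForwardHook:
    """Registers a forward hook on a backbone while the context is active and
    stores one pooled feature vector per forward pass in ``records``."""

    def __init__(self, backbone: nn.Module) -> None:
        self._backbone = backbone
        self._handle = None
        self.records: List[torch.Tensor] = []

    def _hook(self, module: nn.Module, inputs, output: torch.Tensor) -> None:
        # Global-average-pool the spatio-temporal dims down to a flat vector.
        vector = output.mean(dim=tuple(range(2, output.dim())))
        self.records.append(vector.detach().cpu())

    def __enter__(self) -> "RecordingForwardHook":
        self._handle = self._backbone.register_forward_hook(self._hook)
        return self

    def __exit__(self, *exc) -> None:
        if self._handle is not None:
            self._handle.remove()


# Usage mirroring the diff: fall back to nullcontext() when features are not needed.
backbone = nn.Conv3d(3, 8, kernel_size=1)
dump_features = True
feature_vector_hook: Union[nullcontext, RecordingForwardHook] = (
    RecordingForwardHook(backbone) if dump_features else nullcontext()
)
with feature_vector_hook:
    backbone(torch.randn(2, 3, 8, 7, 7))  # bs, channel, video_len, h, w

feature_vectors: List[Optional[torch.Tensor]] = (
    [None] if isinstance(feature_vector_hook, nullcontext) else feature_vector_hook.records
)
```

The design mirrors the diff: the hook is attached to `feature_model` (the plain model captured before `build_data_parallel` wraps it), and the `nullcontext()` fallback keeps the `with` blocks uniform whether or not features are being dumped.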
29 changes: 16 additions & 13 deletions tests/unit/algorithms/action/adapters/mmaction/test_task.py
@@ -64,15 +64,11 @@ class MockModel(nn.Module):

def __init__(self, task_type):
super().__init__()
self.module = MockModule()
self.module.backbone = MockModule()
self.backbone = MockModule()
self.task_type = task_type

def forward(self, return_loss: bool, imgs: DatasetItemEntity):
forward_hooks = list(self.module.backbone._forward_hooks.values())
for hook in forward_hooks:
hook(1, 2, 3)
feat = self.backbone(torch.randn(1, 400, 8, 7, 7)) # bs, channel, video_len, h, w
if self.task_type == "cls":
return np.array([[0, 0, 1]])
return [[np.array([[0, 0, 1, 1, 0.1]]), np.array([[0, 0, 1, 1, 0.2]]), np.array([[0, 0, 1, 1, 0.7]])]]
@@ -96,6 +92,9 @@ def evaluate(self, prediction, *args, **kwargs):
else:
return {"mAP@0.5IOU": 1.0}

def __len__(self):
return len(self.dataset)


class MockDataLoader:
"""Mock class for data loader."""
@@ -130,6 +129,10 @@ def export(self):
f.write(dummy_data)


def return_model(model, *args, **kwargs):
return model


class TestMMActionTask:
"""Test class for MMActionTask.

@@ -198,7 +201,7 @@ def test_train(self, mocker) -> None:
mocker.patch.object(MMActionTask, "get_model_ckpt", return_value="fake_weight")
mocker.patch(
"otx.algorithms.action.adapters.mmaction.task.build_data_parallel",
return_value=MockModel("cls"),
side_effect=return_model,
)
mocker.patch(
"otx.algorithms.action.adapters.mmaction.task.train_model",
@@ -217,7 +220,7 @@
_config = ModelConfiguration(ActionConfig(), self.cls_label_schema)
output_model = ModelEntity(self.cls_dataset, _config)
self.cls_task.train(self.cls_dataset, output_model)
output_model.performance == 1.0
assert output_model.performance.score.value == 1.0
assert self.cls_task._recipe_cfg.data.workers_per_gpu == num_cpu // num_gpu # test adaptive num_workers

mocker.patch(
@@ -231,12 +234,12 @@
mocker.patch.object(MMActionTask, "build_model", return_value=MockModel("det"))
mocker.patch(
"otx.algorithms.action.adapters.mmaction.task.build_data_parallel",
return_value=MockModel("det"),
side_effect=return_model,
)
_config = ModelConfiguration(ActionConfig(), self.det_label_schema)
output_model = ModelEntity(self.det_dataset, _config)
self.det_task.train(self.det_dataset, output_model)
output_model.performance == 1.0
assert output_model.performance.score.value == 1.0
assert self.cls_task._recipe_cfg.data.workers_per_gpu == num_cpu // num_gpu # test adaptive num_workers

@e2e_pytest_unit
@@ -261,7 +264,7 @@ def test_infer(self, mocker) -> None:
mocker.patch.object(MMActionTask, "build_model", return_value=MockModel("cls"))
mocker.patch(
"otx.algorithms.action.adapters.mmaction.task.build_data_parallel",
return_value=MockModel("cls"),
side_effect=return_model,
)

inference_parameters = InferenceParameters(is_evaluation=True)
@@ -280,7 +283,7 @@
mocker.patch.object(MMActionTask, "build_model", return_value=MockModel("det"))
mocker.patch(
"otx.algorithms.action.adapters.mmaction.task.build_data_parallel",
return_value=MockModel("det"),
side_effect=return_model,
)
inference_parameters = InferenceParameters(is_evaluation=True)
outputs = self.det_task.infer(self.det_dataset, inference_parameters)
@@ -397,7 +400,7 @@ def test_geti_scenario(self, mocker):
mocker.patch.object(MMActionTask, "get_model_ckpt", return_value="fake_weight")
mocker.patch(
"otx.algorithms.action.adapters.mmaction.task.build_data_parallel",
return_value=MockModel("cls"),
side_effect=return_model,
)
mocker.patch(
"otx.algorithms.action.adapters.mmaction.task.train_model",
@@ -428,7 +431,7 @@
mocker.patch.object(MMActionTask, "build_model", return_value=MockModel("cls"))
mocker.patch(
"otx.algorithms.action.adapters.mmaction.task.build_data_parallel",
return_value=MockModel("cls"),
side_effect=return_model,
)

inference_parameters = InferenceParameters(is_evaluation=True)
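One detail worth noting in the test updates: `build_data_parallel` is now patched with `side_effect=return_model` instead of `return_value=MockModel(...)`, so the mock hands back whatever model the task actually built, and the forward hook registered on its backbone fires when `MockModel.forward` calls `self.backbone(...)`. A small self-contained sketch of the difference between the two patching styles (the names here are illustrative, not tied to OTX):

```python
# Illustrative sketch of return_value vs side_effect; MagicMock stands in for
# the patched build_data_parallel, and the names here are not tied to OTX.
from unittest.mock import MagicMock


def return_model(model, *args, **kwargs):
    # Pass-through: hand back the model the task built, so hooks registered on
    # its backbone still fire during the mocked inference run.
    return model


canned = MagicMock(return_value="fixed mock model")   # always the same object
passthrough = MagicMock(side_effect=return_model)     # echoes its first argument

real_model = object()
assert canned(real_model) == "fixed mock model"
assert passthrough(real_model, cfg=None, distributed=False) is real_model
```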