6 changes: 5 additions & 1 deletion mmdeploy/codebase/base/task.py
@@ -230,6 +230,7 @@ def evaluate_outputs(model_cfg,
out: Optional[str] = None,
metric_options: Optional[dict] = None,
format_only: bool = False,
log_file: Optional[str] = None,
**kwargs):
"""Perform post-processing to predictions of model.

@@ -244,13 +245,16 @@ def evaluate_outputs(model_cfg,
for single label dataset, and "mAP", "CP", "CR", "CF1",
"OP", "OR", "OF1" for multi-label dataset in mmcls.
Defaults to `None`.
out (str): Output result file in pickle format, defaults to `None`.
out (str): Output inference results in pickle format, defaults to
`None`.
metric_options (dict): Custom options for evaluation, will be
kwargs for dataset.evaluate() function. Defaults to `None`.
format_only (bool): Format the output results without performing
evaluation. It is useful when you want to format the result
to a specific format and submit it to the test server. Defaults
to `False`.
log_file (str | None): The file to write the evaluation results to.
Defaults to `None` and the results will only be printed to stdout.
"""
pass

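For orientation, here is a minimal, self-contained sketch (not mmdeploy code) of the contract this abstract hook now defines: a concrete task receives an optional `log_file` and routes its evaluation summary through an mmcv logger that always writes to stdout and, when a path is given, to that file as well. The class, metric, and file names below are made up for illustration.

```python
import logging
from typing import Optional, Sequence

from mmcv.utils import get_logger


class DummyTask:
    """Stand-in for a concrete BaseTask subclass; everything here is illustrative."""

    @staticmethod
    def evaluate_outputs(model_cfg,
                         outputs: Sequence,
                         dataset,
                         metrics: Optional[str] = None,
                         out: Optional[str] = None,
                         metric_options: Optional[dict] = None,
                         format_only: bool = False,
                         log_file: Optional[str] = None,
                         **kwargs):
        # Same pattern as the concrete tasks in this PR: one named logger,
        # stdout always, plus a file handler when log_file is a path.
        logger = get_logger('test', log_file=log_file, log_level=logging.INFO)
        logger.info(f'accuracy : {sum(outputs) / len(outputs):.2f}')


# Writes "accuracy : 0.85" to stdout and to eval.log. Note that mmcv's
# get_logger configures a named logger only on first use, so later calls
# with a different log_file reuse the already-installed handlers.
DummyTask.evaluate_outputs(None, [0.9, 0.8], None, log_file='eval.log')
```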
22 changes: 14 additions & 8 deletions mmdeploy/codebase/mmcls/deploy/classification.py
@@ -1,4 +1,5 @@
# Copyright (c) OpenMMLab. All rights reserved.
import logging
from typing import Any, Dict, Optional, Sequence, Tuple, Union

import mmcv
@@ -208,7 +209,8 @@ def evaluate_outputs(model_cfg: mmcv.Config,
metrics: Optional[str] = None,
out: Optional[str] = None,
metric_options: Optional[dict] = None,
format_only: bool = False) -> None:
format_only: bool = False,
log_file: Optional[str] = None) -> None:
"""Perform post-processing to predictions of model.

Args:
@@ -224,13 +226,17 @@ def evaluate_outputs(model_cfg: mmcv.Config,
evaluation. It is useful when you want to format the result
to a specific format and submit it to the test server.
Default: False.
log_file (str | None): The file to write the evaluation results to.
Defaults to `None` and the results will only be printed to stdout.
"""
import warnings
from mmcv.utils import get_logger
logger = get_logger('test', log_file=log_file, log_level=logging.INFO)

if metrics:
results = dataset.evaluate(outputs, metrics, metric_options)
for k, v in results.items():
print(f'\n{k} : {v:.2f}')
logger.info(f'{k} : {v:.2f}')
else:
warnings.warn('Evaluation metrics are not specified.')
scores = np.vstack(outputs)
@@ -243,13 +249,13 @@ def evaluate_outputs(model_cfg: mmcv.Config,
'pred_class': pred_class
}
if not out:
print('\nthe predicted result for the first element is '
f'pred_score = {pred_score[0]:.2f}, '
f'pred_label = {pred_label[0]} '
f'and pred_class = {pred_class[0]}. '
'Specify --out to save all results to files.')
logger.info('the predicted result for the first element is '
f'pred_score = {pred_score[0]:.2f}, '
f'pred_label = {pred_label[0]} '
f'and pred_class = {pred_class[0]}. '
'Specify --out to save all results to files.')
if out:
print(f'\nwriting results to {out}')
logger.debug(f'writing results to {out}')
mmcv.dump(results, out)

def get_preprocess(self) -> Dict:
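One practical effect of switching the "writing results" message to `logger.debug` above: the logger is created at INFO level, so that line is filtered out by default and only the metric summary reaches stdout and the log file. A standalone sketch of the behaviour, assuming only mmcv is installed (file and metric names are illustrative):

```python
import logging

from mmcv.utils import get_logger

logger = get_logger('test', log_file='eval.log', log_level=logging.INFO)
logger.info('accuracy_top-1 : 76.50')        # emitted to stdout and eval.log
logger.debug('writing results to res.pkl')   # filtered out at INFO level

# get_logger caches configuration per logger name, so seeing debug records
# requires a logger that was created at DEBUG level under its own name.
verbose = get_logger('debug_demo', log_level=logging.DEBUG)
verbose.debug('writing results to res.pkl')  # emitted to stdout
```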
15 changes: 10 additions & 5 deletions mmdeploy/codebase/mmdet/deploy/object_detection.py
@@ -7,7 +7,7 @@
from mmcv.parallel import DataContainer
from torch.utils.data import Dataset

from mmdeploy.utils import Task, get_root_logger
from mmdeploy.utils import Task
from mmdeploy.utils.config_utils import get_input_shape, is_dynamic_shape
from ...base import BaseTask
from .mmdetection import MMDET_TASK
@@ -235,7 +235,8 @@ def evaluate_outputs(model_cfg: mmcv.Config,
metrics: Optional[str] = None,
out: Optional[str] = None,
metric_options: Optional[dict] = None,
format_only: bool = False):
format_only: bool = False,
log_file: Optional[str] = None):
"""Perform post-processing to predictions of model.

Args:
@@ -252,10 +253,14 @@ def evaluate_outputs(model_cfg: mmcv.Config,
evaluation. It is useful when you want to format the result
to a specific format and submit it to the test server. Defaults
to `False`.
log_file (str | None): The file to write the evaluation results to.
Defaults to `None` and the results will only be printed to stdout.
"""
from mmcv.utils import get_logger
logger = get_logger('test', log_file=log_file)

if out:
logger = get_root_logger()
logger.info(f'\nwriting results to {out}')
logger.debug(f'writing results to {out}')
mmcv.dump(outputs, out)
kwargs = {} if metric_options is None else metric_options
if format_only:
@@ -269,7 +274,7 @@ def evaluate_outputs(model_cfg: mmcv.Config,
]:
eval_kwargs.pop(key, None)
eval_kwargs.update(dict(metric=metrics, **kwargs))
print(dataset.evaluate(outputs, **eval_kwargs))
logger.info(dataset.evaluate(outputs, **eval_kwargs))

def get_preprocess(self) -> Dict:
"""Get the preprocess information for SDK.
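For readers unfamiliar with the `eval_kwargs` handling in the hunk above: it copies the config's `evaluation` block and strips options that only matter to the training-time evaluation hook before handing the rest to `dataset.evaluate`. A standalone sketch of the same pattern (the key list here is abbreviated; the real list sits in the source above the fold):

```python
import mmcv

model_cfg = mmcv.Config(
    dict(evaluation=dict(interval=1, metric='bbox', save_best='auto')))

eval_kwargs = model_cfg.get('evaluation', {}).copy()
for key in ['interval', 'tmpdir', 'start', 'gpu_collect', 'save_best', 'rule']:
    eval_kwargs.pop(key, None)               # drop hook-only options
eval_kwargs.update(dict(metric='bbox'))      # metric chosen by the caller
print(eval_kwargs)                           # {'metric': 'bbox'}
```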
14 changes: 9 additions & 5 deletions mmdeploy/codebase/mmedit/deploy/super_resolution.py
@@ -10,7 +10,7 @@

from mmdeploy.codebase.base import BaseTask
from mmdeploy.codebase.mmedit.deploy.mmediting import MMEDIT_TASK
from mmdeploy.utils import Task, get_input_shape, get_root_logger, load_config
from mmdeploy.utils import Task, get_input_shape, load_config


def process_model_config(model_cfg: mmcv.Config,
@@ -249,6 +249,7 @@ def evaluate_outputs(model_cfg,
out: Optional[str] = None,
metric_options: Optional[dict] = None,
format_only: bool = False,
log_file: Optional[str] = None,
**kwargs) -> None:
"""Evaluation function implemented in mmedit.

@@ -265,17 +266,20 @@ def evaluate_outputs(model_cfg,
evaluation. It is useful when you want to format the result
to a specific format and submit it to the test server. Defaults
to `False`.
log_file (str | None): The file to write the evaluation results to.
Defaults to `None` and the results will only be printed to stdout.
"""
from mmcv.utils import get_logger
logger = get_logger('test', log_file=log_file)

if out:
logger = get_root_logger()
logger.info(f'\nwriting results to {out}')
logger.debug(f'writing results to {out}')
mmcv.dump(outputs, out)
# The Dataset doesn't need metrics
print('\n')
# print metrics
stats = dataset.evaluate(outputs)
for stat in stats:
print('Eval-{}: {}'.format(stat, stats[stat]))
logger.info('Eval-{}: {}'.format(stat, stats[stat]))

def get_preprocess(self) -> Dict:
"""Get the preprocess information for SDK.
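As the docstrings note, `--out` dumps the raw predictions rather than metrics: `mmcv.dump` picks a serializer from the file extension, so a `.pkl` path yields a pickle that can be reloaded later for offline evaluation. A small round-trip sketch (the values are made up):

```python
import mmcv

outputs = [{'PSNR': 30.21, 'SSIM': 0.91}, {'PSNR': 28.74, 'SSIM': 0.88}]
mmcv.dump(outputs, 'results.pkl')     # extension selects the pickle handler
reloaded = mmcv.load('results.pkl')
assert reloaded == outputs
```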
17 changes: 11 additions & 6 deletions mmdeploy/codebase/mmocr/deploy/text_detection.py
@@ -10,7 +10,7 @@
from torch.utils.data import Dataset

from mmdeploy.codebase.base import BaseTask
from mmdeploy.utils import Task, get_input_shape, get_root_logger
from mmdeploy.utils import Task, get_input_shape
from .mmocr import MMOCR_TASK


@@ -246,15 +246,16 @@ def evaluate_outputs(model_cfg,
metrics: Optional[str] = None,
out: Optional[str] = None,
metric_options: Optional[dict] = None,
format_only: bool = False):
format_only: bool = False,
log_file: Optional[str] = None):
"""Perform post-processing to predictions of model.

Args:
outputs (Sequence): A list of predictions of model inference.
dataset (Dataset): Input dataset to run test.
model_cfg (mmcv.Config): The model config.
metrics (str): Evaluation metrics, which depends on
the codebase and the dataset, e.g., e.g., "acc" for text
the codebase and the dataset, e.g., "acc" for text
recognition, and "hmean-iou" for text detection.
out (str): Output result file in pickle format, defaults to `None`.
metric_options (dict): Custom options for evaluation, will be
@@ -263,10 +264,14 @@ def evaluate_outputs(model_cfg,
evaluation. It is useful when you want to format the result
to a specific format and submit it to the test server. Defaults
to `False`.
log_file (str | None): The file to write the evaluation results to.
Defaults to `None` and the results will only be printed to stdout.
"""
from mmcv.utils import get_logger
logger = get_logger('test', log_file=log_file)

if out:
logger = get_root_logger()
logger.info(f'\nwriting results to {out}')
logger.debug(f'writing results to {out}')
mmcv.dump(outputs, out)
kwargs = {} if metric_options is None else metric_options
if format_only:
@@ -280,7 +285,7 @@ def evaluate_outputs(model_cfg,
]:
eval_kwargs.pop(key, None)
eval_kwargs.update(dict(metric=metrics, **kwargs))
print(dataset.evaluate(outputs, **eval_kwargs))
logger.info(dataset.evaluate(outputs, **eval_kwargs))

def get_preprocess(self) -> Dict:
"""Get the preprocess information for SDK.
17 changes: 11 additions & 6 deletions mmdeploy/codebase/mmocr/deploy/text_recognition.py
@@ -10,7 +10,7 @@
from torch.utils.data import Dataset

from mmdeploy.codebase.base import BaseTask
from mmdeploy.utils import Task, get_input_shape, get_root_logger
from mmdeploy.utils import Task, get_input_shape
from .mmocr import MMOCR_TASK


@@ -259,15 +259,16 @@ def evaluate_outputs(model_cfg: mmcv.Config,
metrics: Optional[str] = None,
out: Optional[str] = None,
metric_options: Optional[dict] = None,
format_only: bool = False):
format_only: bool = False,
log_file: Optional[str] = None):
"""Perform post-processing to predictions of model.

Args:
model_cfg (mmcv.Config): The model config.
outputs (list): A list of predictions of model inference.
dataset (Dataset): Input dataset to run test.
metrics (str): Evaluation metrics, which depends on
the codebase and the dataset, e.g., e.g., "acc" for text
the codebase and the dataset, e.g., "acc" for text
recognition, and "hmean-iou" for text detection.
out (str): Output result file in pickle format, defaults to `None`.
metric_options (dict): Custom options for evaluation, will be
@@ -276,10 +277,14 @@ def evaluate_outputs(model_cfg: mmcv.Config,
evaluation. It is useful when you want to format the result
to a specific format and submit it to the test server. Defaults
to `False`.
log_file (str | None): The file to write the evaluation results to.
Defaults to `None` and the results will only be printed to stdout.
"""
from mmcv.utils import get_logger
logger = get_logger('test', log_file=log_file)

if out:
logger = get_root_logger()
logger.info(f'\nwriting results to {out}')
logger.debug(f'writing results to {out}')
mmcv.dump(outputs, out)
kwargs = {} if metric_options is None else metric_options
if format_only:
@@ -293,7 +298,7 @@ def evaluate_outputs(model_cfg: mmcv.Config,
]:
eval_kwargs.pop(key, None)
eval_kwargs.update(dict(metric=metrics, **kwargs))
print(dataset.evaluate(outputs, **eval_kwargs))
logger.info(dataset.evaluate(outputs, **eval_kwargs))

def get_preprocess(self) -> Dict:
"""Get the preprocess information for SDK.
15 changes: 10 additions & 5 deletions mmdeploy/codebase/mmseg/deploy/segmentation.py
@@ -7,7 +7,7 @@
from torch.utils.data import Dataset

from mmdeploy.codebase.base import BaseTask
from mmdeploy.utils import Task, get_input_shape, get_root_logger
from mmdeploy.utils import Task, get_input_shape
from .mmsegmentation import MMSEG_TASK


@@ -206,7 +206,8 @@ def evaluate_outputs(model_cfg,
metrics: Optional[str] = None,
out: Optional[str] = None,
metric_options: Optional[dict] = None,
format_only: bool = False):
format_only: bool = False,
log_file: Optional[str] = None):
"""Perform post-processing to predictions of model.

Args:
@@ -223,16 +224,20 @@ def evaluate_outputs(model_cfg,
evaluation. It is useful when you want to format the result
to a specific format and submit it to the test server. Defaults
to `False`.
log_file (str | None): The file to write the evaluation results to.
Defaults to `None` and the results will only be printed to stdout.
"""
from mmcv.utils import get_logger
logger = get_logger('test', log_file=log_file)

if out:
logger = get_root_logger()
logger.info(f'\nwriting results to {out}')
logger.debug(f'writing results to {out}')
mmcv.dump(outputs, out)
kwargs = {} if metric_options is None else metric_options
if format_only:
dataset.format_results(outputs, **kwargs)
if metrics:
dataset.evaluate(outputs, metrics, **kwargs)
dataset.evaluate(outputs, metrics, logger=logger, **kwargs)

def get_preprocess(self) -> Dict:
"""Get the preprocess information for SDK.
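Unlike the other codebases, the segmentation hunk also forwards the logger into `dataset.evaluate`; mmseg datasets report their metric tables through `mmcv.utils.print_log`, so this routes that output to the same stdout/file destinations. A standalone sketch of how `print_log` dispatches (the message text is illustrative):

```python
from mmcv.utils import get_logger, print_log

logger = get_logger('test', log_file='seg_eval.log')

print_log('mIoU: 78.12', logger=logger)    # -> stdout and seg_eval.log
print_log('mIoU: 78.12')                   # logger=None -> plain print()
print_log('mIoU: 78.12', logger='silent')  # swallowed entirely
```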
24 changes: 8 additions & 16 deletions mmdeploy/utils/timer.py
@@ -1,18 +1,16 @@
# Copyright (c) OpenMMLab. All rights reserved.
import io
import sys
import time
import warnings
from contextlib import contextmanager
from typing import Union
from typing import Optional

import torch
from mmcv.utils import get_logger


class TimeCounter:
"""A tool for counting inference time of backends."""
names = dict()
file = sys.stdout

# Avoid instantiating every time
@classmethod
@@ -76,10 +74,7 @@ def fun(*args, **kwargs):
msg = f'[{func.__name__}]-{count} times per count: '\
f'{times_per_count:.2f} ms, '\
f'{1000/times_per_count:.2f} FPS'
if cls.file != sys.stdout:
msg += '\n'
cls.file.write(msg)
cls.file.flush()
cls.logger.info(msg)

return result

@@ -94,7 +89,7 @@ def activate(cls,
warmup: int = 1,
log_interval: int = 1,
with_sync: bool = False,
file: Union[str, io.TextIOWrapper] = sys.stdout):
file: Optional[str] = None):
"""Activate the time counter.

Args:
@@ -104,13 +99,12 @@ def activate(cls,
log_interval (int): Interval between each log, default 1.
with_sync (bool): Whether to use cuda synchronize for time counting,
default False.
file (str | io.TextIOWrapper): A file or file-like object to save
output messages. The default is `sys.stdout`.
file (str | None): The file to save output messages. The default
is `None`.
"""
assert warmup >= 1
if file != sys.stdout:
file = open(file, 'w+')
cls.file = file
logger = get_logger('test', log_file=file)
cls.logger = logger
if func_name is not None:
warnings.warn('func_name must be globally unique if you call '
'activate multiple times')
@@ -127,8 +121,6 @@ def activate(cls,
cls.names[name]['with_sync'] = with_sync
cls.names[name]['enable'] = True
yield
if file != sys.stdout:
cls.file.close()
if func_name is not None:
cls.names[func_name]['enable'] = False
else:
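Finally, a usage sketch for the reworked `TimeCounter`: timing output now goes through an mmcv logger, so `file=None` means stdout only and a path tees the messages into a file. The decorator entry point is assumed to be `TimeCounter.count_time()` (not shown in this diff), and the model and loop below are purely illustrative.

```python
import torch

from mmdeploy.utils.timer import TimeCounter


@TimeCounter.count_time()
def run_inference(model, data):
    with torch.no_grad():
        return model(data)


model = torch.nn.Linear(8, 2).eval()
data = torch.randn(1, 8)

with TimeCounter.activate(warmup=5, log_interval=10, file='speed.log'):
    for _ in range(50):
        run_inference(model, data)
# Expected output (stdout and speed.log), per the decorator in the diff:
# [run_inference]-<count> times per count: <x> ms, <y> FPS
```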