Add files using upload-large-folder tool
Browse files- testbed/openvinotoolkit__datumaro/datumaro/cli/__init__.py +4 -0
- testbed/openvinotoolkit__datumaro/datumaro/cli/commands/convert.py +137 -0
- testbed/openvinotoolkit__datumaro/datumaro/cli/commands/export.py +8 -0
- testbed/openvinotoolkit__datumaro/datumaro/cli/commands/remove.py +8 -0
- testbed/openvinotoolkit__datumaro/datumaro/cli/contexts/__init__.py +6 -0
- testbed/openvinotoolkit__datumaro/datumaro/cli/contexts/item/__init__.py +36 -0
- testbed/openvinotoolkit__datumaro/datumaro/cli/contexts/project/__init__.py +831 -0
- testbed/openvinotoolkit__datumaro/datumaro/cli/contexts/source/__init__.py +273 -0
- testbed/openvinotoolkit__datumaro/datumaro/cli/util/__init__.py +74 -0
- testbed/openvinotoolkit__datumaro/datumaro/cli/util/project.py +39 -0
- testbed/openvinotoolkit__datumaro/datumaro/components/__init__.py +5 -0
- testbed/openvinotoolkit__datumaro/datumaro/components/algorithms/__init__.py +5 -0
- testbed/openvinotoolkit__datumaro/datumaro/components/cli_plugin.py +44 -0
- testbed/openvinotoolkit__datumaro/datumaro/components/config_model.py +63 -0
- testbed/openvinotoolkit__datumaro/datumaro/components/converter.py +76 -0
- testbed/openvinotoolkit__datumaro/datumaro/components/dataset.py +186 -0
- testbed/openvinotoolkit__datumaro/datumaro/components/dataset_filter.py +261 -0
- testbed/openvinotoolkit__datumaro/datumaro/components/extractor.py +657 -0
- testbed/openvinotoolkit__datumaro/datumaro/components/launcher.py +67 -0
- testbed/openvinotoolkit__datumaro/datumaro/components/operations.py +1503 -0
- testbed/openvinotoolkit__datumaro/datumaro/plugins/accuracy_checker_plugin/__init__.py +4 -0
- testbed/openvinotoolkit__datumaro/datumaro/plugins/accuracy_checker_plugin/details/ac.py +116 -0
- testbed/openvinotoolkit__datumaro/datumaro/plugins/accuracy_checker_plugin/details/representation.py +62 -0
- testbed/openvinotoolkit__datumaro/datumaro/plugins/coco_format/__init__.py +0 -0
- testbed/openvinotoolkit__datumaro/datumaro/plugins/coco_format/converter.py +597 -0
- testbed/openvinotoolkit__datumaro/datumaro/plugins/coco_format/extractor.py +251 -0
- testbed/openvinotoolkit__datumaro/datumaro/plugins/coco_format/format.py +23 -0
- testbed/openvinotoolkit__datumaro/datumaro/plugins/coco_format/importer.py +95 -0
- testbed/openvinotoolkit__datumaro/datumaro/plugins/cvat_format/__init__.py +0 -0
- testbed/openvinotoolkit__datumaro/datumaro/plugins/cvat_format/converter.py +332 -0
- testbed/openvinotoolkit__datumaro/datumaro/plugins/cvat_format/extractor.py +311 -0
- testbed/openvinotoolkit__datumaro/datumaro/plugins/datumaro_format/extractor.py +152 -0
- testbed/openvinotoolkit__datumaro/datumaro/plugins/tf_detection_api_format/__init__.py +0 -0
- testbed/openvinotoolkit__datumaro/datumaro/plugins/tf_detection_api_format/format.py +13 -0
- testbed/openvinotoolkit__datumaro/datumaro/plugins/voc_format/extractor.py +296 -0
- testbed/openvinotoolkit__datumaro/datumaro/plugins/yolo_format/converter.py +105 -0
- testbed/openvinotoolkit__datumaro/tests/test_project.py +578 -0
testbed/openvinotoolkit__datumaro/datumaro/cli/__init__.py
ADDED
|
@@ -0,0 +1,4 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
|
| 2 |
+
# Copyright (C) 2019-2020 Intel Corporation
|
| 3 |
+
#
|
| 4 |
+
# SPDX-License-Identifier: MIT
|
testbed/openvinotoolkit__datumaro/datumaro/cli/commands/convert.py
ADDED
|
@@ -0,0 +1,137 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
|
| 2 |
+
# Copyright (C) 2019-2020 Intel Corporation
|
| 3 |
+
#
|
| 4 |
+
# SPDX-License-Identifier: MIT
|
| 5 |
+
|
| 6 |
+
import argparse
|
| 7 |
+
import logging as log
|
| 8 |
+
import os
|
| 9 |
+
import os.path as osp
|
| 10 |
+
|
| 11 |
+
from datumaro.components.project import Environment
|
| 12 |
+
|
| 13 |
+
from ..contexts.project import FilterModes
|
| 14 |
+
from ..util import CliException, MultilineFormatter, make_file_name
|
| 15 |
+
from ..util.project import generate_next_file_name
|
| 16 |
+
|
| 17 |
+
|
| 18 |
+
def build_parser(parser_ctor=argparse.ArgumentParser):
|
| 19 |
+
builtin_importers = sorted(Environment().importers.items)
|
| 20 |
+
builtin_converters = sorted(Environment().converters.items)
|
| 21 |
+
|
| 22 |
+
parser = parser_ctor(help="Convert an existing dataset to another format",
|
| 23 |
+
description="""
|
| 24 |
+
Converts a dataset from one format to another.
|
| 25 |
+
You can add your own formats using a project.|n
|
| 26 |
+
|n
|
| 27 |
+
Supported input formats: %s|n
|
| 28 |
+
|n
|
| 29 |
+
Supported output formats: %s|n
|
| 30 |
+
|n
|
| 31 |
+
Examples:|n
|
| 32 |
+
- Export a dataset as a PASCAL VOC dataset, include images:|n
|
| 33 |
+
|s|sconvert -i src/path -f voc -- --save-images|n
|
| 34 |
+
|n
|
| 35 |
+
- Export a dataset as a COCO dataset to a specific directory:|n
|
| 36 |
+
|s|sconvert -i src/path -f coco -o path/I/like/
|
| 37 |
+
""" % (', '.join(builtin_importers), ', '.join(builtin_converters)),
|
| 38 |
+
formatter_class=MultilineFormatter)
|
| 39 |
+
|
| 40 |
+
parser.add_argument('-i', '--input-path', default='.', dest='source',
|
| 41 |
+
help="Path to look for a dataset")
|
| 42 |
+
parser.add_argument('-if', '--input-format',
|
| 43 |
+
help="Input dataset format. Will try to detect, if not specified.")
|
| 44 |
+
parser.add_argument('-f', '--output-format', required=True,
|
| 45 |
+
help="Output format")
|
| 46 |
+
parser.add_argument('-o', '--output-dir', dest='dst_dir',
|
| 47 |
+
help="Directory to save output (default: a subdir in the current one)")
|
| 48 |
+
parser.add_argument('--overwrite', action='store_true',
|
| 49 |
+
help="Overwrite existing files in the save directory")
|
| 50 |
+
parser.add_argument('-e', '--filter',
|
| 51 |
+
help="Filter expression for dataset items")
|
| 52 |
+
parser.add_argument('--filter-mode', default=FilterModes.i.name,
|
| 53 |
+
type=FilterModes.parse,
|
| 54 |
+
help="Filter mode (options: %s; default: %s)" % \
|
| 55 |
+
(', '.join(FilterModes.list_options()) , '%(default)s'))
|
| 56 |
+
parser.add_argument('extra_args', nargs=argparse.REMAINDER,
|
| 57 |
+
help="Additional arguments for output format (pass '-- -h' for help)")
|
| 58 |
+
parser.set_defaults(command=convert_command)
|
| 59 |
+
|
| 60 |
+
return parser
|
| 61 |
+
|
| 62 |
+
def convert_command(args):
|
| 63 |
+
env = Environment()
|
| 64 |
+
|
| 65 |
+
try:
|
| 66 |
+
converter = env.converters.get(args.output_format)
|
| 67 |
+
except KeyError:
|
| 68 |
+
raise CliException("Converter for format '%s' is not found" % \
|
| 69 |
+
args.output_format)
|
| 70 |
+
extra_args = converter.from_cmdline(args.extra_args)
|
| 71 |
+
def converter_proxy(extractor, save_dir):
|
| 72 |
+
return converter.convert(extractor, save_dir, **extra_args)
|
| 73 |
+
|
| 74 |
+
filter_args = FilterModes.make_filter_args(args.filter_mode)
|
| 75 |
+
|
| 76 |
+
if not args.input_format:
|
| 77 |
+
matches = []
|
| 78 |
+
for format_name in env.importers.items:
|
| 79 |
+
log.debug("Checking '%s' format...", format_name)
|
| 80 |
+
importer = env.make_importer(format_name)
|
| 81 |
+
try:
|
| 82 |
+
match = importer.detect(args.source)
|
| 83 |
+
if match:
|
| 84 |
+
log.debug("format matched")
|
| 85 |
+
matches.append((format_name, importer))
|
| 86 |
+
except NotImplementedError:
|
| 87 |
+
log.debug("Format '%s' does not support auto detection.",
|
| 88 |
+
format_name)
|
| 89 |
+
|
| 90 |
+
if len(matches) == 0:
|
| 91 |
+
log.error("Failed to detect dataset format. "
|
| 92 |
+
"Try to specify format with '-if/--input-format' parameter.")
|
| 93 |
+
return 1
|
| 94 |
+
elif len(matches) != 1:
|
| 95 |
+
log.error("Multiple formats match the dataset: %s. "
|
| 96 |
+
"Try to specify format with '-if/--input-format' parameter.",
|
| 97 |
+
', '.join(m[0] for m in matches))
|
| 98 |
+
return 2
|
| 99 |
+
|
| 100 |
+
format_name, importer = matches[0]
|
| 101 |
+
args.input_format = format_name
|
| 102 |
+
log.info("Source dataset format detected as '%s'", args.input_format)
|
| 103 |
+
else:
|
| 104 |
+
try:
|
| 105 |
+
importer = env.make_importer(args.input_format)
|
| 106 |
+
if hasattr(importer, 'from_cmdline'):
|
| 107 |
+
extra_args = importer.from_cmdline()
|
| 108 |
+
except KeyError:
|
| 109 |
+
raise CliException("Importer for format '%s' is not found" % \
|
| 110 |
+
args.input_format)
|
| 111 |
+
|
| 112 |
+
source = osp.abspath(args.source)
|
| 113 |
+
|
| 114 |
+
dst_dir = args.dst_dir
|
| 115 |
+
if dst_dir:
|
| 116 |
+
if not args.overwrite and osp.isdir(dst_dir) and os.listdir(dst_dir):
|
| 117 |
+
raise CliException("Directory '%s' already exists "
|
| 118 |
+
"(pass --overwrite to overwrite)" % dst_dir)
|
| 119 |
+
else:
|
| 120 |
+
dst_dir = generate_next_file_name('%s-%s' % \
|
| 121 |
+
(osp.basename(source), make_file_name(args.output_format)))
|
| 122 |
+
dst_dir = osp.abspath(dst_dir)
|
| 123 |
+
|
| 124 |
+
project = importer(source)
|
| 125 |
+
dataset = project.make_dataset()
|
| 126 |
+
|
| 127 |
+
log.info("Exporting the dataset")
|
| 128 |
+
dataset.export_project(
|
| 129 |
+
save_dir=dst_dir,
|
| 130 |
+
converter=converter_proxy,
|
| 131 |
+
filter_expr=args.filter,
|
| 132 |
+
**filter_args)
|
| 133 |
+
|
| 134 |
+
log.info("Dataset exported to '%s' as '%s'" % \
|
| 135 |
+
(dst_dir, args.output_format))
|
| 136 |
+
|
| 137 |
+
return 0
|
testbed/openvinotoolkit__datumaro/datumaro/cli/commands/export.py
ADDED
|
@@ -0,0 +1,8 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
|
| 2 |
+
# Copyright (C) 2019-2020 Intel Corporation
|
| 3 |
+
#
|
| 4 |
+
# SPDX-License-Identifier: MIT
|
| 5 |
+
|
| 6 |
+
# pylint: disable=unused-import
|
| 7 |
+
|
| 8 |
+
from ..contexts.project import build_export_parser as build_parser
|
testbed/openvinotoolkit__datumaro/datumaro/cli/commands/remove.py
ADDED
|
@@ -0,0 +1,8 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
|
| 2 |
+
# Copyright (C) 2019-2020 Intel Corporation
|
| 3 |
+
#
|
| 4 |
+
# SPDX-License-Identifier: MIT
|
| 5 |
+
|
| 6 |
+
# pylint: disable=unused-import
|
| 7 |
+
|
| 8 |
+
from ..contexts.source import build_remove_parser as build_parser
|
testbed/openvinotoolkit__datumaro/datumaro/cli/contexts/__init__.py
ADDED
|
@@ -0,0 +1,6 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
|
| 2 |
+
# Copyright (C) 2019-2020 Intel Corporation
|
| 3 |
+
#
|
| 4 |
+
# SPDX-License-Identifier: MIT
|
| 5 |
+
|
| 6 |
+
from . import project, source, model, item
|
testbed/openvinotoolkit__datumaro/datumaro/cli/contexts/item/__init__.py
ADDED
|
@@ -0,0 +1,36 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
|
| 2 |
+
# Copyright (C) 2019-2020 Intel Corporation
|
| 3 |
+
#
|
| 4 |
+
# SPDX-License-Identifier: MIT
|
| 5 |
+
|
| 6 |
+
import argparse
|
| 7 |
+
|
| 8 |
+
from ...util import add_subparser
|
| 9 |
+
|
| 10 |
+
|
| 11 |
+
def build_export_parser(parser_ctor=argparse.ArgumentParser):
|
| 12 |
+
parser = parser_ctor()
|
| 13 |
+
return parser
|
| 14 |
+
|
| 15 |
+
def build_stats_parser(parser_ctor=argparse.ArgumentParser):
|
| 16 |
+
parser = parser_ctor()
|
| 17 |
+
return parser
|
| 18 |
+
|
| 19 |
+
def build_diff_parser(parser_ctor=argparse.ArgumentParser):
|
| 20 |
+
parser = parser_ctor()
|
| 21 |
+
return parser
|
| 22 |
+
|
| 23 |
+
def build_edit_parser(parser_ctor=argparse.ArgumentParser):
|
| 24 |
+
parser = parser_ctor()
|
| 25 |
+
return parser
|
| 26 |
+
|
| 27 |
+
def build_parser(parser_ctor=argparse.ArgumentParser):
|
| 28 |
+
parser = parser_ctor()
|
| 29 |
+
|
| 30 |
+
subparsers = parser.add_subparsers()
|
| 31 |
+
add_subparser(subparsers, 'export', build_export_parser)
|
| 32 |
+
add_subparser(subparsers, 'stats', build_stats_parser)
|
| 33 |
+
add_subparser(subparsers, 'diff', build_diff_parser)
|
| 34 |
+
add_subparser(subparsers, 'edit', build_edit_parser)
|
| 35 |
+
|
| 36 |
+
return parser
|
testbed/openvinotoolkit__datumaro/datumaro/cli/contexts/project/__init__.py
ADDED
|
@@ -0,0 +1,831 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
|
| 2 |
+
# Copyright (C) 2019-2020 Intel Corporation
|
| 3 |
+
#
|
| 4 |
+
# SPDX-License-Identifier: MIT
|
| 5 |
+
|
| 6 |
+
import argparse
|
| 7 |
+
import json
|
| 8 |
+
import logging as log
|
| 9 |
+
import os
|
| 10 |
+
import os.path as osp
|
| 11 |
+
import shutil
|
| 12 |
+
from enum import Enum
|
| 13 |
+
|
| 14 |
+
from datumaro.components.cli_plugin import CliPlugin
|
| 15 |
+
from datumaro.components.dataset_filter import DatasetItemEncoder
|
| 16 |
+
from datumaro.components.extractor import AnnotationType
|
| 17 |
+
from datumaro.components.operations import (DistanceComparator,
|
| 18 |
+
ExactComparator, compute_ann_statistics, compute_image_statistics, mean_std)
|
| 19 |
+
from datumaro.components.project import \
|
| 20 |
+
PROJECT_DEFAULT_CONFIG as DEFAULT_CONFIG
|
| 21 |
+
from datumaro.components.project import Environment, Project
|
| 22 |
+
|
| 23 |
+
from ...util import (CliException, MultilineFormatter, add_subparser,
|
| 24 |
+
make_file_name)
|
| 25 |
+
from ...util.project import generate_next_file_name, load_project
|
| 26 |
+
from .diff import DiffVisualizer
|
| 27 |
+
|
| 28 |
+
|
| 29 |
+
def build_create_parser(parser_ctor=argparse.ArgumentParser):
|
| 30 |
+
parser = parser_ctor(help="Create empty project",
|
| 31 |
+
description="""
|
| 32 |
+
Create a new empty project.|n
|
| 33 |
+
|n
|
| 34 |
+
Examples:|n
|
| 35 |
+
- Create a project in the current directory:|n
|
| 36 |
+
|s|screate -n myproject|n
|
| 37 |
+
|n
|
| 38 |
+
- Create a project in other directory:|n
|
| 39 |
+
|s|screate -o path/I/like/
|
| 40 |
+
""",
|
| 41 |
+
formatter_class=MultilineFormatter)
|
| 42 |
+
|
| 43 |
+
parser.add_argument('-o', '--output-dir', default='.', dest='dst_dir',
|
| 44 |
+
help="Save directory for the new project (default: current dir")
|
| 45 |
+
parser.add_argument('-n', '--name', default=None,
|
| 46 |
+
help="Name of the new project (default: same as project dir)")
|
| 47 |
+
parser.add_argument('--overwrite', action='store_true',
|
| 48 |
+
help="Overwrite existing files in the save directory")
|
| 49 |
+
parser.set_defaults(command=create_command)
|
| 50 |
+
|
| 51 |
+
return parser
|
| 52 |
+
|
| 53 |
+
def create_command(args):
|
| 54 |
+
project_dir = osp.abspath(args.dst_dir)
|
| 55 |
+
|
| 56 |
+
project_env_dir = osp.join(project_dir, DEFAULT_CONFIG.env_dir)
|
| 57 |
+
if osp.isdir(project_env_dir) and os.listdir(project_env_dir):
|
| 58 |
+
if not args.overwrite:
|
| 59 |
+
raise CliException("Directory '%s' already exists "
|
| 60 |
+
"(pass --overwrite to overwrite)" % project_env_dir)
|
| 61 |
+
else:
|
| 62 |
+
shutil.rmtree(project_env_dir, ignore_errors=True)
|
| 63 |
+
|
| 64 |
+
own_dataset_dir = osp.join(project_dir, DEFAULT_CONFIG.dataset_dir)
|
| 65 |
+
if osp.isdir(own_dataset_dir) and os.listdir(own_dataset_dir):
|
| 66 |
+
if not args.overwrite:
|
| 67 |
+
raise CliException("Directory '%s' already exists "
|
| 68 |
+
"(pass --overwrite to overwrite)" % own_dataset_dir)
|
| 69 |
+
else:
|
| 70 |
+
# NOTE: remove the dir to avoid using data from previous project
|
| 71 |
+
shutil.rmtree(own_dataset_dir)
|
| 72 |
+
|
| 73 |
+
project_name = args.name
|
| 74 |
+
if project_name is None:
|
| 75 |
+
project_name = osp.basename(project_dir)
|
| 76 |
+
|
| 77 |
+
log.info("Creating project at '%s'" % project_dir)
|
| 78 |
+
|
| 79 |
+
Project.generate(project_dir, {
|
| 80 |
+
'project_name': project_name,
|
| 81 |
+
})
|
| 82 |
+
|
| 83 |
+
log.info("Project has been created at '%s'" % project_dir)
|
| 84 |
+
|
| 85 |
+
return 0
|
| 86 |
+
|
| 87 |
+
def build_import_parser(parser_ctor=argparse.ArgumentParser):
|
| 88 |
+
builtins = sorted(Environment().importers.items)
|
| 89 |
+
|
| 90 |
+
parser = parser_ctor(help="Create project from existing dataset",
|
| 91 |
+
description="""
|
| 92 |
+
Creates a project from an existing dataset. The source can be:|n
|
| 93 |
+
- a dataset in a supported format (check 'formats' section below)|n
|
| 94 |
+
- a Datumaro project|n
|
| 95 |
+
|n
|
| 96 |
+
Formats:|n
|
| 97 |
+
Datasets come in a wide variety of formats. Each dataset
|
| 98 |
+
format defines its own data structure and rules on how to
|
| 99 |
+
interpret the data. For example, the following data structure
|
| 100 |
+
is used in COCO format:|n
|
| 101 |
+
/dataset/|n
|
| 102 |
+
- /images/<id>.jpg|n
|
| 103 |
+
- /annotations/|n
|
| 104 |
+
|n
|
| 105 |
+
In Datumaro dataset formats are supported by
|
| 106 |
+
Extractor-s and Importer-s.
|
| 107 |
+
An Extractor produces a list of dataset items corresponding
|
| 108 |
+
to the dataset. An Importer creates a project from the
|
| 109 |
+
data source location.
|
| 110 |
+
It is possible to add a custom Extractor and Importer.
|
| 111 |
+
To do this, you need to put an Extractor and
|
| 112 |
+
Importer implementation scripts to
|
| 113 |
+
<project_dir>/.datumaro/extractors
|
| 114 |
+
and <project_dir>/.datumaro/importers.|n
|
| 115 |
+
|n
|
| 116 |
+
List of builtin dataset formats: %s|n
|
| 117 |
+
|n
|
| 118 |
+
Examples:|n
|
| 119 |
+
- Create a project from VOC dataset in the current directory:|n
|
| 120 |
+
|s|simport -f voc -i path/to/voc|n
|
| 121 |
+
|n
|
| 122 |
+
- Create a project from COCO dataset in other directory:|n
|
| 123 |
+
|s|simport -f coco -i path/to/coco -o path/I/like/
|
| 124 |
+
""" % ', '.join(builtins),
|
| 125 |
+
formatter_class=MultilineFormatter)
|
| 126 |
+
|
| 127 |
+
parser.add_argument('-o', '--output-dir', default='.', dest='dst_dir',
|
| 128 |
+
help="Directory to save the new project to (default: current dir)")
|
| 129 |
+
parser.add_argument('-n', '--name', default=None,
|
| 130 |
+
help="Name of the new project (default: same as project dir)")
|
| 131 |
+
parser.add_argument('--copy', action='store_true',
|
| 132 |
+
help="Copy the dataset instead of saving source links")
|
| 133 |
+
parser.add_argument('--skip-check', action='store_true',
|
| 134 |
+
help="Skip source checking")
|
| 135 |
+
parser.add_argument('--overwrite', action='store_true',
|
| 136 |
+
help="Overwrite existing files in the save directory")
|
| 137 |
+
parser.add_argument('-i', '--input-path', required=True, dest='source',
|
| 138 |
+
help="Path to import project from")
|
| 139 |
+
parser.add_argument('-f', '--format',
|
| 140 |
+
help="Source project format. Will try to detect, if not specified.")
|
| 141 |
+
parser.add_argument('extra_args', nargs=argparse.REMAINDER,
|
| 142 |
+
help="Additional arguments for importer (pass '-- -h' for help)")
|
| 143 |
+
parser.set_defaults(command=import_command)
|
| 144 |
+
|
| 145 |
+
return parser
|
| 146 |
+
|
| 147 |
+
def import_command(args):
|
| 148 |
+
project_dir = osp.abspath(args.dst_dir)
|
| 149 |
+
|
| 150 |
+
project_env_dir = osp.join(project_dir, DEFAULT_CONFIG.env_dir)
|
| 151 |
+
if osp.isdir(project_env_dir) and os.listdir(project_env_dir):
|
| 152 |
+
if not args.overwrite:
|
| 153 |
+
raise CliException("Directory '%s' already exists "
|
| 154 |
+
"(pass --overwrite to overwrite)" % project_env_dir)
|
| 155 |
+
else:
|
| 156 |
+
shutil.rmtree(project_env_dir, ignore_errors=True)
|
| 157 |
+
|
| 158 |
+
own_dataset_dir = osp.join(project_dir, DEFAULT_CONFIG.dataset_dir)
|
| 159 |
+
if osp.isdir(own_dataset_dir) and os.listdir(own_dataset_dir):
|
| 160 |
+
if not args.overwrite:
|
| 161 |
+
raise CliException("Directory '%s' already exists "
|
| 162 |
+
"(pass --overwrite to overwrite)" % own_dataset_dir)
|
| 163 |
+
else:
|
| 164 |
+
# NOTE: remove the dir to avoid using data from previous project
|
| 165 |
+
shutil.rmtree(own_dataset_dir)
|
| 166 |
+
|
| 167 |
+
project_name = args.name
|
| 168 |
+
if project_name is None:
|
| 169 |
+
project_name = osp.basename(project_dir)
|
| 170 |
+
|
| 171 |
+
env = Environment()
|
| 172 |
+
log.info("Importing project from '%s'" % args.source)
|
| 173 |
+
|
| 174 |
+
extra_args = {}
|
| 175 |
+
if not args.format:
|
| 176 |
+
if args.extra_args:
|
| 177 |
+
raise CliException("Extra args can not be used without format")
|
| 178 |
+
|
| 179 |
+
log.info("Trying to detect dataset format...")
|
| 180 |
+
|
| 181 |
+
matches = []
|
| 182 |
+
for format_name in env.importers.items:
|
| 183 |
+
log.debug("Checking '%s' format...", format_name)
|
| 184 |
+
importer = env.make_importer(format_name)
|
| 185 |
+
try:
|
| 186 |
+
match = importer.detect(args.source)
|
| 187 |
+
if match:
|
| 188 |
+
log.debug("format matched")
|
| 189 |
+
matches.append((format_name, importer))
|
| 190 |
+
except NotImplementedError:
|
| 191 |
+
log.debug("Format '%s' does not support auto detection.",
|
| 192 |
+
format_name)
|
| 193 |
+
|
| 194 |
+
if len(matches) == 0:
|
| 195 |
+
log.error("Failed to detect dataset format automatically. "
|
| 196 |
+
"Try to specify format with '-f/--format' parameter.")
|
| 197 |
+
return 1
|
| 198 |
+
elif len(matches) != 1:
|
| 199 |
+
log.error("Multiple formats match the dataset: %s. "
|
| 200 |
+
"Try to specify format with '-f/--format' parameter.",
|
| 201 |
+
', '.join(m[0] for m in matches))
|
| 202 |
+
return 2
|
| 203 |
+
|
| 204 |
+
format_name, importer = matches[0]
|
| 205 |
+
args.format = format_name
|
| 206 |
+
else:
|
| 207 |
+
try:
|
| 208 |
+
importer = env.make_importer(args.format)
|
| 209 |
+
if hasattr(importer, 'from_cmdline'):
|
| 210 |
+
extra_args = importer.from_cmdline(args.extra_args)
|
| 211 |
+
except KeyError:
|
| 212 |
+
raise CliException("Importer for format '%s' is not found" % \
|
| 213 |
+
args.format)
|
| 214 |
+
|
| 215 |
+
log.info("Importing project as '%s'" % args.format)
|
| 216 |
+
|
| 217 |
+
source = osp.abspath(args.source)
|
| 218 |
+
project = importer(source, **extra_args)
|
| 219 |
+
project.config.project_name = project_name
|
| 220 |
+
project.config.project_dir = project_dir
|
| 221 |
+
|
| 222 |
+
if not args.skip_check or args.copy:
|
| 223 |
+
log.info("Checking the dataset...")
|
| 224 |
+
dataset = project.make_dataset()
|
| 225 |
+
if args.copy:
|
| 226 |
+
log.info("Cloning data...")
|
| 227 |
+
dataset.save(merge=True, save_images=True)
|
| 228 |
+
else:
|
| 229 |
+
project.save()
|
| 230 |
+
|
| 231 |
+
log.info("Project has been created at '%s'" % project_dir)
|
| 232 |
+
|
| 233 |
+
return 0
|
| 234 |
+
|
| 235 |
+
|
| 236 |
+
class FilterModes(Enum):
|
| 237 |
+
# primary
|
| 238 |
+
items = 1
|
| 239 |
+
annotations = 2
|
| 240 |
+
items_annotations = 3
|
| 241 |
+
|
| 242 |
+
# shortcuts
|
| 243 |
+
i = 1
|
| 244 |
+
a = 2
|
| 245 |
+
i_a = 3
|
| 246 |
+
a_i = 3
|
| 247 |
+
annotations_items = 3
|
| 248 |
+
|
| 249 |
+
@staticmethod
|
| 250 |
+
def parse(s):
|
| 251 |
+
s = s.lower()
|
| 252 |
+
s = s.replace('+', '_')
|
| 253 |
+
return FilterModes[s]
|
| 254 |
+
|
| 255 |
+
@classmethod
|
| 256 |
+
def make_filter_args(cls, mode):
|
| 257 |
+
if mode == cls.items:
|
| 258 |
+
return {}
|
| 259 |
+
elif mode == cls.annotations:
|
| 260 |
+
return {
|
| 261 |
+
'filter_annotations': True
|
| 262 |
+
}
|
| 263 |
+
elif mode == cls.items_annotations:
|
| 264 |
+
return {
|
| 265 |
+
'filter_annotations': True,
|
| 266 |
+
'remove_empty': True,
|
| 267 |
+
}
|
| 268 |
+
else:
|
| 269 |
+
raise NotImplementedError()
|
| 270 |
+
|
| 271 |
+
@classmethod
|
| 272 |
+
def list_options(cls):
|
| 273 |
+
return [m.name.replace('_', '+') for m in cls]
|
| 274 |
+
|
| 275 |
+
def build_export_parser(parser_ctor=argparse.ArgumentParser):
|
| 276 |
+
builtins = sorted(Environment().converters.items)
|
| 277 |
+
|
| 278 |
+
parser = parser_ctor(help="Export project",
|
| 279 |
+
description="""
|
| 280 |
+
Exports the project dataset in some format. Optionally, a filter
|
| 281 |
+
can be passed, check 'filter' command description for more info.
|
| 282 |
+
Each dataset format has its own options, which
|
| 283 |
+
are passed after '--' separator (see examples), pass '-- -h'
|
| 284 |
+
for more info. If not stated otherwise, by default
|
| 285 |
+
only annotations are exported, to include images pass
|
| 286 |
+
'--save-images' parameter.|n
|
| 287 |
+
|n
|
| 288 |
+
Formats:|n
|
| 289 |
+
In Datumaro dataset formats are supported by Converter-s.
|
| 290 |
+
A Converter produces a dataset of a specific format
|
| 291 |
+
from dataset items. It is possible to add a custom Converter.
|
| 292 |
+
To do this, you need to put a Converter
|
| 293 |
+
definition script to <project_dir>/.datumaro/converters.|n
|
| 294 |
+
|n
|
| 295 |
+
List of builtin dataset formats: %s|n
|
| 296 |
+
|n
|
| 297 |
+
Examples:|n
|
| 298 |
+
- Export project as a VOC-like dataset, include images:|n
|
| 299 |
+
|s|sexport -f voc -- --save-images|n
|
| 300 |
+
|n
|
| 301 |
+
- Export project as a COCO-like dataset in other directory:|n
|
| 302 |
+
|s|sexport -f coco -o path/I/like/
|
| 303 |
+
""" % ', '.join(builtins),
|
| 304 |
+
formatter_class=MultilineFormatter)
|
| 305 |
+
|
| 306 |
+
parser.add_argument('-e', '--filter', default=None,
|
| 307 |
+
help="Filter expression for dataset items")
|
| 308 |
+
parser.add_argument('--filter-mode', default=FilterModes.i.name,
|
| 309 |
+
type=FilterModes.parse,
|
| 310 |
+
help="Filter mode (options: %s; default: %s)" % \
|
| 311 |
+
(', '.join(FilterModes.list_options()) , '%(default)s'))
|
| 312 |
+
parser.add_argument('-o', '--output-dir', dest='dst_dir', default=None,
|
| 313 |
+
help="Directory to save output (default: a subdir in the current one)")
|
| 314 |
+
parser.add_argument('--overwrite', action='store_true',
|
| 315 |
+
help="Overwrite existing files in the save directory")
|
| 316 |
+
parser.add_argument('-p', '--project', dest='project_dir', default='.',
|
| 317 |
+
help="Directory of the project to operate on (default: current dir)")
|
| 318 |
+
parser.add_argument('-f', '--format', required=True,
|
| 319 |
+
help="Output format")
|
| 320 |
+
parser.add_argument('extra_args', nargs=argparse.REMAINDER, default=None,
|
| 321 |
+
help="Additional arguments for converter (pass '-- -h' for help)")
|
| 322 |
+
parser.set_defaults(command=export_command)
|
| 323 |
+
|
| 324 |
+
return parser
|
| 325 |
+
|
| 326 |
+
def export_command(args):
    """Export the current project in the requested dataset format.

    Resolves the output directory, looks up the converter plugin for
    ``args.format``, optionally filters the dataset, and writes the
    converted result. Returns 0 on success; raises CliException on a
    conflicting output directory or an unknown format.
    """
    project = load_project(args.project_dir)

    # Pick the destination: an explicit one must be empty (or --overwrite),
    # otherwise a fresh name is derived from the project and format names.
    out_dir = args.dst_dir
    if not out_dir:
        out_dir = generate_next_file_name('%s-%s' %
            (project.config.project_name, make_file_name(args.format)))
    elif not args.overwrite and osp.isdir(out_dir) and os.listdir(out_dir):
        raise CliException("Directory '%s' already exists "
            "(pass --overwrite to overwrite)" % out_dir)
    out_dir = osp.abspath(out_dir)

    try:
        converter = project.env.converters.get(args.format)
    except KeyError:
        raise CliException("Converter for format '%s' is not found" % \
            args.format)

    converter_options = converter.from_cmdline(args.extra_args)

    def apply_converter(extractor, save_dir):
        # Close over the parsed CLI options for the export call below.
        return converter.convert(extractor, save_dir, **converter_options)

    filter_args = FilterModes.make_filter_args(args.filter_mode)

    log.info("Loading the project...")
    dataset = project.make_dataset()

    log.info("Exporting the project...")
    dataset.export_project(
        save_dir=out_dir,
        converter=apply_converter,
        filter_expr=args.filter,
        **filter_args)
    log.info("Project exported to '%s' as '%s'" % (out_dir, args.format))

    return 0
|
| 364 |
+
|
| 365 |
+
def build_filter_parser(parser_ctor=argparse.ArgumentParser):
    """Build the argument parser for the 'filter' command.

    The command extracts a subproject with only the items (and, optionally,
    annotations) matching an XPath expression. See the description text
    below for the supported filter modes and examples.
    """
    parser = parser_ctor(help="Extract subproject",
        description="""
        Extracts a subproject that contains only items matching filter.
        A filter is an XPath expression, which is applied to XML
        representation of a dataset item. Check '--dry-run' parameter
        to see XML representations of the dataset items.|n
        |n
        To filter annotations use the mode ('-m') parameter.|n
        Supported modes:|n
        - 'i', 'items'|n
        - 'a', 'annotations'|n
        - 'i+a', 'a+i', 'items+annotations', 'annotations+items'|n
        When filtering annotations, use the 'items+annotations'
        mode to point that annotation-less dataset items should be
        removed. To select an annotation, write an XPath that
        returns 'annotation' elements (see examples).|n
        |n
        Examples:|n
        - Filter images with width < height:|n
        |s|sextract -e '/item[image/width < image/height]'|n
        |n
        - Filter images with large-area bboxes:|n
        |s|sextract -e '/item[annotation/type="bbox" and
        annotation/area>2000]'|n
        |n
        - Filter out all irrelevant annotations from items:|n
        |s|sextract -m a -e '/item/annotation[label = "person"]'|n
        |n
        - Filter out all irrelevant annotations from items:|n
        |s|sextract -m a -e '/item/annotation[label="cat" and
        area > 99.5]'|n
        |n
        - Filter occluded annotations and items, if no annotations left:|n
        |s|sextract -m i+a -e '/item/annotation[occluded="True"]'
        """,
        formatter_class=MultilineFormatter)
    # NOTE(review): the 3rd and 4th example labels are identical
    # ("Filter out all irrelevant annotations from items") — likely a
    # copy-paste slip in the help text; confirm intended wording.

    parser.add_argument('-e', '--filter', default=None,
        help="XML XPath filter expression for dataset items")
    parser.add_argument('-m', '--mode', default=FilterModes.i.name,
        type=FilterModes.parse,
        help="Filter mode (options: %s; default: %s)" % \
            (', '.join(FilterModes.list_options()) , '%(default)s'))
    parser.add_argument('--dry-run', action='store_true',
        help="Print XML representations to be filtered and exit")
    parser.add_argument('-o', '--output-dir', dest='dst_dir', default=None,
        help="Output directory (default: update current project)")
    parser.add_argument('--overwrite', action='store_true',
        help="Overwrite existing files in the save directory")
    parser.add_argument('-p', '--project', dest='project_dir', default='.',
        help="Directory of the project to operate on (default: current dir)")
    parser.set_defaults(command=filter_command)

    return parser
|
| 420 |
+
|
| 421 |
+
def filter_command(args):
    """Extract a filtered subproject, or preview item XML with --dry-run.

    In dry-run mode the matching items are printed as XML and nothing is
    written. Otherwise the filtered subproject is saved to the resolved
    output directory. Returns 0 on success.
    """
    project = load_project(args.project_dir)

    # Resolve the output directory only when we will actually write files;
    # a dry run has no output.
    if not args.dry_run:
        out_dir = args.dst_dir
        if not out_dir:
            out_dir = generate_next_file_name('%s-filter' %
                project.config.project_name)
        elif not args.overwrite and osp.isdir(out_dir) and os.listdir(out_dir):
            raise CliException("Directory '%s' already exists "
                "(pass --overwrite to overwrite)" % out_dir)
        out_dir = osp.abspath(out_dir)

    dataset = project.make_dataset()

    filter_args = FilterModes.make_filter_args(args.mode)

    if args.dry_run:
        # Show the XML form each item would be matched against.
        preview = dataset.filter(expr=args.filter, **filter_args)
        for item in preview:
            encoded = DatasetItemEncoder.encode(item, preview.categories())
            print(DatasetItemEncoder.to_string(encoded))
        return 0

    if not args.filter:
        raise CliException("Expected a filter expression ('-e' argument)")

    dataset.filter_project(save_dir=out_dir,
        filter_expr=args.filter, **filter_args)

    log.info("Subproject has been extracted to '%s'" % out_dir)

    return 0
|
| 456 |
+
|
| 457 |
+
def build_merge_parser(parser_ctor=argparse.ArgumentParser):
    """Build the argument parser for the 'merge' command.

    The command updates the current project's items with the items of
    another project and saves the result.
    """
    parser = parser_ctor(help="Merge two projects",
        description="""
        Updates items of the current project with items
        from other project.|n
        |n
        Examples:|n
        - Update a project with items from other project:|n
        |s|smerge -p path/to/first/project path/to/other/project
        """,
        formatter_class=MultilineFormatter)

    parser.add_argument('other_project_dir',
        help="Path to a project")
    parser.add_argument('-o', '--output-dir', dest='dst_dir', default=None,
        help="Output directory (default: current project's dir)")
    parser.add_argument('--overwrite', action='store_true',
        help="Overwrite existing files in the save directory")
    parser.add_argument('-p', '--project', dest='project_dir', default='.',
        help="Directory of the project to operate on (default: current dir)")
    parser.set_defaults(command=merge_command)

    return parser
|
| 480 |
+
|
| 481 |
+
def merge_command(args):
    """Merge another project's items into the current one and save.

    With no '-o', the result is written back into the current project's
    directory. Returns 0 on success.
    """
    target_project = load_project(args.project_dir)
    other_project = load_project(args.other_project_dir)

    out_dir = args.dst_dir
    if out_dir and not args.overwrite \
            and osp.isdir(out_dir) and os.listdir(out_dir):
        raise CliException("Directory '%s' already exists "
            "(pass --overwrite to overwrite)" % out_dir)

    merged = target_project.make_dataset()
    incoming = other_project.make_dataset()

    merged.update(incoming)
    merged.save(save_dir=out_dir)

    # Report where the result actually landed (current project by default).
    if out_dir is None:
        out_dir = target_project.config.project_dir
    out_dir = osp.abspath(out_dir)
    log.info("Merge results have been saved to '%s'" % out_dir)

    return 0
|
| 503 |
+
|
| 504 |
+
def build_diff_parser(parser_ctor=argparse.ArgumentParser):
    """Build the argument parser for the 'diff' command.

    The command compares two projects, matching annotations by spatial
    distance (IoU for detections), and can render the results with a
    selectable visualizer backend.
    """
    parser = parser_ctor(help="Compare projects",
        description="""
        Compares two projects, match annotations by distance.|n
        |n
        Examples:|n
        - Compare two projects, match boxes if IoU > 0.7,|n
        |s|s|s|sprint results to Tensorboard:
        |s|sdiff path/to/other/project -o diff/ -v tensorboard --iou-thresh 0.7
        """,
        formatter_class=MultilineFormatter)

    parser.add_argument('other_project_dir',
        help="Directory of the second project to be compared")
    parser.add_argument('-o', '--output-dir', dest='dst_dir', default=None,
        help="Directory to save comparison results (default: do not save)")
    parser.add_argument('-v', '--visualizer',
        default=DiffVisualizer.DEFAULT_FORMAT,
        choices=[f.name for f in DiffVisualizer.Format],
        help="Output format (default: %(default)s)")
    parser.add_argument('--iou-thresh', default=0.5, type=float,
        help="IoU match threshold for detections (default: %(default)s)")
    # NOTE(review): --conf-thresh is accepted but not passed to the
    # comparator in diff_command — confirm whether it should be wired up.
    parser.add_argument('--conf-thresh', default=0.5, type=float,
        help="Confidence threshold for detections (default: %(default)s)")
    parser.add_argument('--overwrite', action='store_true',
        help="Overwrite existing files in the save directory")
    parser.add_argument('-p', '--project', dest='project_dir', default='.',
        help="Directory of the first project to be compared (default: current dir)")
    parser.set_defaults(command=diff_command)

    return parser
|
| 535 |
+
|
| 536 |
+
def diff_command(args):
    """Compare two projects by annotation distance and save the report.

    Creates (or reuses) the output directory, runs the selected
    visualizer, and removes the directory again if it was created here
    and the comparison fails. Returns 0 on success.
    """
    first_project = load_project(args.project_dir)
    second_project = load_project(args.other_project_dir)

    comparator = DistanceComparator(iou_threshold=args.iou_thresh)

    save_dir = args.dst_dir
    if not save_dir:
        save_dir = generate_next_file_name('%s-%s-diff' % (
            first_project.config.project_name,
            second_project.config.project_name)
        )
    elif not args.overwrite and osp.isdir(save_dir) and os.listdir(save_dir):
        raise CliException("Directory '%s' already exists "
            "(pass --overwrite to overwrite)" % save_dir)
    save_dir = osp.abspath(save_dir)
    log.info("Saving diff to '%s'" % save_dir)

    # Only delete the directory on failure if we created it ourselves.
    dir_existed_before = osp.exists(save_dir)
    try:
        visualizer = DiffVisualizer(save_dir=save_dir, comparator=comparator,
            output_format=args.visualizer)
        visualizer.save_dataset_diff(
            first_project.make_dataset(),
            second_project.make_dataset())
    except BaseException:
        if not dir_existed_before and osp.isdir(save_dir):
            shutil.rmtree(save_dir, ignore_errors=True)
        raise

    return 0
|
| 568 |
+
|
| 569 |
+
# Default annotation fields excluded from 'ediff' comparison. Kept at module
# level instead of as an argparse default because argparse would expose the
# shared mutable list across invocations (see the linked CPython issue).
_ediff_default_if = ['id', 'group'] # avoid https://bugs.python.org/issue16399
|
| 570 |
+
|
| 571 |
+
def build_ediff_parser(parser_ctor=argparse.ArgumentParser):
    """Build the argument parser for the 'ediff' (exact diff) command.

    The command compares two projects for exact equality, with options to
    ignore selected item attributes, annotation attributes, or annotation
    fields during the comparison.
    """
    parser = parser_ctor(help="Compare projects for equality",
        description="""
        Compares two projects for equality.|n
        |n
        Examples:|n
        - Compare two projects, exclude annotation group |n
        |s|s|sand the 'is_crowd' attribute from comparison:|n
        |s|sediff other/project/ -if group -ia is_crowd
        """,
        formatter_class=MultilineFormatter)

    parser.add_argument('other_project_dir',
        help="Directory of the second project to be compared")
    parser.add_argument('-iia', '--ignore-item-attr', action='append',
        help="Ignore item attribute (repeatable)")
    parser.add_argument('-ia', '--ignore-attr', action='append',
        help="Ignore annotation attribute (repeatable)")
    # The default is applied in ediff_command rather than here, to avoid
    # argparse sharing the mutable default list (bpo-16399).
    parser.add_argument('-if', '--ignore-field', action='append',
        help="Ignore annotation field (repeatable, default: %s)" % \
            _ediff_default_if)
    parser.add_argument('--match-images', action='store_true',
        help='Match dataset items by images instead of ids')
    parser.add_argument('--all', action='store_true',
        help="Include matches in the output")
    parser.add_argument('-p', '--project', dest='project_dir', default='.',
        help="Directory of the first project to be compared (default: current dir)")
    parser.set_defaults(command=ediff_command)

    return parser
|
| 601 |
+
|
| 602 |
+
def ediff_command(args):
    """Compare two projects for exact equality and write a JSON report.

    Compares the datasets of the current and the other project with
    ExactComparator, dumps mismatches/extras/errors (and matches, with
    --all) to an auto-named 'diff*.json' file, and prints a summary.
    Returns 0 on success.

    Bug fix: the original condition was `if args.ignore_field:`, which
    overwrote any user-supplied '-if' values with the defaults and never
    applied the defaults when '-if' was omitted — the inverse of the
    behavior documented in the '-if' help text. The condition is inverted
    so the defaults apply only when no fields were given.
    """
    first_project = load_project(args.project_dir)
    second_project = load_project(args.other_project_dir)

    # Apply the documented default only when the user passed no '-if'.
    if not args.ignore_field:
        args.ignore_field = _ediff_default_if
    comparator = ExactComparator(
        match_images=args.match_images,
        ignored_fields=args.ignore_field,
        ignored_attrs=args.ignore_attr,
        ignored_item_attrs=args.ignore_item_attr)
    matches, mismatches, a_extra, b_extra, errors = \
        comparator.compare_datasets(
            first_project.make_dataset(), second_project.make_dataset())
    output = {
        "mismatches": mismatches,
        "a_extra_items": sorted(a_extra),
        "b_extra_items": sorted(b_extra),
        "errors": errors,
    }
    if args.all:
        output["matches"] = matches

    output_file = generate_next_file_name('diff', ext='.json')
    with open(output_file, 'w') as f:
        json.dump(output, f, indent=4, sort_keys=True)

    print("Found:")
    print("The first project has %s unmatched items" % len(a_extra))
    print("The second project has %s unmatched items" % len(b_extra))
    print("%s item conflicts" % len(errors))
    print("%s matching annotations" % len(matches))
    print("%s mismatching annotations" % len(mismatches))

    log.info("Output has been saved to '%s'" % output_file)

    return 0
|
| 639 |
+
|
| 640 |
+
def build_transform_parser(parser_ctor=argparse.ArgumentParser):
    """Build the argument parser for the 'transform' command.

    The command applies a named transform plugin to the project's items
    and writes the result to a new project directory.
    """
    # Collect the builtin transform names for the help text.
    builtins = sorted(Environment().transforms.items)

    parser = parser_ctor(help="Transform project",
        description="""
        Applies some operation to dataset items in the project
        and produces a new project.|n
        |n
        Builtin transforms: %s|n
        |n
        Examples:|n
        - Convert instance polygons to masks:|n
        |s|stransform -t polygons_to_masks
        """ % ', '.join(builtins),
        formatter_class=MultilineFormatter)

    parser.add_argument('-t', '--transform', required=True,
        help="Transform to apply to the project")
    parser.add_argument('-o', '--output-dir', dest='dst_dir', default=None,
        help="Directory to save output (default: current dir)")
    parser.add_argument('--overwrite', action='store_true',
        help="Overwrite existing files in the save directory")
    parser.add_argument('-p', '--project', dest='project_dir', default='.',
        help="Directory of the project to operate on (default: current dir)")
    # Everything after '--' is forwarded to the transform plugin itself.
    parser.add_argument('extra_args', nargs=argparse.REMAINDER, default=None,
        help="Additional arguments for transformation (pass '-- -h' for help)")
    parser.set_defaults(command=transform_command)

    return parser
|
| 669 |
+
|
| 670 |
+
def transform_command(args):
    """Apply a transform plugin to the project and save the result.

    Resolves the output directory, looks up the transform by name, parses
    any plugin-specific CLI arguments, and runs the transformation.
    Returns 0 on success.
    """
    project = load_project(args.project_dir)

    out_dir = args.dst_dir
    if not out_dir:
        out_dir = generate_next_file_name('%s-%s' %
            (project.config.project_name, make_file_name(args.transform)))
    elif not args.overwrite and osp.isdir(out_dir) and os.listdir(out_dir):
        raise CliException("Directory '%s' already exists "
            "(pass --overwrite to overwrite)" % out_dir)
    out_dir = osp.abspath(out_dir)

    try:
        method = project.env.transforms.get(args.transform)
    except KeyError:
        raise CliException("Transform '%s' is not found" % args.transform)

    # Not every transform declares CLI parameters.
    method_args = {}
    if hasattr(method, 'from_cmdline'):
        method_args = method.from_cmdline(args.extra_args)

    log.info("Loading the project...")
    dataset = project.make_dataset()

    log.info("Transforming the project...")
    dataset.transform_project(
        method=method,
        save_dir=out_dir,
        **method_args
    )

    log.info("Transform results have been saved to '%s'" % out_dir)

    return 0
|
| 705 |
+
|
| 706 |
+
def build_stats_parser(parser_ctor=argparse.ArgumentParser):
    """Build the argument parser for the 'stats' command."""
    parser = parser_ctor(help="Get project statistics",
        description="""
        Outputs various project statistics like image mean and std,
        annotations count etc.
        """,
        formatter_class=MultilineFormatter)

    parser.add_argument('-p', '--project', dest='project_dir', default='.',
        help="Directory of the project to operate on (default: current dir)")
    parser.set_defaults(command=stats_command)

    return parser
|
| 719 |
+
|
| 720 |
+
def stats_command(args):
    """Compute image and annotation statistics and save them as JSON.

    Writes the combined statistics to an auto-named 'statistics*.json'
    file in the current directory.

    Fix: added the `return 0` success code for consistency with every
    other command handler in this module, whose return value is used as
    the CLI exit code.
    """
    project = load_project(args.project_dir)

    dataset = project.make_dataset()
    stats = {}
    stats.update(compute_image_statistics(dataset))
    stats.update(compute_ann_statistics(dataset))

    dst_file = generate_next_file_name('statistics', ext='.json')
    log.info("Writing project statistics to '%s'" % dst_file)
    with open(dst_file, 'w') as f:
        json.dump(stats, f, indent=4, sort_keys=True)

    return 0
|
| 732 |
+
|
| 733 |
+
def build_info_parser(parser_ctor=argparse.ArgumentParser):
    """Build the argument parser for the 'info' command."""
    parser = parser_ctor(help="Get project info",
        description="""
        Outputs project info.
        """,
        formatter_class=MultilineFormatter)

    parser.add_argument('--all', action='store_true',
        help="Print all information")
    parser.add_argument('-p', '--project', dest='project_dir', default='.',
        help="Directory of the project to operate on (default: current dir)")
    parser.set_defaults(command=info_command)

    return parser
|
| 747 |
+
|
| 748 |
+
def info_command(args):
    """Print a human-readable summary of the project to stdout.

    Reports the project name/location, registered plugins, configured
    sources, dataset statistics (overall and per subset), and models.
    With --all, label lists are printed in full instead of truncated.
    Returns 0.
    """
    project = load_project(args.project_dir)
    config = project.config
    env = project.env
    dataset = project.make_dataset()

    print("Project:")
    print("  name:", config.project_name)
    print("  location:", config.project_dir)
    print("Plugins:")
    print("  importers:", ', '.join(env.importers.items))
    print("  extractors:", ', '.join(env.extractors.items))
    print("  converters:", ', '.join(env.converters.items))
    print("  launchers:", ', '.join(env.launchers.items))

    print("Sources:")
    for source_name, source in config.sources.items():
        print("  source '%s':" % source_name)
        print("    format:", source.format)
        print("    url:", source.url)
        print("    location:", project.local_source_dir(source_name))

    def print_extractor_info(extractor, indent=''):
        # Summarize a dataset/subset: size, category types, and (for label
        # categories) a possibly-truncated label list.
        print("%slength:" % indent, len(extractor))

        categories = extractor.categories()
        # Iterating the categories mapping yields AnnotationType keys.
        print("%scategories:" % indent, ', '.join(c.name for c in categories))

        for cat_type, cat in categories.items():
            print("%s  %s:" % (indent, cat_type.name))
            if cat_type == AnnotationType.label:
                print("%s    count:" % indent, len(cat.items))

                # Truncate long label lists unless --all was given.
                count_threshold = 10
                if args.all:
                    count_threshold = len(cat.items)
                labels = ', '.join(c.name for c in cat.items[:count_threshold])
                if count_threshold < len(cat.items):
                    labels += " (and %s more)" % (
                        len(cat.items) - count_threshold)
                print("%s    labels:" % indent, labels)

    print("Dataset:")
    print_extractor_info(dataset, indent="  ")

    subsets = dataset.subsets()
    print("  subsets:", ', '.join(subsets))
    for subset_name in subsets:
        subset = dataset.get_subset(subset_name)
        print("  subset '%s':" % subset_name)
        print_extractor_info(subset, indent="    ")

    print("Models:")
    for model_name, model in config.models.items():
        print("  model '%s':" % model_name)
        print("    type:", model.launcher)

    return 0
|
| 806 |
+
|
| 807 |
+
|
| 808 |
+
def build_parser(parser_ctor=argparse.ArgumentParser):
    """Build the top-level 'project' context parser with all subcommands."""
    parser = parser_ctor(
        description="""
        Manipulate projects.|n
        |n
        By default, the project to be operated on is searched for
        in the current directory. An additional '-p' argument can be
        passed to specify project location.
        """,
        formatter_class=MultilineFormatter)

    # Each subcommand registers its own handler via set_defaults(command=...).
    subparsers = parser.add_subparsers()
    add_subparser(subparsers, 'create', build_create_parser)
    add_subparser(subparsers, 'import', build_import_parser)
    add_subparser(subparsers, 'export', build_export_parser)
    add_subparser(subparsers, 'filter', build_filter_parser)
    add_subparser(subparsers, 'merge', build_merge_parser)
    add_subparser(subparsers, 'diff', build_diff_parser)
    add_subparser(subparsers, 'ediff', build_ediff_parser)
    add_subparser(subparsers, 'transform', build_transform_parser)
    add_subparser(subparsers, 'info', build_info_parser)
    add_subparser(subparsers, 'stats', build_stats_parser)

    return parser
|
testbed/openvinotoolkit__datumaro/datumaro/cli/contexts/source/__init__.py
ADDED
|
@@ -0,0 +1,273 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
|
| 2 |
+
# Copyright (C) 2019-2020 Intel Corporation
|
| 3 |
+
#
|
| 4 |
+
# SPDX-License-Identifier: MIT
|
| 5 |
+
|
| 6 |
+
import argparse
|
| 7 |
+
import logging as log
|
| 8 |
+
import os
|
| 9 |
+
import os.path as osp
|
| 10 |
+
import shutil
|
| 11 |
+
|
| 12 |
+
from datumaro.components.project import Environment
|
| 13 |
+
from ...util import add_subparser, CliException, MultilineFormatter
|
| 14 |
+
from ...util.project import load_project
|
| 15 |
+
|
| 16 |
+
|
| 17 |
+
def build_add_parser(parser_ctor=argparse.ArgumentParser):
    """Build the argument parser for the source 'add' command.

    Supports two source types via subparsers: 'path' (a local directory
    or file) and 'git' (a remote repository added as a git submodule).
    Common options live in a shared parent parser; a custom help action
    merges them into the top-level help output.
    """
    builtins = sorted(Environment().extractors.items)

    # Options shared by both the 'path' and 'git' source types.
    base_parser = argparse.ArgumentParser(add_help=False)
    base_parser.add_argument('-n', '--name', default=None,
        help="Name of the new source")
    base_parser.add_argument('-f', '--format', required=True,
        help="Source dataset format")
    base_parser.add_argument('--skip-check', action='store_true',
        help="Skip source checking")
    base_parser.add_argument('-p', '--project', dest='project_dir', default='.',
        help="Directory of the project to operate on (default: current dir)")

    parser = parser_ctor(help="Add data source to project",
        description="""
        Adds a data source to a project. The source can be:|n
        - a dataset in a supported format (check 'formats' section below)|n
        - a Datumaro project|n
        |n
        The source can be either a local directory or a remote
        git repository. Each source type has its own parameters, which can
        be checked by:|n
        '%s'.|n
        |n
        Formats:|n
        Datasets come in a wide variety of formats. Each dataset
        format defines its own data structure and rules on how to
        interpret the data. For example, the following data structure
        is used in COCO format:|n
        /dataset/|n
        - /images/<id>.jpg|n
        - /annotations/|n
        |n
        In Datumaro dataset formats are supported by Extractor-s.
        An Extractor produces a list of dataset items corresponding
        to the dataset. It is possible to add a custom Extractor.
        To do this, you need to put an Extractor
        definition script to <project_dir>/.datumaro/extractors.|n
        |n
        List of builtin source formats: %s|n
        |n
        Examples:|n
        - Add a local directory with VOC-like dataset:|n
        |s|sadd path path/to/voc -f voc_detection|n
        - Add a local file with CVAT annotations, call it 'mysource'|n
        |s|s|s|sto the project somewhere else:|n
        |s|sadd path path/to/cvat.xml -f cvat -n mysource -p somewhere/else/
        """ % ('%(prog)s SOURCE_TYPE --help', ', '.join(builtins)),
        formatter_class=MultilineFormatter,
        add_help=False)
    parser.set_defaults(command=add_command)

    sp = parser.add_subparsers(dest='source_type', metavar='SOURCE_TYPE',
        help="The type of the data source "
            "(call '%s SOURCE_TYPE --help' for more info)" % parser.prog)

    dir_parser = sp.add_parser('path', help="Add local path as source",
        parents=[base_parser])
    dir_parser.add_argument('url',
        help="Path to the source")
    dir_parser.add_argument('--copy', action='store_true',
        help="Copy the dataset instead of saving source links")

    repo_parser = sp.add_parser('git', help="Add git repository as source",
        parents=[base_parser])
    repo_parser.add_argument('url',
        help="URL of the source git repository")
    repo_parser.add_argument('-b', '--branch', default='master',
        help="Branch of the source repository (default: %(default)s)")
    repo_parser.add_argument('--checkout', action='store_true',
        help="Do branch checkout")

    # NOTE: add common parameters to the parent help output
    # the other way could be to use parse_known_args()
    display_parser = argparse.ArgumentParser(
        parents=[base_parser, parser],
        prog=parser.prog, usage="%(prog)s [-h] SOURCE_TYPE ...",
        description=parser.description, formatter_class=MultilineFormatter)
    class HelpAction(argparse._HelpAction):
        # Show the merged (base + subcommand) help instead of the bare
        # top-level parser's help.
        def __call__(self, parser, namespace, values, option_string=None):
            display_parser.print_help()
            parser.exit()

    parser.add_argument('-h', '--help', action=HelpAction,
        help='show this help message and exit')

    # TODO: needed distinction on how to add an extractor or a remote source

    return parser
|
| 106 |
+
|
| 107 |
+
def add_command(args):
    """Add a data source ('git' submodule or local 'path') to the project.

    Derives the source name from the URL when '-n' is omitted, refuses to
    overwrite an existing submodule or source, registers the source in the
    project config, optionally validates it by building a dataset, and
    saves the project. Returns 0 on success.
    """
    project = load_project(args.project_dir)

    if args.source_type == 'git':
        name = args.name
        if name is None:
            # Default name: repository basename without extension.
            name = osp.splitext(osp.basename(args.url))[0]

        if project.env.git.has_submodule(name):
            raise CliException("Git submodule '%s' already exists" % name)

        # EAFP duplicate check: get_source raises KeyError when absent.
        # NOTE(review): assumes CliException is not a KeyError subclass,
        # otherwise the 'already exists' error would be swallowed — confirm.
        try:
            project.get_source(name)
            raise CliException("Source '%s' already exists" % name)
        except KeyError:
            pass

        rel_local_dir = project.local_source_dir(name)
        local_dir = osp.join(project.config.project_dir, rel_local_dir)
        url = args.url
        project.env.git.create_submodule(name, local_dir,
            url=url, branch=args.branch, no_checkout=not args.checkout)
    elif args.source_type == 'path':
        url = osp.abspath(args.url)
        if not osp.exists(url):
            raise CliException("Source path '%s' does not exist" % url)

        name = args.name
        if name is None:
            name = osp.splitext(osp.basename(url))[0]

        if project.env.git.has_submodule(name):
            raise CliException("Git submodule '%s' already exists" % name)

        try:
            project.get_source(name)
            raise CliException("Source '%s' already exists" % name)
        except KeyError:
            pass

        rel_local_dir = project.local_source_dir(name)
        local_dir = osp.join(project.config.project_dir, rel_local_dir)

        if args.copy:
            # With --copy the data is duplicated into the project and the
            # stored url becomes project-relative.
            log.info("Copying from '%s' to '%s'" % (url, local_dir))
            if osp.isdir(url):
                # copytree requires destination dir not to exist
                shutil.copytree(url, local_dir)
                url = rel_local_dir
            elif osp.isfile(url):
                os.makedirs(local_dir)
                shutil.copy2(url, local_dir)
                url = osp.join(rel_local_dir, osp.basename(url))
            else:
                raise Exception("Expected file or directory")
        else:
            os.makedirs(local_dir)

    project.add_source(name, { 'url': url, 'format': args.format })

    if not args.skip_check:
        log.info("Checking the source...")
        try:
            project.make_source_project(name).make_dataset()
        except Exception:
            # Roll back the local data on a failed validation, then re-raise.
            shutil.rmtree(local_dir, ignore_errors=True)
            raise

    project.save()

    log.info("Source '%s' has been added to the project, location: '%s'" \
        % (name, rel_local_dir))

    return 0
|
| 181 |
+
|
| 182 |
+
def build_remove_parser(parser_ctor=argparse.ArgumentParser):
    """Construct the argument parser for the 'remove' subcommand.

    parser_ctor is normally a subparsers.add_parser wrapper, which is why
    the 'help' keyword is passed (plain ArgumentParser does not accept it).
    """
    parser = parser_ctor(help="Remove source from project",
        description="Remove a source from a project.")

    # Target selection and removal behavior flags.
    parser.add_argument('-n', '--name', required=True,
        help="Name of the source to be removed")
    parser.add_argument('--force', action='store_true',
        help="Ignore possible errors during removal")
    parser.add_argument('--keep-data', action='store_true',
        help="Do not remove source data")
    parser.add_argument('-p', '--project', dest='project_dir', default='.',
        help="Directory of the project to operate on (default: current dir)")
    parser.set_defaults(command=remove_command)
    return parser
|
| 197 |
+
|
| 198 |
+
def remove_command(args):
    """CLI handler: remove a named data source from the project."""
    project = load_project(args.project_dir)

    name = args.name
    if not name:
        raise CliException("Expected source name")

    # Verify existence unless the user forces the removal.
    try:
        project.get_source(name)
    except KeyError:
        if not args.force:
            raise CliException("Source '%s' does not exist" % name)

    if project.env.git.has_submodule(name):
        if args.force:
            log.warning("Forcefully removing the '%s' source..." % name)
        project.env.git.remove_submodule(name, force=args.force)

    source_dir = osp.join(
        project.config.project_dir, project.local_source_dir(name))
    project.remove_source(name)
    project.save()

    # Data removal happens after the config is saved, best-effort.
    if not args.keep_data:
        shutil.rmtree(source_dir, ignore_errors=True)

    log.info("Source '%s' has been removed from the project" % name)
    return 0
|
| 227 |
+
|
| 228 |
+
def build_info_parser(parser_ctor=argparse.ArgumentParser):
    """Construct the argument parser for the 'info' subcommand."""
    parser = parser_ctor()

    parser.add_argument('-n', '--name',
        help="Source name")
    parser.add_argument('-v', '--verbose', action='store_true',
        help="Show details")
    parser.add_argument('-p', '--project', dest='project_dir', default='.',
        help="Directory of the project to operate on (default: current dir)")
    parser.set_defaults(command=info_command)
    return parser
|
| 240 |
+
|
| 241 |
+
def info_command(args):
    """CLI handler: print one source's config, or list all sources.

    With -n/--name, prints that source's configuration; otherwise lists
    every source name, adding the raw config dict when --verbose is set.
    """
    project = load_project(args.project_dir)

    if args.name:
        source = project.get_source(args.name)
        print(source)
    else:
        for name, conf in project.config.sources.items():
            print(name)
            if args.verbose:
                print(dict(conf))

    # Consistency fix: the other command handlers in this module
    # (add/remove) return 0 on success; this one fell off the end
    # returning None.
    return 0
|
| 252 |
+
|
| 253 |
+
def build_parser(parser_ctor=argparse.ArgumentParser):
    """Construct the top-level 'source' context parser with subcommands."""
    # NOTE: the description's internal indentation is collapsed by
    # MultilineFormatter, which honors only the explicit |n / |s markers.
    parser = parser_ctor(description="""
            Manipulate data sources inside of a project.|n
            |n
            A data source is a source of data for a project.
            The project combines multiple data sources into one dataset.
            The role of a data source is to provide dataset items - images
            and/or annotations.|n
            |n
            By default, the project to be operated on is searched for
            in the current directory. An additional '-p' argument can be
            passed to specify project location.
        """,
        formatter_class=MultilineFormatter)

    subparsers = parser.add_subparsers()
    add_subparser(subparsers, 'add', build_add_parser)
    add_subparser(subparsers, 'remove', build_remove_parser)
    add_subparser(subparsers, 'info', build_info_parser)

    return parser
|
testbed/openvinotoolkit__datumaro/datumaro/cli/util/__init__.py
ADDED
|
@@ -0,0 +1,74 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
|
| 2 |
+
# Copyright (C) 2019-2020 Intel Corporation
|
| 3 |
+
#
|
| 4 |
+
# SPDX-License-Identifier: MIT
|
| 5 |
+
|
| 6 |
+
import argparse
|
| 7 |
+
import textwrap
|
| 8 |
+
|
| 9 |
+
|
| 10 |
+
class CliException(Exception):
    """Base exception for user-facing CLI errors."""
|
| 11 |
+
|
| 12 |
+
def add_subparser(subparsers, name, builder):
    """Register a subcommand: hand the builder a ctor bound to 'name'."""
    ctor = lambda **kwargs: subparsers.add_parser(name, **kwargs)
    return builder(ctor)
|
| 14 |
+
|
| 15 |
+
class MultilineFormatter(argparse.HelpFormatter):
    """
    Keeps line breaks introduced with '|n' separator
    and spaces introduced with '|s'.
    """

    def __init__(self, keep_natural=False, **kwargs):
        super().__init__(**kwargs)
        # When set, real newlines in the text also split paragraphs.
        self._keep_natural = keep_natural

    def _fill_text(self, text, width, indent):
        # Collapse all whitespace first, then restore explicit markers.
        text = self._whitespace_matcher.sub(' ', text).strip()
        text = text.replace('|s', ' ')

        paragraphs = text.split('|n ')
        if self._keep_natural:
            paragraphs = sum((p.split('\n ') for p in paragraphs), [])

        # Wrap each paragraph independently, one per output line group.
        wrapped = [
            textwrap.fill(paragraph, width,
                initial_indent=indent, subsequent_indent=indent) + '\n'
            for paragraph in paragraphs
        ]
        return ''.join(wrapped)
|
| 39 |
+
|
| 40 |
+
def required_count(nmin=0, nmax=0):
    """Return an argparse Action class that enforces a value-count range.

    A bound of 0 means "no bound on that side"; at least one bound must
    be non-zero. Raises argparse.ArgumentTypeError at parse time when the
    number of supplied values falls outside [nmin, nmax].
    """
    # BUGFIX: the original expression `0 <= nmin and 0 <= nmax and nmin
    # or nmax` parses as `(... and nmin) or nmax`, so a negative nmin was
    # accepted whenever nmax was truthy. Parenthesize the intended
    # "at least one bound is set" clause.
    assert 0 <= nmin and 0 <= nmax and (nmin or nmax)

    class RequiredCount(argparse.Action):
        def __call__(self, parser, args, values, option_string=None):
            k = len(values)
            # Each bound only applies when it is non-zero.
            if not ((nmin and (nmin <= k) or not nmin) and \
                    (nmax and (k <= nmax) or not nmax)):
                msg = "Argument '%s' requires" % self.dest
                if nmin and nmax:
                    msg += " from %s to %s arguments" % (nmin, nmax)
                elif nmin:
                    msg += " at least %s arguments" % nmin
                else:
                    msg += " no more %s arguments" % nmax
                raise argparse.ArgumentTypeError(msg)
            setattr(args, self.dest, values)
    return RequiredCount
|
| 58 |
+
|
| 59 |
+
def at_least(n):
    """Shorthand: an argparse Action requiring n or more values."""
    return required_count(n, 0)
|
| 61 |
+
|
| 62 |
+
def make_file_name(s):
    """
    Normalizes string, converts to lowercase, removes non-alpha characters,
    and converts spaces to hyphens (a slugify).
    """
    # adapted from
    # https://docs.djangoproject.com/en/2.1/_modules/django/utils/text/#slugify
    import re
    import unicodedata
    # Decompose accents, then drop anything outside ASCII.
    name = unicodedata.normalize('NFKD', s) \
        .encode('ascii', 'ignore').decode()
    name = re.sub(r'[^\w\s-]', '', name).strip().lower()
    return re.sub(r'[-\s]+', '-', name)
|
testbed/openvinotoolkit__datumaro/datumaro/cli/util/project.py
ADDED
|
@@ -0,0 +1,39 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
|
| 2 |
+
# Copyright (C) 2019-2020 Intel Corporation
|
| 3 |
+
#
|
| 4 |
+
# SPDX-License-Identifier: MIT
|
| 5 |
+
|
| 6 |
+
import os
|
| 7 |
+
import re
|
| 8 |
+
|
| 9 |
+
from datumaro.components.project import Project
|
| 10 |
+
from datumaro.util import cast
|
| 11 |
+
|
| 12 |
+
|
| 13 |
+
def load_project(project_dir):
    """Open the existing Datumaro project located at project_dir."""
    return Project.load(project_dir)
|
| 15 |
+
|
| 16 |
+
def generate_next_file_name(basename, basedir='.', sep='.', ext=''):
    """
    If basedir does not contain basename, returns basename,
    otherwise generates a name by appending sep to the basename
    and the number, next to the last used number in the basedir for
    files with basename prefix. Optionally, appends ext.
    """
    existing = os.listdir(basedir)
    return generate_next_name(existing, basename, sep, ext)
|
| 25 |
+
|
| 26 |
+
def generate_next_name(names, basename, sep='.', suffix='', default=None):
    """Pick the next unused name of the form basename[sep<index>]suffix,
    based on the largest index already present in 'names'."""
    pattern = re.compile(r'%s(?:%s(\d+))?%s' % \
        tuple(map(re.escape, [basename, sep, suffix])))
    matches = [m for m in map(pattern.match, names) if m]

    # A match without an index counts as index 0 (via cast's default).
    max_idx = max((cast(m[1], int, 0) for m in matches), default=None)
    if max_idx is None:
        # No existing name matched: use the bare name, or the requested
        # starting index when one is given.
        idx = sep + str(default) if default is not None else ''
    else:
        idx = sep + str(max_idx + 1)
    return basename + idx + suffix
|
testbed/openvinotoolkit__datumaro/datumaro/components/__init__.py
ADDED
|
@@ -0,0 +1,5 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
|
| 2 |
+
# Copyright (C) 2019-2020 Intel Corporation
|
| 3 |
+
#
|
| 4 |
+
# SPDX-License-Identifier: MIT
|
| 5 |
+
|
testbed/openvinotoolkit__datumaro/datumaro/components/algorithms/__init__.py
ADDED
|
@@ -0,0 +1,5 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
|
| 2 |
+
# Copyright (C) 2019-2020 Intel Corporation
|
| 3 |
+
#
|
| 4 |
+
# SPDX-License-Identifier: MIT
|
| 5 |
+
|
testbed/openvinotoolkit__datumaro/datumaro/components/cli_plugin.py
ADDED
|
@@ -0,0 +1,44 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
|
| 2 |
+
# Copyright (C) 2020 Intel Corporation
|
| 3 |
+
#
|
| 4 |
+
# SPDX-License-Identifier: MIT
|
| 5 |
+
|
| 6 |
+
import argparse
|
| 7 |
+
|
| 8 |
+
from datumaro.cli.util import MultilineFormatter
|
| 9 |
+
from datumaro.util import to_snake_case
|
| 10 |
+
|
| 11 |
+
|
| 12 |
+
class CliPlugin:
    """Mixin giving plugins a CLI identity: a name, a description taken
    from the docstring, and a command-line parser."""

    @staticmethod
    def _get_name(cls):
        # An explicit NAME attribute wins; otherwise derive the name from
        # the class name with the plugin-kind suffix stripped.
        return getattr(cls, 'NAME',
            remove_plugin_type(to_snake_case(cls.__name__)))

    @staticmethod
    def _get_doc(cls):
        # NOTE(review): classes always have a __doc__ attribute (possibly
        # None), so the "" fallback rarely triggers — confirm intent.
        return getattr(cls, '__doc__', "")

    @classmethod
    def build_cmdline_parser(cls, **kwargs):
        parser_args = {
            'prog': cls._get_name(cls),
            'description': cls._get_doc(cls),
            'formatter_class': MultilineFormatter,
        }
        parser_args.update(kwargs)

        return argparse.ArgumentParser(**parser_args)

    @classmethod
    def from_cmdline(cls, args=None):
        """Parse plugin options from a CLI argument list into a dict."""
        # Tolerate a leading '--' separator before the plugin options.
        if args and args[0] == '--':
            args = args[1:]
        parsed = cls.build_cmdline_parser().parse_args(args)
        return vars(parsed)
|
| 40 |
+
|
| 41 |
+
def remove_plugin_type(s):
    """Strip any known plugin-kind suffix component from a snake_case name."""
    # The replacements are independent, so iteration order is irrelevant;
    # a tuple just makes it deterministic.
    for kind in ('transform', 'extractor', 'converter', 'launcher', 'importer'):
        s = s.replace('_' + kind, '')
    return s
|
testbed/openvinotoolkit__datumaro/datumaro/components/config_model.py
ADDED
|
@@ -0,0 +1,63 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
|
| 2 |
+
# Copyright (C) 2019-2020 Intel Corporation
|
| 3 |
+
#
|
| 4 |
+
# SPDX-License-Identifier: MIT
|
| 5 |
+
|
| 6 |
+
from datumaro.components.config import Config, \
|
| 7 |
+
DefaultConfig as _DefaultConfig, \
|
| 8 |
+
SchemaBuilder as _SchemaBuilder
|
| 9 |
+
|
| 10 |
+
|
| 11 |
+
SOURCE_SCHEMA = _SchemaBuilder() \
|
| 12 |
+
.add('url', str) \
|
| 13 |
+
.add('format', str) \
|
| 14 |
+
.add('options', dict) \
|
| 15 |
+
.build()
|
| 16 |
+
|
| 17 |
+
class Source(Config):
    """Configuration record of a project data source (url/format/options)."""

    def __init__(self, config=None):
        super().__init__(config, schema=SOURCE_SCHEMA)
|
| 20 |
+
|
| 21 |
+
|
| 22 |
+
MODEL_SCHEMA = _SchemaBuilder() \
|
| 23 |
+
.add('launcher', str) \
|
| 24 |
+
.add('options', dict) \
|
| 25 |
+
.build()
|
| 26 |
+
|
| 27 |
+
class Model(Config):
    """Configuration record of a project model (launcher/options)."""

    def __init__(self, config=None):
        super().__init__(config, schema=MODEL_SCHEMA)
|
| 30 |
+
|
| 31 |
+
|
| 32 |
+
PROJECT_SCHEMA = _SchemaBuilder() \
|
| 33 |
+
.add('project_name', str) \
|
| 34 |
+
.add('format_version', int) \
|
| 35 |
+
\
|
| 36 |
+
.add('subsets', list) \
|
| 37 |
+
.add('sources', lambda: _DefaultConfig(
|
| 38 |
+
lambda v=None: Source(v))) \
|
| 39 |
+
.add('models', lambda: _DefaultConfig(
|
| 40 |
+
lambda v=None: Model(v))) \
|
| 41 |
+
\
|
| 42 |
+
.add('models_dir', str, internal=True) \
|
| 43 |
+
.add('plugins_dir', str, internal=True) \
|
| 44 |
+
.add('sources_dir', str, internal=True) \
|
| 45 |
+
.add('dataset_dir', str, internal=True) \
|
| 46 |
+
.add('project_filename', str, internal=True) \
|
| 47 |
+
.add('project_dir', str, internal=True) \
|
| 48 |
+
.add('env_dir', str, internal=True) \
|
| 49 |
+
.build()
|
| 50 |
+
|
| 51 |
+
PROJECT_DEFAULT_CONFIG = Config({
|
| 52 |
+
'project_name': 'undefined',
|
| 53 |
+
'format_version': 1,
|
| 54 |
+
|
| 55 |
+
'sources_dir': 'sources',
|
| 56 |
+
'dataset_dir': 'dataset',
|
| 57 |
+
'models_dir': 'models',
|
| 58 |
+
'plugins_dir': 'plugins',
|
| 59 |
+
|
| 60 |
+
'project_filename': 'config.yaml',
|
| 61 |
+
'project_dir': '',
|
| 62 |
+
'env_dir': '.datumaro',
|
| 63 |
+
}, mutable=False, schema=PROJECT_SCHEMA)
|
testbed/openvinotoolkit__datumaro/datumaro/components/converter.py
ADDED
|
@@ -0,0 +1,76 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
|
| 2 |
+
# Copyright (C) 2019-2020 Intel Corporation
|
| 3 |
+
#
|
| 4 |
+
# SPDX-License-Identifier: MIT
|
| 5 |
+
|
| 6 |
+
import logging as log
|
| 7 |
+
import os
|
| 8 |
+
import os.path as osp
|
| 9 |
+
import shutil
|
| 10 |
+
|
| 11 |
+
from datumaro.components.cli_plugin import CliPlugin
|
| 12 |
+
from datumaro.util.image import save_image, ByteImage
|
| 13 |
+
|
| 14 |
+
|
| 15 |
+
class Converter(CliPlugin):
    """Base class for dataset format writers.

    Subclasses set DEFAULT_IMAGE_EXT and implement apply() to write
    self._extractor's items into self._save_dir.
    """
    DEFAULT_IMAGE_EXT = None

    @classmethod
    def build_cmdline_parser(cls, **kwargs):
        parser = super().build_cmdline_parser(**kwargs)
        parser.add_argument('--save-images', action='store_true',
            help="Save images (default: %(default)s)")
        parser.add_argument('--image-ext', default=None,
            help="Image extension (default: keep or use format default%s)" % \
                (' ' + cls.DEFAULT_IMAGE_EXT if cls.DEFAULT_IMAGE_EXT else ''))

        return parser

    @classmethod
    def convert(cls, extractor, save_dir, **options):
        """Shortcut: construct a converter and run it immediately."""
        converter = cls(extractor, save_dir, **options)
        return converter.apply()

    def apply(self):
        """Write the dataset. Must be implemented by format subclasses."""
        raise NotImplementedError("Should be implemented in a subclass")

    def __init__(self, extractor, save_dir, save_images=False,
            image_ext=None, default_image_ext=None):
        default_image_ext = default_image_ext or self.DEFAULT_IMAGE_EXT
        assert default_image_ext
        self._default_image_ext = default_image_ext

        self._save_images = save_images
        self._image_ext = image_ext  # forced output extension, if any

        self._extractor = extractor
        self._save_dir = save_dir

    def _find_image_ext(self, item):
        """Choose output extension: forced > source > format default."""
        src_ext = None
        if item.has_image:
            src_ext = item.image.ext

        return self._image_ext or src_ext or self._default_image_ext

    def _make_image_filename(self, item):
        return item.id + self._find_image_ext(item)

    def _save_image(self, item, path=None):
        """Write the item's image to 'path' (or a generated filename),
        copying bytes when the extension is unchanged, re-encoding
        otherwise."""
        if not item.image.has_data:
            log.warning("Item '%s' has no image", item.id)
            return

        path = path or self._make_image_filename(item)

        src_ext = item.image.ext.lower()
        dst_ext = osp.splitext(osp.basename(path))[1].lower()

        # BUGFIX: for a bare file name osp.dirname(path) is '', and
        # os.makedirs('') raises FileNotFoundError even with exist_ok.
        # Only create the directory when there actually is one.
        dst_dir = osp.dirname(path)
        if dst_dir:
            os.makedirs(dst_dir, exist_ok=True)
        if src_ext == dst_ext and osp.isfile(item.image.path):
            # Same format and source file on disk: plain byte copy.
            shutil.copyfile(item.image.path, path)
        elif src_ext == dst_ext and isinstance(item.image, ByteImage):
            with open(path, 'wb') as f:
                f.write(item.image.get_bytes())
        else:
            save_image(path, item.image.data)
|
testbed/openvinotoolkit__datumaro/datumaro/components/dataset.py
ADDED
|
@@ -0,0 +1,186 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright (C) 2020 Intel Corporation
|
| 2 |
+
#
|
| 3 |
+
# SPDX-License-Identifier: MIT
|
| 4 |
+
|
| 5 |
+
from collections import OrderedDict, defaultdict
|
| 6 |
+
from typing import Iterable, Union, Dict, List
|
| 7 |
+
|
| 8 |
+
from datumaro.components.extractor import (Extractor, LabelCategories,
|
| 9 |
+
AnnotationType, DatasetItem, DEFAULT_SUBSET_NAME)
|
| 10 |
+
from datumaro.components.dataset_filter import \
|
| 11 |
+
XPathDatasetFilter, XPathAnnotationsFilter
|
| 12 |
+
|
| 13 |
+
|
| 14 |
+
class Dataset(Extractor):
    """In-memory dataset: items grouped into named subsets, plus a shared
    categories mapping."""

    class Subset(Extractor):
        """One named slice of the parent dataset."""

        def __init__(self, parent):
            self.parent = parent
            self.items = OrderedDict()

        def __iter__(self):
            yield from self.items.values()

        def __len__(self):
            return len(self.items)

        def categories(self):
            # Categories are shared dataset-wide.
            return self.parent.categories()

    @classmethod
    def from_iterable(cls, iterable: Iterable[DatasetItem],
            categories: Union[Dict, List[str]] = None):
        """Build a dataset from plain items. A list of label names is
        accepted as a shorthand for label categories."""
        if isinstance(categories, list):
            categories = {
                AnnotationType.label:
                    LabelCategories.from_iterable(categories),
            }
        if not categories:
            categories = {}

        class _extractor(Extractor):
            def __iter__(self):
                return iter(iterable)

            def categories(self):
                return categories

        return cls.from_extractors(_extractor())

    @classmethod
    def from_extractors(cls, *sources):
        """Merge several extractors, joining items that share the same
        (subset, id) pair."""
        merged_categories = cls._merge_categories(
            s.categories() for s in sources)
        dataset = Dataset(categories=merged_categories)

        subsets = defaultdict(lambda: cls.Subset(dataset))
        for source in sources:
            for item in source:
                existing = subsets[item.subset].items.get(item.id)
                if existing is not None:
                    # Keep the path only when both copies agree on it.
                    path = existing.path if item.path == existing.path \
                        else None
                    item = cls._merge_items(existing, item, path=path)

                subsets[item.subset].items[item.id] = item

        dataset._subsets = dict(subsets)
        return dataset

    def __init__(self, categories=None):
        super().__init__()

        self._subsets = {}
        # assumes Extractor.__init__ initializes self._length — TODO confirm
        self._categories = categories or {}

    def __iter__(self):
        for subset in self._subsets.values():
            yield from subset

    def __len__(self):
        # Lazily cached; invalidated by put().
        if self._length is None:
            self._length = sum(map(len, self._subsets.values()))
        return self._length

    def get_subset(self, name):
        return self._subsets[name]

    def subsets(self):
        return self._subsets

    def categories(self):
        return self._categories

    def get(self, item_id, subset=None, path=None):
        """Look an item up by id within a subset; path addressing is not
        supported by an in-memory dataset."""
        if path:
            raise KeyError("Requested dataset item path is not found")
        subset_name = subset or DEFAULT_SUBSET_NAME
        return self._subsets[subset_name].items[str(item_id)]

    def put(self, item, item_id=None, subset=None, path=None):
        """Insert (or replace) an item, creating its subset on demand."""
        if path:
            raise KeyError("Requested dataset item path is not found")

        item_id = item.id if item_id is None else item_id
        subset = item.subset if subset is None else subset

        item = item.wrap(id=item_id, subset=subset, path=None)
        if subset not in self._subsets:
            self._subsets[subset] = self.Subset(self)
        self._subsets[subset].items[item_id] = item
        self._length = None  # invalidate the cached length

        return item

    def filter(self, expr, filter_annotations=False, remove_empty=False):
        """Apply an XPath filter to items, or to annotations when
        filter_annotations is set."""
        if filter_annotations:
            return self.transform(XPathAnnotationsFilter, expr, remove_empty)
        return self.transform(XPathDatasetFilter, expr)

    def update(self, items):
        for item in items:
            self.put(item)
        return self

    def define_categories(self, categories):
        # Only allowed while the dataset has no categories yet.
        assert not self._categories
        self._categories = categories

    @staticmethod
    def _lazy_image(item):
        # NOTE: binds 'item' immediately to avoid the late-binding
        # closure pitfall:
        # https://docs.python.org/3/faq/programming.html#why-do-lambdas-defined-in-a-loop-with-different-values-all-return-the-same-result
        return lambda: item.image

    @classmethod
    def _merge_items(cls, existing_item, current_item, path=None):
        return existing_item.wrap(path=path,
            image=cls._merge_images(existing_item, current_item),
            annotations=cls._merge_anno(
                existing_item.annotations, current_item.annotations))

    @staticmethod
    def _merge_images(existing_item, current_item):
        """Pick the more complete of the two images and fill in missing
        path/size info from the other copy."""
        if existing_item.has_image and current_item.has_image:
            # Prefer the image that actually carries data.
            if existing_item.image.has_data:
                image = existing_item.image
            else:
                image = current_item.image

            if existing_item.image.path != current_item.image.path:
                if not existing_item.image.path:
                    image._path = current_item.image.path

            if all([existing_item.image._size, current_item.image._size]):
                assert existing_item.image._size == current_item.image._size, \
                    "Image info differs for item '%s'" % existing_item.id
            elif existing_item.image._size:
                image._size = existing_item.image._size
            else:
                image._size = current_item.image._size
        elif existing_item.has_image:
            image = existing_item.image
        else:
            image = current_item.image

        return image

    @staticmethod
    def _merge_anno(a, b):
        # TODO: implement properly with merging and annotations remapping
        from .operations import merge_annotations_equal
        return merge_annotations_equal(a, b)

    @staticmethod
    def _merge_categories(sources):
        # TODO: implement properly with merging and annotations remapping
        from .operations import merge_categories
        return merge_categories(sources)
|
testbed/openvinotoolkit__datumaro/datumaro/components/dataset_filter.py
ADDED
|
@@ -0,0 +1,261 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
|
| 2 |
+
# Copyright (C) 2019-2020 Intel Corporation
|
| 3 |
+
#
|
| 4 |
+
# SPDX-License-Identifier: MIT
|
| 5 |
+
|
| 6 |
+
import logging as log
|
| 7 |
+
from lxml import etree as ET # lxml has proper XPath implementation
|
| 8 |
+
from datumaro.components.extractor import (Transform,
|
| 9 |
+
Annotation, AnnotationType,
|
| 10 |
+
Label, Mask, Points, Polygon, PolyLine, Bbox, Caption,
|
| 11 |
+
)
|
| 12 |
+
|
| 13 |
+
|
| 14 |
+
class DatasetItemEncoder:
|
| 15 |
+
@classmethod
|
| 16 |
+
def encode(cls, item, categories=None):
|
| 17 |
+
item_elem = ET.Element('item')
|
| 18 |
+
ET.SubElement(item_elem, 'id').text = str(item.id)
|
| 19 |
+
ET.SubElement(item_elem, 'subset').text = str(item.subset)
|
| 20 |
+
ET.SubElement(item_elem, 'path').text = str('/'.join(item.path))
|
| 21 |
+
|
| 22 |
+
image = item.image
|
| 23 |
+
if image is not None:
|
| 24 |
+
item_elem.append(cls.encode_image(image))
|
| 25 |
+
|
| 26 |
+
for ann in item.annotations:
|
| 27 |
+
item_elem.append(cls.encode_annotation(ann, categories))
|
| 28 |
+
|
| 29 |
+
return item_elem
|
| 30 |
+
|
| 31 |
+
@classmethod
|
| 32 |
+
def encode_image(cls, image):
|
| 33 |
+
image_elem = ET.Element('image')
|
| 34 |
+
|
| 35 |
+
size = image.size
|
| 36 |
+
if size is not None:
|
| 37 |
+
h, w = size
|
| 38 |
+
else:
|
| 39 |
+
h = 'unknown'
|
| 40 |
+
w = h
|
| 41 |
+
ET.SubElement(image_elem, 'width').text = str(w)
|
| 42 |
+
ET.SubElement(image_elem, 'height').text = str(h)
|
| 43 |
+
|
| 44 |
+
ET.SubElement(image_elem, 'has_data').text = '%d' % int(image.has_data)
|
| 45 |
+
ET.SubElement(image_elem, 'path').text = image.path
|
| 46 |
+
|
| 47 |
+
return image_elem
|
| 48 |
+
|
| 49 |
+
@classmethod
|
| 50 |
+
def encode_annotation_base(cls, annotation):
|
| 51 |
+
assert isinstance(annotation, Annotation)
|
| 52 |
+
ann_elem = ET.Element('annotation')
|
| 53 |
+
ET.SubElement(ann_elem, 'id').text = str(annotation.id)
|
| 54 |
+
ET.SubElement(ann_elem, 'type').text = str(annotation.type.name)
|
| 55 |
+
|
| 56 |
+
for k, v in annotation.attributes.items():
|
| 57 |
+
ET.SubElement(ann_elem, k.replace(' ', '-')).text = str(v)
|
| 58 |
+
|
| 59 |
+
ET.SubElement(ann_elem, 'group').text = str(annotation.group)
|
| 60 |
+
|
| 61 |
+
return ann_elem
|
| 62 |
+
|
| 63 |
+
@staticmethod
|
| 64 |
+
def _get_label(label_id, categories):
|
| 65 |
+
label = ''
|
| 66 |
+
if label_id is None:
|
| 67 |
+
return ''
|
| 68 |
+
if categories is not None:
|
| 69 |
+
label_cat = categories.get(AnnotationType.label)
|
| 70 |
+
if label_cat is not None:
|
| 71 |
+
label = label_cat.items[label_id].name
|
| 72 |
+
return label
|
| 73 |
+
|
| 74 |
+
@classmethod
|
| 75 |
+
def encode_label_object(cls, obj, categories):
|
| 76 |
+
ann_elem = cls.encode_annotation_base(obj)
|
| 77 |
+
|
| 78 |
+
ET.SubElement(ann_elem, 'label').text = \
|
| 79 |
+
str(cls._get_label(obj.label, categories))
|
| 80 |
+
ET.SubElement(ann_elem, 'label_id').text = str(obj.label)
|
| 81 |
+
|
| 82 |
+
return ann_elem
|
| 83 |
+
|
| 84 |
+
@classmethod
|
| 85 |
+
def encode_mask_object(cls, obj, categories):
|
| 86 |
+
ann_elem = cls.encode_annotation_base(obj)
|
| 87 |
+
|
| 88 |
+
ET.SubElement(ann_elem, 'label').text = \
|
| 89 |
+
str(cls._get_label(obj.label, categories))
|
| 90 |
+
ET.SubElement(ann_elem, 'label_id').text = str(obj.label)
|
| 91 |
+
|
| 92 |
+
return ann_elem
|
| 93 |
+
|
| 94 |
+
@classmethod
|
| 95 |
+
def encode_bbox_object(cls, obj, categories):
|
| 96 |
+
ann_elem = cls.encode_annotation_base(obj)
|
| 97 |
+
|
| 98 |
+
ET.SubElement(ann_elem, 'label').text = \
|
| 99 |
+
str(cls._get_label(obj.label, categories))
|
| 100 |
+
ET.SubElement(ann_elem, 'label_id').text = str(obj.label)
|
| 101 |
+
ET.SubElement(ann_elem, 'x').text = str(obj.x)
|
| 102 |
+
ET.SubElement(ann_elem, 'y').text = str(obj.y)
|
| 103 |
+
ET.SubElement(ann_elem, 'w').text = str(obj.w)
|
| 104 |
+
ET.SubElement(ann_elem, 'h').text = str(obj.h)
|
| 105 |
+
ET.SubElement(ann_elem, 'area').text = str(obj.get_area())
|
| 106 |
+
|
| 107 |
+
return ann_elem
|
| 108 |
+
|
| 109 |
+
@classmethod
|
| 110 |
+
def encode_points_object(cls, obj, categories):
|
| 111 |
+
ann_elem = cls.encode_annotation_base(obj)
|
| 112 |
+
|
| 113 |
+
ET.SubElement(ann_elem, 'label').text = \
|
| 114 |
+
str(cls._get_label(obj.label, categories))
|
| 115 |
+
ET.SubElement(ann_elem, 'label_id').text = str(obj.label)
|
| 116 |
+
|
| 117 |
+
x, y, w, h = obj.get_bbox()
|
| 118 |
+
area = w * h
|
| 119 |
+
bbox_elem = ET.SubElement(ann_elem, 'bbox')
|
| 120 |
+
ET.SubElement(bbox_elem, 'x').text = str(x)
|
| 121 |
+
ET.SubElement(bbox_elem, 'y').text = str(y)
|
| 122 |
+
ET.SubElement(bbox_elem, 'w').text = str(w)
|
| 123 |
+
ET.SubElement(bbox_elem, 'h').text = str(h)
|
| 124 |
+
ET.SubElement(bbox_elem, 'area').text = str(area)
|
| 125 |
+
|
| 126 |
+
points = obj.points
|
| 127 |
+
for i in range(0, len(points), 2):
|
| 128 |
+
point_elem = ET.SubElement(ann_elem, 'point')
|
| 129 |
+
ET.SubElement(point_elem, 'x').text = str(points[i])
|
| 130 |
+
ET.SubElement(point_elem, 'y').text = str(points[i + 1])
|
| 131 |
+
ET.SubElement(point_elem, 'visible').text = \
|
| 132 |
+
str(obj.visibility[i // 2].name)
|
| 133 |
+
|
| 134 |
+
return ann_elem
|
| 135 |
+
|
| 136 |
+
@classmethod
|
| 137 |
+
def encode_polygon_object(cls, obj, categories):
|
| 138 |
+
ann_elem = cls.encode_annotation_base(obj)
|
| 139 |
+
|
| 140 |
+
ET.SubElement(ann_elem, 'label').text = \
|
| 141 |
+
str(cls._get_label(obj.label, categories))
|
| 142 |
+
ET.SubElement(ann_elem, 'label_id').text = str(obj.label)
|
| 143 |
+
|
| 144 |
+
x, y, w, h = obj.get_bbox()
|
| 145 |
+
area = w * h
|
| 146 |
+
bbox_elem = ET.SubElement(ann_elem, 'bbox')
|
| 147 |
+
ET.SubElement(bbox_elem, 'x').text = str(x)
|
| 148 |
+
ET.SubElement(bbox_elem, 'y').text = str(y)
|
| 149 |
+
ET.SubElement(bbox_elem, 'w').text = str(w)
|
| 150 |
+
ET.SubElement(bbox_elem, 'h').text = str(h)
|
| 151 |
+
ET.SubElement(bbox_elem, 'area').text = str(area)
|
| 152 |
+
|
| 153 |
+
points = obj.points
|
| 154 |
+
for i in range(0, len(points), 2):
|
| 155 |
+
point_elem = ET.SubElement(ann_elem, 'point')
|
| 156 |
+
ET.SubElement(point_elem, 'x').text = str(points[i])
|
| 157 |
+
ET.SubElement(point_elem, 'y').text = str(points[i + 1])
|
| 158 |
+
|
| 159 |
+
return ann_elem
|
| 160 |
+
|
| 161 |
+
@classmethod
|
| 162 |
+
def encode_polyline_object(cls, obj, categories):
|
| 163 |
+
ann_elem = cls.encode_annotation_base(obj)
|
| 164 |
+
|
| 165 |
+
ET.SubElement(ann_elem, 'label').text = \
|
| 166 |
+
str(cls._get_label(obj.label, categories))
|
| 167 |
+
ET.SubElement(ann_elem, 'label_id').text = str(obj.label)
|
| 168 |
+
|
| 169 |
+
x, y, w, h = obj.get_bbox()
|
| 170 |
+
area = w * h
|
| 171 |
+
bbox_elem = ET.SubElement(ann_elem, 'bbox')
|
| 172 |
+
ET.SubElement(bbox_elem, 'x').text = str(x)
|
| 173 |
+
ET.SubElement(bbox_elem, 'y').text = str(y)
|
| 174 |
+
ET.SubElement(bbox_elem, 'w').text = str(w)
|
| 175 |
+
ET.SubElement(bbox_elem, 'h').text = str(h)
|
| 176 |
+
ET.SubElement(bbox_elem, 'area').text = str(area)
|
| 177 |
+
|
| 178 |
+
points = obj.points
|
| 179 |
+
for i in range(0, len(points), 2):
|
| 180 |
+
point_elem = ET.SubElement(ann_elem, 'point')
|
| 181 |
+
ET.SubElement(point_elem, 'x').text = str(points[i])
|
| 182 |
+
ET.SubElement(point_elem, 'y').text = str(points[i + 1])
|
| 183 |
+
|
| 184 |
+
return ann_elem
|
| 185 |
+
|
| 186 |
+
@classmethod
|
| 187 |
+
def encode_caption_object(cls, obj):
|
| 188 |
+
ann_elem = cls.encode_annotation_base(obj)
|
| 189 |
+
|
| 190 |
+
ET.SubElement(ann_elem, 'caption').text = str(obj.caption)
|
| 191 |
+
|
| 192 |
+
return ann_elem
|
| 193 |
+
|
| 194 |
+
@classmethod
|
| 195 |
+
def encode_annotation(cls, o, categories=None):
|
| 196 |
+
if isinstance(o, Label):
|
| 197 |
+
return cls.encode_label_object(o, categories)
|
| 198 |
+
if isinstance(o, Mask):
|
| 199 |
+
return cls.encode_mask_object(o, categories)
|
| 200 |
+
if isinstance(o, Bbox):
|
| 201 |
+
return cls.encode_bbox_object(o, categories)
|
| 202 |
+
if isinstance(o, Points):
|
| 203 |
+
return cls.encode_points_object(o, categories)
|
| 204 |
+
if isinstance(o, PolyLine):
|
| 205 |
+
return cls.encode_polyline_object(o, categories)
|
| 206 |
+
if isinstance(o, Polygon):
|
| 207 |
+
return cls.encode_polygon_object(o, categories)
|
| 208 |
+
if isinstance(o, Caption):
|
| 209 |
+
return cls.encode_caption_object(o)
|
| 210 |
+
raise NotImplementedError("Unexpected annotation object passed: %s" % o)
|
| 211 |
+
|
| 212 |
+
@staticmethod
|
| 213 |
+
def to_string(encoded_item):
|
| 214 |
+
return ET.tostring(encoded_item, encoding='unicode', pretty_print=True)
|
| 215 |
+
|
| 216 |
+
def XPathDatasetFilter(extractor, xpath=None):
    """Filter dataset items by an XPath expression over their XML encoding.

    Returns the extractor unchanged when no expression is given; otherwise
    returns a lazily filtered view that keeps only matching items.
    """
    if xpath is None:
        return extractor

    try:
        compiled = ET.XPath(xpath)
    except Exception:
        log.error("Failed to create XPath from expression '%s'", xpath)
        raise

    def _matches(item):
        encoded = DatasetItemEncoder.encode(item, extractor.categories())
        return bool(compiled(encoded))

    return extractor.select(_matches)
|
| 227 |
+
|
| 228 |
+
class XPathAnnotationsFilter(Transform):
    """Keeps only the annotations of each item that match an XPath expression.

    When remove_empty is set, items left without annotations are dropped
    from iteration entirely.
    """

    def __init__(self, extractor, xpath=None, remove_empty=False):
        super().__init__(extractor)

        if xpath is not None:
            try:
                xpath = ET.XPath(xpath)
            except Exception:
                log.error("Failed to create XPath from expression '%s'", xpath)
                raise
        self._filter = xpath
        self._remove_empty = remove_empty

    def __iter__(self):
        for item in self._extractor:
            transformed = self.transform_item(item)
            if transformed is not None:
                yield transformed

    def transform_item(self, item):
        if self._filter is None:
            return item

        encoded = DatasetItemEncoder.encode(item, self._extractor.categories())
        # The query may select arbitrary nodes; keep annotation roots only.
        matched = [e for e in self._filter(encoded) if e.tag == 'annotation']

        # Map each matched XML node back to its annotation by position
        # among the item's encoded annotations.
        all_encoded = encoded.findall('annotation')
        annotations = [item.annotations[all_encoded.index(e)] for e in matched]

        if self._remove_empty and not annotations:
            return None
        return self.wrap_item(item, annotations=annotations)
|
testbed/openvinotoolkit__datumaro/datumaro/components/extractor.py
ADDED
|
@@ -0,0 +1,657 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
|
| 2 |
+
# Copyright (C) 2019-2020 Intel Corporation
|
| 3 |
+
#
|
| 4 |
+
# SPDX-License-Identifier: MIT
|
| 5 |
+
|
| 6 |
+
from enum import Enum
|
| 7 |
+
from glob import glob
|
| 8 |
+
from typing import List, Dict
|
| 9 |
+
import numpy as np
|
| 10 |
+
import os.path as osp
|
| 11 |
+
|
| 12 |
+
import attr
|
| 13 |
+
from attr import attrs, attrib
|
| 14 |
+
|
| 15 |
+
from datumaro.util.image import Image
|
| 16 |
+
from datumaro.util.attrs_util import not_empty, default_if_none
|
| 17 |
+
|
| 18 |
+
|
| 19 |
+
AnnotationType = Enum('AnnotationType',
|
| 20 |
+
[
|
| 21 |
+
'label',
|
| 22 |
+
'mask',
|
| 23 |
+
'points',
|
| 24 |
+
'polygon',
|
| 25 |
+
'polyline',
|
| 26 |
+
'bbox',
|
| 27 |
+
'caption',
|
| 28 |
+
])
|
| 29 |
+
|
| 30 |
+
_COORDINATE_ROUNDING_DIGITS = 2
|
| 31 |
+
|
| 32 |
+
@attrs(kw_only=True)
class Annotation:
    """Base class for all annotation kinds.

    Subclasses must define a class-level `_type` (an AnnotationType member);
    this is checked in __attrs_post_init__.
    """
    # Identifier of the annotation within its item (format-specific).
    id = attrib(default=0, validator=default_if_none(int))
    # Arbitrary key-value metadata attached to the annotation.
    attributes = attrib(factory=dict, validator=default_if_none(dict))
    # Annotations sharing a non-zero group id describe the same object.
    group = attrib(default=0, validator=default_if_none(int))

    def __attrs_post_init__(self):
        # Ensure the subclass declared a valid _type.
        assert isinstance(self.type, AnnotationType)

    @property
    def type(self) -> AnnotationType:
        return self._type # must be set in subclasses

    def wrap(self, **kwargs):
        # Return a copy of this annotation with the given fields replaced.
        return attr.evolve(self, **kwargs)
|
| 47 |
+
|
| 48 |
+
@attrs(kw_only=True)
class Categories:
    """Base class for label-space metadata attached to a dataset."""
    # Names of attributes applicable to every label; excluded from
    # equality comparisons (eq=False).
    attributes = attrib(factory=set, validator=default_if_none(set), eq=False)
|
| 51 |
+
|
| 52 |
+
@attrs
class LabelCategories(Categories):
    """An ordered list of labels, each with an optional parent and attributes.

    Keeps a name -> index lookup table (`_indices`) in sync with `items`.
    """

    @attrs(repr_ns='LabelCategories')
    class Category:
        name = attrib(converter=str, validator=not_empty)
        parent = attrib(default='', validator=default_if_none(str))
        attributes = attrib(factory=set, validator=default_if_none(set))

    items = attrib(factory=list, validator=default_if_none(list))
    _indices = attrib(factory=dict, init=False, eq=False)

    @classmethod
    def from_iterable(cls, iterable):
        """Generation of LabelCategories from an iterable object.

        Args:
            iterable: This iterable object can be:
                1) a simple str - generates one Category with str as name
                2) a list of str - interpreted as a list of Category names
                3) a list of positional arguments - generates Categories
                   with these arguments

        Returns:
            LabelCategories: LabelCategories object
        """
        temp_categories = cls()

        if isinstance(iterable, str):
            iterable = [[iterable]]

        for category in iterable:
            if isinstance(category, str):
                category = [category]
            temp_categories.add(*category)

        return temp_categories

    def __attrs_post_init__(self):
        self._reindex()

    def _reindex(self):
        """Rebuild the name -> index lookup table from self.items."""
        indices = {}
        for index, item in enumerate(self.items):
            # BUGFIX: check the table being built, so duplicate names in
            # `items` are detected. The previous check against
            # `self._indices` could never fire, because `self._indices` is
            # the fresh (empty) factory dict while this runs during
            # construction.
            assert item.name not in indices, item.name
            indices[item.name] = index
        self._indices = indices

    def add(self, name: str, parent: str = None, attributes: dict = None):
        """Append a new label and return its index. Names must be unique."""
        assert name not in self._indices, name

        index = len(self.items)
        self.items.append(self.Category(name, parent, attributes))
        self._indices[name] = index
        return index

    def find(self, name: str):
        """Return (index, Category) for the name, or (None, None) if absent."""
        index = self._indices.get(name)
        if index is not None:
            return index, self.items[index]
        return index, None

    def __getitem__(self, idx):
        return self.items[idx]

    def __len__(self):
        return len(self.items)

    def __iter__(self):
        return iter(self.items)
|
| 122 |
+
|
| 123 |
+
@attrs
class Label(Annotation):
    """A classification tag: a single label id applied to a whole item."""
    _type = AnnotationType.label
    # Index of the label in the corresponding LabelCategories.
    label = attrib(converter=int)
|
| 127 |
+
|
| 128 |
+
@attrs(eq=False)
class MaskCategories(Categories):
    """A colormap for segmentation masks: label id -> color value."""
    colormap = attrib(factory=dict, validator=default_if_none(dict))
    # Cached reverse mapping (color -> label id), built lazily.
    _inverse_colormap = attrib(default=None,
        validator=attr.validators.optional(dict))

    @property
    def inverse_colormap(self):
        """Lazily computed color -> label id mapping."""
        from datumaro.util.mask_tools import invert_colormap
        if self._inverse_colormap is None:
            if self.colormap is not None:
                self._inverse_colormap = invert_colormap(self.colormap)
        return self._inverse_colormap

    def __eq__(self, other):
        if not super().__eq__(other):
            return False
        if not isinstance(other, __class__):
            return False
        # BUGFIX: compare key sets first so equality is symmetric.
        # Previously only self's entries were checked, so an instance
        # compared equal to another that had extra colormap entries.
        if set(self.colormap) != set(other.colormap):
            return False
        for label_id, my_color in self.colormap.items():
            if not np.array_equal(my_color, other.colormap[label_id]):
                return False
        return True
|
| 152 |
+
|
| 153 |
+
@attrs(eq=False)
class Mask(Annotation):
    """A dense segmentation mask.

    The underlying mask image may be supplied either directly as an array
    or as a zero-argument callable, which is resolved lazily on access.
    """
    _type = AnnotationType.mask
    # Array or callable returning one (lazy loading).
    _image = attrib()
    # Class label id the mask pixels belong to, if any.
    label = attrib(converter=attr.converters.optional(int),
        default=None, kw_only=True)
    # Stacking order; higher masks are drawn over lower ones when merged.
    z_order = attrib(default=0, validator=default_if_none(int), kw_only=True)

    @property
    def image(self):
        # Resolve lazily-loaded masks on access.
        if callable(self._image):
            return self._image()
        return self._image

    def as_class_mask(self, label_id=None):
        # Non-zero pixels carry the class label id.
        if label_id is None:
            label_id = self.label
        return self.image * label_id

    def as_instance_mask(self, instance_id):
        # Non-zero pixels carry the given instance id.
        return self.image * instance_id

    def get_area(self):
        # Area is the count of non-zero (covered) pixels.
        return np.count_nonzero(self.image)

    def get_bbox(self):
        from datumaro.util.mask_tools import find_mask_bbox
        return find_mask_bbox(self.image)

    def paint(self, colormap):
        # Render the class mask as a color image using the given colormap.
        from datumaro.util.mask_tools import paint_mask
        return paint_mask(self.as_class_mask(), colormap)

    def __eq__(self, other):
        if not super().__eq__(other):
            return False
        if not isinstance(other, __class__):
            return False
        return \
            (self.label == other.label) and \
            (self.z_order == other.z_order) and \
            (np.array_equal(self.image, other.image))
|
| 195 |
+
|
| 196 |
+
@attrs(eq=False)
class RleMask(Mask):
    """A mask stored in COCO run-length encoding; decoded lazily on access."""
    rle = attrib()
    # _image is always the lazy decoder over `rle`; not settable by callers.
    _image = attrib(default=attr.Factory(
        lambda self: self._lazy_decode(self.rle),
        takes_self=True), init=False)

    @staticmethod
    def _lazy_decode(rle):
        from pycocotools import mask as mask_utils
        # BUGFIX: use the builtin `bool` dtype. `np.bool` is a deprecated
        # alias that was removed in NumPy 1.24+, where it raises
        # AttributeError at decode time.
        return lambda: mask_utils.decode(rle).astype(bool)

    def get_area(self):
        # Computed directly from the RLE, without decoding the mask.
        from pycocotools import mask as mask_utils
        return mask_utils.area(self.rle)

    def get_bbox(self):
        # Computed directly from the RLE, without decoding the mask.
        from pycocotools import mask as mask_utils
        return mask_utils.toBbox(self.rle)

    def __eq__(self, other):
        # Compare RLEs directly when possible; fall back to the (decoded)
        # pixel comparison for other Mask types.
        if not isinstance(other, __class__):
            return super().__eq__(other)
        return self.rle == other.rle
|
| 220 |
+
|
| 221 |
+
class CompiledMask:
    """A pair of composite masks for an image: class ids and instance ids."""

    @staticmethod
    def from_instance_masks(instance_masks,
            instance_ids=None, instance_labels=None):
        """Merge per-instance masks into one class mask and one instance mask.

        Masks are merged in ascending z-order, so higher masks overwrite
        lower ones. Missing instance ids default to 1-based positional
        indices; missing labels default to None.
        """
        from datumaro.util.mask_tools import merge_masks

        if instance_ids is not None:
            assert len(instance_ids) == len(instance_masks)
        else:
            instance_ids = [None] * len(instance_masks)

        if instance_labels is not None:
            assert len(instance_labels) == len(instance_masks)
        else:
            instance_labels = [None] * len(instance_masks)

        # Sort by z-order so later entries paint over earlier ones.
        instance_masks = sorted(
            zip(instance_masks, instance_ids, instance_labels),
            key=lambda m: m[0].z_order)

        instance_mask = [m.as_instance_mask(id if id is not None else 1 + idx)
            for idx, (m, id, _) in enumerate(instance_masks)]
        instance_mask = merge_masks(instance_mask)

        cls_mask = [m.as_class_mask(c) for m, _, c in instance_masks]
        cls_mask = merge_masks(cls_mask)
        return __class__(class_mask=cls_mask, instance_mask=instance_mask)

    def __init__(self, class_mask=None, instance_mask=None):
        # Either mask may be an array or a zero-argument callable (lazy).
        self._class_mask = class_mask
        self._instance_mask = instance_mask

    @staticmethod
    def _get_image(image):
        # Resolve lazily-loaded images on access.
        if callable(image):
            return image()
        return image

    @property
    def class_mask(self):
        return self._get_image(self._class_mask)

    @property
    def instance_mask(self):
        return self._get_image(self._instance_mask)

    @property
    def instance_count(self):
        # Assumes instance ids are contiguous and start from 1.
        return int(self.instance_mask.max())

    def get_instance_labels(self):
        """Return a mapping: instance id -> class label id.

        (class, instance) pairs are packed into single uint32 keys so one
        np.unique pass finds all present combinations; instance id 0
        (background) is skipped.
        """
        class_shift = 16
        m = (self.class_mask.astype(np.uint32) << class_shift) \
            + self.instance_mask.astype(np.uint32)
        keys = np.unique(m)
        instance_labels = {k & ((1 << class_shift) - 1): k >> class_shift
            for k in keys if k & ((1 << class_shift) - 1) != 0
        }
        return instance_labels

    def extract(self, instance_id):
        """Return a boolean mask selecting the pixels of one instance."""
        return self.instance_mask == instance_id

    def lazy_extract(self, instance_id):
        """Return a zero-argument callable performing extract() on demand."""
        return lambda: self.extract(instance_id)
|
| 286 |
+
|
| 287 |
+
@attrs
class _Shape(Annotation):
    """Base class for geometric annotations defined by a flat point list.

    Points are stored as [x0, y0, x1, y1, ...] and rounded to
    _COORDINATE_ROUNDING_DIGITS decimal digits on assignment.
    """
    points = attrib(converter=lambda x:
        [round(p, _COORDINATE_ROUNDING_DIGITS) for p in x])
    # Class label id the shape belongs to, if any.
    label = attrib(converter=attr.converters.optional(int),
        default=None, kw_only=True)
    # Stacking order of overlapping shapes.
    z_order = attrib(default=0, validator=default_if_none(int), kw_only=True)

    def get_area(self):
        raise NotImplementedError()

    def get_bbox(self):
        """Return [x, y, w, h] enclosing all points, or None if empty."""
        points = self.points
        if not points:
            return None

        # Slicing already yields new lists; the previous
        # `[p for p in points[0::2]]` comprehensions were redundant copies.
        xs = points[0::2]
        ys = points[1::2]
        x0 = min(xs)
        x1 = max(xs)
        y0 = min(ys)
        y1 = max(ys)
        return [x0, y0, x1 - x0, y1 - y0]
|
| 310 |
+
|
| 311 |
+
@attrs
class PolyLine(_Shape):
    """An open polygonal chain; has zero area by definition."""
    _type = AnnotationType.polyline

    def as_polygon(self):
        # The vertex list can be reused directly as polygon vertices.
        return self.points[:]

    def get_area(self):
        # An open line covers no area.
        return 0
|
| 320 |
+
|
| 321 |
+
@attrs
class Polygon(_Shape):
    """A closed polygon given by at least three vertices."""
    _type = AnnotationType.polygon

    def __attrs_post_init__(self):
        super().__attrs_post_init__()
        # keep the message on a single line to produce informative output
        assert len(self.points) % 2 == 0 and 3 <= len(self.points) // 2, "Wrong polygon points: %s" % self.points

    def get_area(self):
        """Compute the polygon area via COCO's RLE rasterization."""
        import pycocotools.mask as mask_utils

        x, y, w, h = self.get_bbox()
        # Rasterize on a canvas just large enough to contain the polygon.
        rle = mask_utils.frPyObjects([self.points], y + h, x + w)
        area = mask_utils.area(rle)[0]
        return area
|
| 337 |
+
|
| 338 |
+
@attrs
class Bbox(_Shape):
    """An axis-aligned rectangle, stored internally as two corner points
    [x0, y0, x1, y1] but constructed and exposed as (x, y, w, h).
    """
    _type = AnnotationType.bbox

    # will be overridden by attrs, then will be overridden again by us
    # attrs' method will be renamed to __attrs_init__
    def __init__(self, x, y, w, h, *args, **kwargs):
        kwargs.pop('points', None) # comes from wrap()
        # Convert the public (x, y, w, h) arguments to the internal
        # two-corner point list expected by _Shape.
        self.__attrs_init__([x, y, x + w, y + h], *args, **kwargs)
    __actual_init__ = __init__ # save pointer

    @property
    def x(self):
        return self.points[0]

    @property
    def y(self):
        return self.points[1]

    @property
    def w(self):
        return self.points[2] - self.points[0]

    @property
    def h(self):
        return self.points[3] - self.points[1]

    def get_area(self):
        return self.w * self.h

    def get_bbox(self):
        return [self.x, self.y, self.w, self.h]

    def as_polygon(self):
        # Corners in clockwise order, starting from the top-left.
        x, y, w, h = self.get_bbox()
        return [
            x, y,
            x + w, y,
            x + w, y + h,
            x, y + h
        ]

    def iou(self, other):
        """Intersection-over-union with another annotation's bbox."""
        from datumaro.util.annotation_util import bbox_iou
        return bbox_iou(self.get_bbox(), other.get_bbox())

    def wrap(item, **kwargs):
        # Rebuild from the public (x, y, w, h) view so that evolve() goes
        # through the custom __init__ above, not the attrs-generated one.
        d = {'x': item.x, 'y': item.y, 'w': item.w, 'h': item.h}
        d.update(kwargs)
        return attr.evolve(item, **d)

# attrs replaced our __init__ with its generated one while processing the
# class body; move the generated version to __attrs_init__ and restore ours.
assert not hasattr(Bbox, '__attrs_init__') # hopefully, it will be supported
setattr(Bbox, '__attrs_init__', Bbox.__init__)
setattr(Bbox, '__init__', Bbox.__actual_init__)
|
| 392 |
+
|
| 393 |
+
@attrs
class PointsCategories(Categories):
    """Describes keypoint labels and skeleton connections per label id."""

    @attrs(repr_ns="PointsCategories")
    class Category:
        labels = attrib(factory=list, validator=default_if_none(list))
        joints = attrib(factory=set, validator=default_if_none(set))

    items = attrib(factory=dict, validator=default_if_none(dict))

    @classmethod
    def from_iterable(cls, iterable):
        """Generation of PointsCategories from an iterable object.

        Args:
            iterable: This iterable object can be:
                1) a list of positional arguments - generates Categories
                   with these arguments

        Returns:
            PointsCategories: PointsCategories object
        """
        temp_categories = cls()
        for category in iterable:
            temp_categories.add(*category)
        return temp_categories

    def add(self, label_id, labels=None, joints=None):
        """Register point labels and joints for the given label id."""
        # Normalize joints to a set of tuples; None means "no joints".
        normalized_joints = set(map(tuple, joints or []))
        self.items[label_id] = self.Category(labels, normalized_joints)
|
| 425 |
+
|
| 426 |
+
@attrs
class Points(_Shape):
    """A set of keypoints, each with a visibility state."""
    Visibility = Enum('Visibility', [
        ('absent', 0),
        ('hidden', 1),
        ('visible', 2),
    ])
    _type = AnnotationType.points

    # One Visibility entry per (x, y) pair; defaults to all-visible.
    visibility = attrib(type=list, default=None)
    @visibility.validator
    def _visibility_validator(self, attribute, visibility):
        # Normalizes the field in place: fills the default and coerces raw
        # ints into Visibility members.
        if visibility is None:
            visibility = [self.Visibility.visible] * (len(self.points) // 2)
        else:
            for i, v in enumerate(visibility):
                if not isinstance(v, self.Visibility):
                    visibility[i] = self.Visibility(v)
        assert len(visibility) == len(self.points) // 2
        self.visibility = visibility

    def __attrs_post_init__(self):
        super().__attrs_post_init__()
        # Points must come in (x, y) pairs.
        assert len(self.points) % 2 == 0, self.points

    def get_area(self):
        # Keypoints cover no area.
        return 0

    def get_bbox(self):
        """Bounding box of the non-absent points only."""
        xs = [p for p, v in zip(self.points[0::2], self.visibility)
            if v != __class__.Visibility.absent]
        ys = [p for p, v in zip(self.points[1::2], self.visibility)
            if v != __class__.Visibility.absent]
        x0 = min(xs, default=0)
        x1 = max(xs, default=0)
        y0 = min(ys, default=0)
        y1 = max(ys, default=0)
        return [x0, y0, x1 - x0, y1 - y0]
|
| 464 |
+
|
| 465 |
+
@attrs
class Caption(Annotation):
    """A free-form text annotation attached to an item."""
    _type = AnnotationType.caption
    caption = attrib(converter=str)
|
| 469 |
+
|
| 470 |
+
|
| 471 |
+
DEFAULT_SUBSET_NAME = 'default'
|
| 472 |
+
|
| 473 |
+
@attrs
class DatasetItem:
    """A single dataset entry: an id, an optional image and its annotations."""
    # Backslashes are normalized so ids are path-like and OS-independent.
    id = attrib(converter=lambda x: str(x).replace('\\', '/'),
        type=str, validator=not_empty)
    annotations = attrib(factory=list, validator=default_if_none(list))
    # Falsy subset names collapse to the default subset.
    subset = attrib(converter=lambda v: v or DEFAULT_SUBSET_NAME, default=None)
    # Source path components of the item inside the project tree.
    path = attrib(factory=list, validator=default_if_none(list))

    image = attrib(type=Image, default=None)
    @image.validator
    def _image_validator(self, attribute, image):
        # Normalizes the field in place: wraps arrays/callables and path
        # strings into Image objects.
        if callable(image) or isinstance(image, np.ndarray):
            image = Image(data=image)
        elif isinstance(image, str):
            image = Image(path=image)
        assert image is None or isinstance(image, Image)
        self.image = image

    attributes = attrib(factory=dict, validator=default_if_none(dict))

    @property
    def has_image(self):
        return self.image is not None

    def wrap(item, **kwargs):
        # Return a copy of the item with the given fields replaced.
        return attr.evolve(item, **kwargs)
|
| 499 |
+
|
| 500 |
+
class IExtractor:
    """Interface for dataset sources: an iterable of DatasetItem objects."""

    def __iter__(self):
        raise NotImplementedError()

    def __len__(self):
        raise NotImplementedError()

    def subsets(self):
        # Mapping: subset name -> extractor over that subset.
        raise NotImplementedError()

    def get_subset(self, name):
        raise NotImplementedError()

    def categories(self):
        # Mapping: AnnotationType -> Categories.
        raise NotImplementedError()

    def select(self, pred):
        # A filtered view keeping only items matching the predicate.
        raise NotImplementedError()
|
| 518 |
+
|
| 519 |
+
class Extractor(IExtractor):
    """Base extractor with lazily computed length and subset list."""

    def __init__(self, length=None, subsets=None):
        # Both values are optional; when unknown they are computed on
        # demand by a full pass over the items (_init_cache).
        self._length = length
        self._subsets = subsets

    def _init_cache(self):
        subsets = set()
        length = -1
        for length, item in enumerate(self):
            subsets.add(item.subset)
        length += 1  # enumerate yields the last index; convert to a count

        if self._length is None:
            self._length = length
        if self._subsets is None:
            self._subsets = subsets

    def __len__(self):
        if self._length is None:
            self._init_cache()
        return self._length

    def subsets(self) -> Dict[str, IExtractor]:
        if self._subsets is None:
            self._init_cache()
        return {name or DEFAULT_SUBSET_NAME: self.get_subset(name)
            for name in self._subsets}

    def get_subset(self, name):
        if self._subsets is None:
            self._init_cache()
        if name in self._subsets:
            return self.select(lambda item: item.subset == name)
        else:
            raise Exception("Unknown subset '%s', available subsets: %s" % \
                (name, set(self._subsets)))

    def transform(self, method, *args, **kwargs):
        # Apply an extractor-to-extractor transformation class/callable.
        return method(self, *args, **kwargs)

    def select(self, pred):
        # Build a lazy filtered view; `pred` and the outer `self` are
        # captured by the closure, so the wrapper holds no own state.
        class _DatasetFilter(Extractor):
            def __init__(self, _):
                super().__init__()
            def __iter__(_):
                return filter(pred, iter(self))
            def categories(_):
                return self.categories()

        return self.transform(_DatasetFilter)

    def categories(self):
        return {}
|
| 572 |
+
|
| 573 |
+
class SourceExtractor(Extractor):
    """Base class for extractors that read a single, named subset."""

    def __init__(self, length=None, subset=None):
        # Falsy subset names collapse to the default subset.
        if not subset:
            subset = DEFAULT_SUBSET_NAME
        self._subset = subset
        super().__init__(length=length, subsets=[subset])

        # Subclasses fill these while parsing their source.
        self._categories = {}
        self._items = []

    def categories(self):
        return self._categories

    def __iter__(self):
        return iter(self._items)

    def __len__(self):
        return len(self._items)
|
| 589 |
+
|
| 590 |
+
class Importer:
    """Discovers datasets on the filesystem and registers them in a Project."""

    @classmethod
    def detect(cls, path):
        """Return True if at least one dataset source is found under path."""
        return len(cls.find_sources(path)) != 0

    @classmethod
    def find_sources(cls, path) -> List[Dict]:
        raise NotImplementedError()

    def __call__(self, path, **extra_params):
        from datumaro.components.project import Project # cyclic import
        project = Project()

        found = self.find_sources(osp.normpath(path))
        if not found:
            raise Exception("Failed to find dataset at '%s'" % path)

        for desc in found:
            # Caller-supplied options are defaults; per-source options win.
            merged_options = dict(extra_params)
            merged_options.update(desc.get('options', {}))
            desc['options'] = merged_options

            source_name = osp.splitext(osp.basename(desc['url']))[0]
            project.add_source(source_name, desc)

        return project

    @classmethod
    def _find_sources_recursive(cls, path, ext, extractor_name, filename='*'):
        """Describe `path` itself, or all matching files found under it."""
        if path.endswith(ext) and osp.isfile(path):
            return [{'url': path, 'format': extractor_name}]
        pattern = osp.join(path, '**', filename + ext)
        return [{'url': p, 'format': extractor_name}
            for p in glob(pattern, recursive=True)]
|
| 625 |
+
|
| 626 |
+
class Transform(Extractor):
    """Base class for lazy item-wise dataset transformations."""

    @staticmethod
    def wrap_item(item, **kwargs):
        # Copy an item with the given fields replaced.
        return item.wrap(**kwargs)

    def __init__(self, extractor):
        super().__init__()

        self._extractor = extractor

    def __iter__(self):
        for item in self._extractor:
            yield self.transform_item(item)

    def categories(self):
        return self._extractor.categories()

    def subsets(self):
        # Subset names are inherited from the wrapped extractor.
        if self._subsets is None:
            self._subsets = set(self._extractor.subsets())
        return super().subsets()

    def __len__(self):
        assert self._length in {None, 'parent'} or isinstance(self._length, int)
        # The parent's length can be reused when the transformation is
        # one-to-one: either declared explicitly (_length == 'parent') or
        # implied because the default 1:1 __iter__ is not overridden.
        if self._length is None and \
                self.__iter__.__func__ == Transform.__iter__ \
                or self._length == 'parent':
            self._length = len(self._extractor)
        return super().__len__()

    def transform_item(self, item: DatasetItem) -> DatasetItem:
        raise NotImplementedError()
|
testbed/openvinotoolkit__datumaro/datumaro/components/launcher.py
ADDED
|
@@ -0,0 +1,67 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
|
| 2 |
+
# Copyright (C) 2019-2020 Intel Corporation
|
| 3 |
+
#
|
| 4 |
+
# SPDX-License-Identifier: MIT
|
| 5 |
+
|
| 6 |
+
import numpy as np
|
| 7 |
+
|
| 8 |
+
from datumaro.components.extractor import (Transform, LabelCategories,
|
| 9 |
+
AnnotationType)
|
| 10 |
+
from datumaro.util import take_by
|
| 11 |
+
|
| 12 |
+
|
| 13 |
+
# pylint: disable=no-self-use
|
| 14 |
+
class Launcher:
    """Interface for model-inference backends."""

    def __init__(self, model_dir=None):
        # The base launcher holds no state; subclasses load models here.
        pass

    def launch(self, inputs):
        """Run inference on a batch of inputs; must be overridden."""
        raise NotImplementedError()

    def categories(self):
        """Label categories produced by the model, or None when unknown."""
        return None
# pylint: enable=no-self-use
|
| 24 |
+
|
| 25 |
+
class ModelTransform(Transform):
    """Replaces item annotations with inference results from a Launcher.

    Iterates the wrapped extractor in batches and feeds image data to the
    launcher; each item is re-wrapped with the predicted annotations.
    """

    def __init__(self, extractor, launcher, batch_size=1):
        super().__init__(extractor)
        self._launcher = launcher        # inference backend
        self._batch_size = batch_size    # images per launch() call

    def __iter__(self):
        # Batch items to amortize per-call launcher overhead
        for batch in take_by(self._extractor, self._batch_size):
            inputs = np.array([item.image.data for item in batch])
            inference = self._launcher.launch(inputs)

            for item, annotations in zip(batch, inference):
                self._check_annotations(annotations)
                yield self.wrap_item(item, annotations=annotations)

    def get_subset(self, name):
        # Wrap the requested subset with the same launcher and batch size
        subset = self._extractor.get_subset(name)
        return __class__(subset, self._launcher, self._batch_size)

    def categories(self):
        # The model may define its own labels; fall back to the source's
        launcher_override = self._launcher.categories()
        if launcher_override is not None:
            return launcher_override
        return self._extractor.categories()

    def transform_item(self, item):
        # Fix: pass the image pixel data, as __iter__ does, instead of the
        # image wrapper object itself.
        inputs = np.expand_dims(item.image.data, axis=0)
        annotations = self._launcher.launch(inputs)[0]
        return self.wrap_item(item, annotations=annotations)

    def _check_annotations(self, annotations):
        """Raise if a predicted label id is outside the known label set."""
        labels_count = len(self.categories().get(
            AnnotationType.label, LabelCategories()).items)

        for ann in annotations:
            # Fix: supply a default so annotations without a 'label'
            # attribute are skipped instead of raising AttributeError,
            # matching the None-check below.
            label = getattr(ann, 'label', None)
            if label is None:
                continue

            if label not in range(labels_count):
                raise Exception("Annotation has unexpected label id %s, "
                    "while there is only %s defined labels." % \
                    (label, labels_count))
|
testbed/openvinotoolkit__datumaro/datumaro/components/operations.py
ADDED
|
@@ -0,0 +1,1503 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
|
| 2 |
+
# Copyright (C) 2020 Intel Corporation
|
| 3 |
+
#
|
| 4 |
+
# SPDX-License-Identifier: MIT
|
| 5 |
+
|
| 6 |
+
from collections import OrderedDict
|
| 7 |
+
from copy import deepcopy
|
| 8 |
+
import hashlib
|
| 9 |
+
import logging as log
|
| 10 |
+
|
| 11 |
+
import attr
|
| 12 |
+
import cv2
|
| 13 |
+
import numpy as np
|
| 14 |
+
from attr import attrib, attrs
|
| 15 |
+
from unittest import TestCase
|
| 16 |
+
|
| 17 |
+
from datumaro.components.cli_plugin import CliPlugin
|
| 18 |
+
from datumaro.components.extractor import (AnnotationType, Bbox, Label,
|
| 19 |
+
LabelCategories, PointsCategories, MaskCategories)
|
| 20 |
+
from datumaro.components.project import Dataset
|
| 21 |
+
from datumaro.util import find, filter_dict
|
| 22 |
+
from datumaro.util.attrs_util import ensure_cls, default_if_none
|
| 23 |
+
from datumaro.util.annotation_util import (segment_iou, bbox_iou,
|
| 24 |
+
mean_bbox, OKS, find_instances, max_bbox, smooth_line)
|
| 25 |
+
|
| 26 |
+
def get_ann_type(anns, t):
    """Return the annotations from 'anns' whose type equals 't'."""
    selected = []
    for ann in anns:
        if ann.type == t:
            selected.append(ann)
    return selected
|
| 28 |
+
|
| 29 |
+
def match_annotations_equal(a, b):
    """Greedily pair equal annotations from 'a' and 'b'.

    Returns (matches, a_unmatched, b_unmatched): 'matches' holds
    (ann_a, ann_b) pairs, the other two hold leftover annotations.
    """
    matches = []
    rest_a = list(a)
    rest_b = list(b)
    for ann_a in a:
        for ann_b in rest_b:
            if ann_a == ann_b:
                matches.append((ann_a, ann_b))
                rest_a.remove(ann_a)
                rest_b.remove(ann_b)
                break
    return matches, rest_a, rest_b
|
| 44 |
+
|
| 45 |
+
def merge_annotations_equal(a, b):
    """Union of 'a' and 'b' with exact duplicates collapsed to one copy."""
    matched, only_a, only_b = match_annotations_equal(a, b)
    merged = [pair[0] for pair in matched]
    return merged + only_a + only_b
|
| 48 |
+
|
| 49 |
+
def merge_categories(sources):
    """Combine category mappings from several sources.

    All sources must agree on each category type; a mismatch raises
    NotImplementedError.
    """
    merged = {}
    for src in sources:
        for cat_type, src_cat in src.items():
            present = merged.setdefault(cat_type, src_cat)
            if present != src_cat:
                raise NotImplementedError(
                    "Merging of datasets with different categories is "
                    "only allowed in 'merge' command.")
    return merged
|
| 59 |
+
|
| 60 |
+
class MergingStrategy(CliPlugin):
    """Base class for dataset merging policies."""

    @classmethod
    def merge(cls, sources, **options):
        # Convenience entry point: construct a strategy and apply it at once
        instance = cls(**options)
        return instance(sources)

    def __init__(self, **options):
        super().__init__(**options)
        # Write through __dict__ to bypass any __setattr__ machinery
        # (e.g. attrs-generated setters in subclasses)
        self.__dict__['_sources'] = None

    def __call__(self, sources):
        # Subclasses return the merged dataset built from 'sources'
        raise NotImplementedError()
|
| 72 |
+
|
| 73 |
+
|
| 74 |
+
@attrs
class DatasetError:
    """Base class for errors tied to a specific dataset item."""
    # (id, subset) pair of the affected item
    item_id = attrib()
|
| 77 |
+
|
| 78 |
+
@attrs
class QualityError(DatasetError):
    """Base class for annotation quality problems."""
    pass
|
| 81 |
+
|
| 82 |
+
@attrs
class TooCloseError(QualityError):
    """Two merged annotations are closer than the allowed distance."""
    a = attrib()
    b = attrib()
    distance = attrib()

    def __str__(self):
        return ("Item {}: annotations are too close: {}, {}, distance = {}"
            .format(self.item_id, self.a, self.b, self.distance))
|
| 91 |
+
|
| 92 |
+
@attrs
class WrongGroupError(QualityError):
    """An annotation group does not match any expected label group."""
    found = attrib(converter=set)
    expected = attrib(converter=set)
    group = attrib(converter=list)

    def __str__(self):
        return ("Item {}: annotation group has wrong labels: "
            "found {}, expected {}, group {}".format(
                self.item_id, self.found, self.expected, self.group))
|
| 102 |
+
|
| 103 |
+
@attrs
class MergeError(DatasetError):
    """Base class for cross-source merging errors."""
    # indices of the source datasets involved in the error
    sources = attrib(converter=set)
|
| 106 |
+
|
| 107 |
+
@attrs
class NoMatchingAnnError(MergeError):
    """An annotation had no counterpart in some of the sources."""
    ann = attrib()

    def __str__(self):
        return ("Item {}: can't find matching annotation "
            "in sources {}, annotation is {}".format(
                self.item_id, self.sources, self.ann))
|
| 115 |
+
|
| 116 |
+
@attrs
class NoMatchingItemError(MergeError):
    """An item was missing from some of the sources."""
    def __str__(self):
        return "Item {}: can't find matching item in sources {}".format(
            self.item_id, self.sources)
|
| 121 |
+
|
| 122 |
+
@attrs
class FailedLabelVotingError(MergeError):
    """Label voting did not reach the required quorum."""
    votes = attrib()
    ann = attrib(default=None)

    def __str__(self):
        # Fix: add the missing leading space before 'for ann'; the message
        # previously rendered as "...label voting failedfor ann ...".
        return "Item %s: label voting failed%s, votes %s, sources %s" % \
            (self.item_id, ' for ann %s' % self.ann if self.ann else '',
            self.votes, self.sources)
|
| 131 |
+
|
| 132 |
+
@attrs
class FailedAttrVotingError(MergeError):
    """Attribute voting did not reach the required quorum."""
    attr = attrib()
    votes = attrib()
    ann = attrib()

    def __str__(self):
        return ("Item {}: attribute voting failed "
            "for ann {}, votes {}, sources {}".format(
                self.item_id, self.ann, self.votes, self.sources))
|
| 142 |
+
|
| 143 |
+
@attrs
|
| 144 |
+
class IntersectMerge(MergingStrategy):
|
| 145 |
+
    @attrs(repr_ns='IntersectMerge', kw_only=True)
    class Conf:
        """Configuration options for IntersectMerge."""
        # maximum pairwise distance for annotations to be clustered together
        pairwise_dist = attrib(converter=float, default=0.5)
        # per-point sigmas for keypoint (OKS) comparison
        sigma = attrib(converter=list, factory=list)

        # drop merged annotations whose 'score' is below this threshold
        output_conf_thresh = attrib(converter=float, default=0)
        # minimum number of agreeing sources required for a vote to win
        quorum = attrib(converter=int, default=0)
        # attribute names excluded from attribute voting
        ignored_attributes = attrib(converter=set, factory=set)

        def _groups_conveter(value):
            # Parse group specs into sets of (name, optional) pairs;
            # a trailing '?' marks a label as optional.
            # NOTE(review): name contains a typo ('conveter'); kept as-is
            # since renaming would change the class attribute name.
            result = []
            for group in value:
                rg = set()
                for label in group:
                    optional = label.endswith('?')
                    name = label if not optional else label[:-1]
                    rg.add((name, optional))
                result.append(rg)
            return result
        # expected label groups, used by group validation
        groups = attrib(converter=_groups_conveter, factory=list)
        # relative distance under which annotations are reported "too close"
        close_distance = attrib(converter=float, default=0.75)
|
| 166 |
+
    # merge configuration (accepts a Conf instance or a kwargs dict)
    conf = attrib(converter=ensure_cls(Conf), factory=Conf)

    # Error trackers:
    errors = attrib(factory=list, init=False)
    def add_item_error(self, error, *args, **kwargs):
        # Record an error bound to the item currently being merged
        self.errors.append(error(self._item_id, *args, **kwargs))

    # Indexes:
    _dataset_map = attrib(init=False) # id(dataset) -> (dataset, index)
    _item_map = attrib(init=False) # id(item) -> (item, id(dataset))
    _ann_map = attrib(init=False) # id(ann) -> (ann, id(item))
    _item_id = attrib(init=False) # (id, subset) of the current item
    _item = attrib(init=False) # the current item being merged

    # Misc.
    _categories = attrib(init=False) # merged categories
|
| 182 |
+
|
| 183 |
+
    def __call__(self, datasets):
        """Merge 'datasets' into a single Dataset, recording merge errors."""
        self._categories = self._merge_categories(
            [d.categories() for d in datasets])
        merged = Dataset(categories=self._categories)

        self._check_groups_definition()

        item_matches, item_map = self.match_items(datasets)
        self._item_map = item_map
        self._dataset_map = { id(d): (d, i) for i, d in enumerate(datasets) }

        for item_id, items in item_matches.items():
            self._item_id = item_id

            # Report items that are absent from some of the sources
            if len(items) < len(datasets):
                missing_sources = set(id(s) for s in datasets) - set(items)
                missing_sources = [self._dataset_map[s][1]
                    for s in missing_sources]
                self.add_item_error(NoMatchingItemError, missing_sources)
            merged.put(self.merge_items(items))

        return merged
|
| 205 |
+
|
| 206 |
+
    def get_ann_source(self, ann_id):
        # Resolve an annotation's id() to the id() of its source dataset
        return self._item_map[self._ann_map[ann_id][1]][1]
|
| 208 |
+
|
| 209 |
+
    def merge_items(self, items):
        """Merge matching items (one per source) into a single item."""
        # Any of the matched items can serve as the output template
        self._item = next(iter(items.values()))

        self._ann_map = {}
        sources = []
        for item in items.values():
            self._ann_map.update({ id(a): (a, id(item))
                for a in item.annotations })
            sources.append(item.annotations)
        log.debug("Merging item %s: source annotations %s" % \
            (self._item_id, list(map(len, sources))))

        annotations = self.merge_annotations(sources)

        # Drop merged annotations below the configured confidence threshold
        annotations = [a for a in annotations
            if self.conf.output_conf_thresh <= a.attributes.get('score', 1)]

        return self._item.wrap(annotations=annotations)
|
| 227 |
+
|
| 228 |
+
    def merge_annotations(self, sources):
        """Cluster matching annotations across sources and merge clusters."""
        self._make_mergers(sources)

        clusters = self._match_annotations(sources)

        joined_clusters = sum(clusters.values(), [])
        group_map = self._find_cluster_groups(joined_clusters)

        annotations = []
        # NOTE: the loop deliberately rebinds 'clusters' to the per-type list
        for t, clusters in clusters.items():
            for cluster in clusters:
                self._check_cluster_sources(cluster)

            merged_clusters = self._merge_clusters(t, clusters)

            for merged_ann, cluster in zip(merged_clusters, clusters):
                # Vote on attributes, keeping any the merger already set
                attributes = self._find_cluster_attrs(cluster, merged_ann)
                attributes = { k: v for k, v in attributes.items()
                    if k not in self.conf.ignored_attributes }
                attributes.update(merged_ann.attributes)
                merged_ann.attributes = attributes

                # Reassign group ids from the discovered cluster groups
                # (0 = no group; otherwise 1-based index into group_map)
                new_group_id = find(enumerate(group_map),
                    lambda e: id(cluster) in e[1][0])
                if new_group_id is None:
                    new_group_id = 0
                else:
                    new_group_id = new_group_id[0] + 1
                merged_ann.group = new_group_id

            if self.conf.close_distance:
                self._check_annotation_distance(t, merged_clusters)

            annotations += merged_clusters

        if self.conf.groups:
            self._check_groups(annotations)

        return annotations
|
| 267 |
+
|
| 268 |
+
    @staticmethod
    def match_items(datasets):
        """Group items of all datasets by (id, subset).

        Returns (matches, item_map): 'matches' maps (id, subset) to a
        {id(dataset): item} dict; 'item_map' maps id(item) to
        (item, id(dataset)).
        """
        item_ids = set((item.id, item.subset) for d in datasets for item in d)

        item_map = {} # id(item) -> (item, id(dataset))

        matches = OrderedDict()
        for (item_id, item_subset) in sorted(item_ids, key=lambda e: e[0]):
            items = {}
            for d in datasets:
                try:
                    item = d.get(item_id, subset=item_subset)
                    items[id(d)] = item
                    item_map[id(item)] = (item, id(d))
                except KeyError:
                    # this dataset has no such item
                    pass
            matches[(item_id, item_subset)] = items

        return matches, item_map
|
| 287 |
+
|
| 288 |
+
    def _merge_label_categories(self, sources):
        """Merge label categories from all sources.

        Fast path: if every source has identical label categories, reuse
        them. Otherwise build a union, checking parent-label consistency.
        """
        same = True
        common = None
        for src_categories in sources:
            src_cat = src_categories.get(AnnotationType.label)
            if common is None:
                common = src_cat
            elif common != src_cat:
                same = False
                break

        if same:
            return common

        dst_cat = LabelCategories()
        for src_id, src_categories in enumerate(sources):
            src_cat = src_categories.get(AnnotationType.label)
            if src_cat is None:
                continue

            for src_label in src_cat.items:
                dst_label = dst_cat.find(src_label.name)[1]
                if dst_label is not None:
                    if dst_label != src_label:
                        # Conflicting non-empty parents cannot be reconciled
                        if src_label.parent and dst_label.parent and \
                                src_label.parent != dst_label.parent:
                            raise ValueError("Can't merge label category "
                                "%s (from #%s): "
                                "parent label conflict: %s vs. %s" % \
                                (src_label.name, src_id,
                                    src_label.parent, dst_label.parent)
                            )
                        # Adopt the non-empty parent, union the attributes
                        dst_label.parent = dst_label.parent or src_label.parent
                        dst_label.attributes |= src_label.attributes
                    else:
                        pass # identical label, nothing to do
                else:
                    dst_cat.add(src_label.name,
                        src_label.parent, src_label.attributes)

        return dst_cat
|
| 329 |
+
|
| 330 |
+
    def _merge_point_categories(self, sources, label_cat):
        """Merge point (keypoint) categories, remapped to merged label ids.

        Returns None when no source defines point categories.
        """
        dst_point_cat = PointsCategories()

        for src_id, src_categories in enumerate(sources):
            src_label_cat = src_categories.get(AnnotationType.label)
            src_point_cat = src_categories.get(AnnotationType.points)
            if src_label_cat is None or src_point_cat is None:
                continue

            for src_label_id, src_cat in src_point_cat.items.items():
                # Remap the source label id to the merged label id by name
                src_label = src_label_cat.items[src_label_id].name
                dst_label_id = label_cat.find(src_label)[0]
                dst_cat = dst_point_cat.items.get(dst_label_id)
                if dst_cat is not None:
                    if dst_cat != src_cat:
                        raise ValueError("Can't merge point category for label "
                            "%s (from #%s): %s vs. %s" % \
                            (src_label, src_id, src_cat, dst_cat)
                        )
                    else:
                        pass # identical category, nothing to do
                else:
                    dst_point_cat.add(dst_label_id,
                        src_cat.labels, src_cat.joints)

        if len(dst_point_cat.items) == 0:
            return None

        return dst_point_cat
|
| 359 |
+
|
| 360 |
+
    def _merge_mask_categories(self, sources, label_cat):
        """Merge mask colormaps, remapped to merged label ids.

        Returns None when no source defines mask categories.
        """
        dst_mask_cat = MaskCategories()

        for src_id, src_categories in enumerate(sources):
            src_label_cat = src_categories.get(AnnotationType.label)
            src_mask_cat = src_categories.get(AnnotationType.mask)
            if src_label_cat is None or src_mask_cat is None:
                continue

            for src_label_id, src_cat in src_mask_cat.colormap.items():
                # Remap the source label id to the merged label id by name
                src_label = src_label_cat.items[src_label_id].name
                dst_label_id = label_cat.find(src_label)[0]
                dst_cat = dst_mask_cat.colormap.get(dst_label_id)
                if dst_cat is not None:
                    if dst_cat != src_cat:
                        raise ValueError("Can't merge mask category for label "
                            "%s (from #%s): %s vs. %s" % \
                            (src_label, src_id, src_cat, dst_cat)
                        )
                    else:
                        pass # identical color, nothing to do
                else:
                    dst_mask_cat.colormap[dst_label_id] = src_cat

        if len(dst_mask_cat.colormap) == 0:
            return None

        return dst_mask_cat
|
| 388 |
+
|
| 389 |
+
    def _merge_categories(self, sources):
        """Merge all category kinds; points/masks are keyed off merged labels."""
        dst_categories = {}

        label_cat = self._merge_label_categories(sources)
        if label_cat is None:
            # Without labels there is nothing to key the other kinds on
            return dst_categories

        dst_categories[AnnotationType.label] = label_cat

        points_cat = self._merge_point_categories(sources, label_cat)
        if points_cat is not None:
            dst_categories[AnnotationType.points] = points_cat

        mask_cat = self._merge_mask_categories(sources, label_cat)
        if mask_cat is not None:
            dst_categories[AnnotationType.mask] = mask_cat

        return dst_categories
|
| 407 |
+
|
| 408 |
+
    def _match_annotations(self, sources):
        """Cluster annotations of the same type across sources.

        Returns {annotation type: list of clusters}.
        """
        # type -> [per-source lists of annotations of that type]
        all_by_type = {}
        for s in sources:
            src_by_type = {}
            for a in s:
                src_by_type.setdefault(a.type, []).append(a)
            for k, v in src_by_type.items():
                all_by_type.setdefault(k, []).append(v)

        clusters = {}
        for k, v in all_by_type.items():
            clusters.setdefault(k, []).extend(self._match_ann_type(k, v))

        return clusters
|
| 422 |
+
|
| 423 |
+
    def _make_mergers(self, sources):
        """Instantiate one type-specific merger per annotation type."""
        def _make(c, **kwargs):
            # Pass only the config fields the merger class declares
            kwargs.update(attr.asdict(self.conf))
            fields = attr.fields_dict(c)
            return c(**{ k: v for k, v in kwargs.items() if k in fields },
                context=self)

        def _for_type(t, **kwargs):
            # Dispatch annotation type to its merger implementation
            if t is AnnotationType.label:
                return _make(LabelMerger, **kwargs)
            elif t is AnnotationType.bbox:
                return _make(BboxMerger, **kwargs)
            elif t is AnnotationType.mask:
                return _make(MaskMerger, **kwargs)
            elif t is AnnotationType.polygon:
                return _make(PolygonMerger, **kwargs)
            elif t is AnnotationType.polyline:
                return _make(LineMerger, **kwargs)
            elif t is AnnotationType.points:
                return _make(PointsMerger, **kwargs)
            elif t is AnnotationType.caption:
                return _make(CaptionsMerger, **kwargs)
            else:
                raise NotImplementedError("Type %s is not supported" % t)

        # Precompute instance membership and instance bboxes for spatial
        # mergers: id(annotation) -> [instance annotations, instance bbox]
        instance_map = {}
        for s in sources:
            s_instances = find_instances(s)
            for inst in s_instances:
                inst_bbox = max_bbox([a for a in inst if a.type in
                    {AnnotationType.polygon,
                     AnnotationType.mask, AnnotationType.bbox}
                ])
                for ann in inst:
                    instance_map[id(ann)] = [inst, inst_bbox]

        self._mergers = { t: _for_type(t, instance_map=instance_map)
            for t in AnnotationType }
|
| 461 |
+
|
| 462 |
+
    def _match_ann_type(self, t, sources):
        # Delegate clustering to the type-specific merger
        return self._mergers[t].match_annotations(sources)
|
| 464 |
+
|
| 465 |
+
    def _merge_clusters(self, t, clusters):
        # Delegate cluster merging to the type-specific merger
        return self._mergers[t].merge_clusters(clusters)
|
| 467 |
+
|
| 468 |
+
    @staticmethod
    def _find_cluster_groups(clusters):
        """Group clusters that share annotation group ids.

        Returns a list of (set of cluster id()s, set of group ids).
        NOTE(review): the group-id union below is a single pass, so chains
        linked only through later clusters may not be fully merged — confirm
        whether transitive grouping is intended.
        """
        cluster_groups = []
        visited = set()
        for a_idx, cluster_a in enumerate(clusters):
            if a_idx in visited:
                continue
            visited.add(a_idx)

            cluster_group = { id(cluster_a) }

            # find segment groups in the cluster group
            a_groups = set(ann.group for ann in cluster_a)
            for cluster_b in clusters[a_idx+1 :]:
                b_groups = set(ann.group for ann in cluster_b)
                if a_groups & b_groups:
                    a_groups |= b_groups

            # now we know all the segment groups in this cluster group
            # so we can find adjacent clusters
            for b_idx, cluster_b in enumerate(clusters[a_idx+1 :]):
                b_idx = a_idx + 1 + b_idx
                b_groups = set(ann.group for ann in cluster_b)
                if a_groups & b_groups:
                    cluster_group.add( id(cluster_b) )
                    visited.add(b_idx)

            if a_groups == {0}:
                continue # skip annotations without a group
            cluster_groups.append( (cluster_group, a_groups) )
        return cluster_groups
|
| 499 |
+
|
| 500 |
+
def _find_cluster_attrs(self, cluster, ann):
    """Vote on attribute values across a cluster of matched annotations.

    Returns a dict of { attribute name: winning value } for attributes
    whose winner reached the configured quorum. For attributes that fail
    the quorum, a FailedAttrVotingError item error is recorded instead,
    blaming the responsible sources.

    Fix: the blame comprehensions previously filtered on `s` - the stale
    loop variable left over from the vote-collection loop - instead of the
    comprehension variable `a`, so the blamed source sets were computed
    from a single arbitrary annotation's attributes.
    """
    quorum = self.conf.quorum or 0

    # TODO: when attribute types are implemented, add linear
    # interpolation for contiguous values

    attr_votes = {} # name -> { value: score , ... }
    for s in cluster:
        for name, value in s.attributes.items():
            votes = attr_votes.get(name, {})
            votes[value] = 1 + votes.get(value, 0)
            attr_votes[name] = votes

    attributes = {}
    for name, votes in attr_votes.items():
        winner, count = max(votes.items(), key=lambda e: e[1])
        if count < quorum:
            if sum(votes.values()) < quorum:
                # blame provokers: too few annotations even mention this
                # attribute, so those that voted for the winner caused it
                missing_sources = set(
                    self.get_ann_source(id(a)) for a in cluster
                    if a.attributes.get(name) == winner)
            else:
                # blame outliers: enough votes exist, but they disagree
                missing_sources = set(
                    self.get_ann_source(id(a)) for a in cluster
                    if a.attributes.get(name) != winner)
            missing_sources = [self._dataset_map[s][1]
                for s in missing_sources]
            self.add_item_error(FailedAttrVotingError,
                missing_sources, name, votes, ann)
            continue
        attributes[name] = winner

    return attributes
|
| 535 |
+
|
| 536 |
+
def _check_cluster_sources(self, cluster):
    """Record a NoMatchingAnnError for sources that have the current item
    (with annotations) but contributed nothing to this cluster."""
    if len(cluster) == len(self._dataset_map):
        return  # every source is represented - nothing to report

    def _has_item(s):
        # a source only counts if it has the current item and the item
        # actually carries annotations
        try:
            item =self._dataset_map[s][0].get(*self._item_id)
            if len(item.annotations) == 0:
                return False
            return True
        except KeyError:
            return False

    missing_sources = set(self._dataset_map) - \
        set(self.get_ann_source(id(a)) for a in cluster)
    missing_sources = [self._dataset_map[s][1] for s in missing_sources
        if _has_item(s)]
    if missing_sources:
        self.add_item_error(NoMatchingAnnError, missing_sources, cluster[0])
|
| 555 |
+
|
| 556 |
+
def _check_annotation_distance(self, t, annotations):
    """Record a TooCloseError for every pair of type-`t` annotations whose
    distance exceeds the configured `close_distance` threshold."""
    matcher = self._mergers[t]
    for first_idx, first in enumerate(annotations):
        for second in annotations[first_idx + 1:]:
            dist = matcher.distance(first, second)
            if self.conf.close_distance < dist:
                self.add_item_error(TooCloseError, first, second, dist)
|
| 562 |
+
|
| 563 |
+
def _check_groups(self, annotations):
    """Validate annotation grouping against the configured group
    definitions (self.conf.groups), recording WrongGroupError on mismatch."""
    # precompute (all labels, optional labels) for every configured group
    check_groups = []
    for check_group_raw in self.conf.groups:
        check_group = set(l[0] for l in check_group_raw)
        optional = set(l[0] for l in check_group_raw if l[1])
        check_groups.append((check_group, optional))

    def _check_group(group_labels, group):
        # a group is wrong when it intersects a definition but carries
        # extra labels or misses required (non-optional) ones
        for check_group, optional in check_groups:
            common = check_group & group_labels
            real_miss = check_group - common - optional
            extra = group_labels - check_group
            if common and (extra or real_miss):
                self.add_item_error(WrongGroupError, group_labels,
                    check_group, group)
                break

    groups = find_instances(annotations)
    for group in groups:
        group_labels = set()
        for ann in group:
            if not hasattr(ann, 'label'):
                continue
            label = self._get_label_name(ann.label)

            if ann.group:
                group_labels.add(label)
            else:
                # ungrouped annotations are validated individually
                _check_group({label}, [ann])

        if not group_labels:
            continue
        _check_group(group_labels, group)
|
| 596 |
+
|
| 597 |
+
def _get_label_name(self, label_id):
    """Resolve `label_id` to its name in the merged categories; None-safe."""
    if label_id is None:
        return None
    label_cat = self._categories[AnnotationType.label]
    return label_cat.items[label_id].name
|
| 601 |
+
|
| 602 |
+
def _get_label_id(self, label):
    """Find the merged-categories id for a label name."""
    label_id, _ = self._categories[AnnotationType.label].find(label)
    return label_id
|
| 604 |
+
|
| 605 |
+
def _get_src_label_name(self, ann, label_id):
    """Resolve `label_id` using the categories of the source dataset the
    annotation came from; raises KeyError when unknown there."""
    if label_id is None:
        return None
    # annotation id -> item id -> dataset id -> source dataset categories
    item_id = self._ann_map[id(ann)][1]
    dataset_id = self._item_map[item_id][1]
    return self._dataset_map[dataset_id][0] \
        .categories()[AnnotationType.label].items[label_id].name
|
| 612 |
+
|
| 613 |
+
def _get_any_label_name(self, ann, label_id):
    """Resolve `label_id` in the annotation's source dataset, falling back
    to the merged categories when it is unknown there."""
    if label_id is None:
        return None
    try:
        return self._get_src_label_name(ann, label_id)
    except KeyError:
        # the id is not present in the source categories - use merged ones
        return self._get_label_name(label_id)
|
| 620 |
+
|
| 621 |
+
def _check_groups_definition(self):
    """Validate that every label mentioned in the configured groups exists
    in the merged label categories; raise ValueError otherwise."""
    label_cat = self._categories[AnnotationType.label]
    for group in self.conf.groups:
        for label, _ in group:
            _, entry = label_cat.find(label)
            if entry is not None:
                continue
            raise ValueError("Datasets do not contain "
                "label '%s', available labels %s" % \
                (label, [i.name for i in label_cat.items])
            )
|
| 631 |
+
|
| 632 |
+
@attrs(kw_only=True)
class AnnotationMatcher:
    """Base class for per-type annotation matchers.

    Subclasses group annotations coming from multiple sources into
    clusters of presumably-identical annotations.
    """

    # the owning IntersectMerge instance; provides category and
    # per-source lookups
    _context = attrib(type=IntersectMerge, default=None)

    def match_annotations(self, sources):
        """Return a list of clusters, each a list of matched annotations."""
        raise NotImplementedError()
|
| 638 |
+
|
| 639 |
+
@attrs
class LabelMatcher(AnnotationMatcher):
    """Matches label annotations; all labels fall into a single cluster."""

    def distance(self, a, b):
        """True when both annotations resolve to the same label name."""
        name_a = self._context._get_any_label_name(a, a.label)
        name_b = self._context._get_any_label_name(b, b.label)
        return name_a == name_b

    def match_annotations(self, sources):
        # labels need no spatial matching - pool everything together
        pooled = []
        for source in sources:
            pooled.extend(source)
        return [pooled]
|
| 648 |
+
|
| 649 |
+
@attrs(kw_only=True)
class _ShapeMatcher(AnnotationMatcher):
    """Base matcher for spatial annotations (boxes, polygons, masks, ...).

    Clusters annotations from different sources by a pairwise distance
    measure (segment IoU by default); a cluster holds at most one
    annotation per source.
    """

    # minimal distance for two shapes to be considered a pair
    pairwise_dist = attrib(converter=float, default=0.9)
    # minimal distance of a shape to every existing cluster member for it
    # to join; a negative value means "use pairwise_dist"
    cluster_dist = attrib(converter=float, default=-1.0)

    def match_annotations(self, sources):
        """Return clusters of matched shapes, built by pairwise matching
        followed by a BFS-style transitive closure over the matches."""
        distance = self.distance
        label_matcher = self.label_matcher
        pairwise_dist = self.pairwise_dist
        cluster_dist = self.cluster_dist

        if cluster_dist < 0: cluster_dist = pairwise_dist

        # id(annotation) -> (annotation, id(source list))
        id_segm = { id(a): (a, id(s)) for s in sources for a in s }

        def _is_close_enough(cluster, extra_id):
            # check if whole cluster IoU will not be broken
            # when this segment is added
            b = id_segm[extra_id][0]
            for a_id in cluster:
                a = id_segm[a_id][0]
                if distance(a, b) < cluster_dist:
                    return False
            return True

        def _has_same_source(cluster, extra_id):
            # a cluster may hold at most one annotation per source
            b = id_segm[extra_id][1]
            for a_id in cluster:
                a = id_segm[a_id][1]
                if a == b:
                    return True
            return False

        # match segments in sources, pairwise
        adjacent = { i: [] for i in id_segm } # id(sgm) -> [id(adj_sgm1), ...]
        for a_idx, src_a in enumerate(sources):
            for src_b in sources[a_idx+1 :]:
                matches, _, _, _ = match_segments(src_a, src_b,
                    dist_thresh=pairwise_dist,
                    distance=distance, label_matcher=label_matcher)
                for a, b in matches:
                    adjacent[id(a)].append(id(b))

        # join all segments into matching clusters
        clusters = []
        visited = set()
        for cluster_idx in adjacent:
            if cluster_idx in visited:
                continue

            cluster = set()
            to_visit = { cluster_idx }
            while to_visit:
                c = to_visit.pop()
                cluster.add(c)
                visited.add(c)

                for i in adjacent[c]:
                    if i in visited:
                        continue
                    if 0 < cluster_dist and not _is_close_enough(cluster, i):
                        continue
                    if _has_same_source(cluster, i):
                        continue

                    to_visit.add(i)

            clusters.append([id_segm[i][0] for i in cluster])

        return clusters

    @staticmethod
    def distance(a, b):
        # overridable pairwise similarity; higher means closer
        return segment_iou(a, b)

    def label_matcher(self, a, b):
        # compare labels by name, resolving ids via the source datasets
        a_label = self._context._get_any_label_name(a, a.label)
        b_label = self._context._get_any_label_name(b, b.label)
        return a_label == b_label
|
| 728 |
+
|
| 729 |
+
@attrs
class BboxMatcher(_ShapeMatcher):
    """Matches bounding boxes with the generic segment-IoU distance."""
    pass
|
| 732 |
+
|
| 733 |
+
@attrs
class PolygonMatcher(_ShapeMatcher):
    """Matches polygons with the generic segment-IoU distance."""
    pass
|
| 736 |
+
|
| 737 |
+
@attrs
class MaskMatcher(_ShapeMatcher):
    """Matches masks with the generic segment-IoU distance."""
    pass
|
| 740 |
+
|
| 741 |
+
@attrs(kw_only=True)
class PointsMatcher(_ShapeMatcher):
    """Matches keypoint annotations with Object Keypoint Similarity (OKS)."""

    # per-point OKS scale coefficients; None selects the implementation default
    sigma = attrib(type=list, default=None)
    # id(annotation) -> [instance annotations, instance bbox]
    instance_map = attrib(converter=dict)

    def distance(self, a, b):
        """OKS between two point sets, normalized by the mean instance bbox."""
        a_bbox = self.instance_map[id(a)][1]
        b_bbox = self.instance_map[id(b)][1]
        # quick reject: disjoint instance boxes cannot match
        if bbox_iou(a_bbox, b_bbox) <= 0:
            return 0
        bbox = mean_bbox([a_bbox, b_bbox])
        return OKS(a, b, sigma=self.sigma, bbox=bbox)
|
| 753 |
+
|
| 754 |
+
@attrs
class LineMatcher(_ShapeMatcher):
    """Matches polylines by an area-based similarity measure."""

    @staticmethod
    def distance(a, b):
        """Similarity of two polylines: 1 minus the area between the
        resampled lines, normalized by their common bounding box area."""
        a_bbox = a.get_bbox()
        b_bbox = b.get_bbox()
        bbox = max_bbox([a_bbox, b_bbox])
        area = bbox[2] * bbox[3]
        if not area:
            return 1  # both lines are degenerate - treat as identical

        # compute inter-line area, normalize by common bbox
        # resample both lines to the same number of points
        point_count = max(max(len(a.points) // 2, len(b.points) // 2), 5)
        a, sa = smooth_line(a.points, point_count)
        b, sb = smooth_line(b.points, point_count)
        # trapezoid-style approximation of the area between the lines;
        # sa/sb presumably are segment step lengths from smooth_line -
        # TODO confirm against smooth_line's contract
        dists = np.linalg.norm(a - b, axis=1)
        dists = (dists[:-1] + dists[1:]) * 0.5
        s = np.sum(dists) * 0.5 * (sa + sb) / area
        return abs(1 - s)
|
| 773 |
+
|
| 774 |
+
@attrs
class CaptionsMatcher(AnnotationMatcher):
    """Caption matching is not implemented yet."""

    def match_annotations(self, sources):
        raise NotImplementedError()
|
| 778 |
+
|
| 779 |
+
|
| 780 |
+
@attrs(kw_only=True)
class AnnotationMerger:
    """Base class for per-type annotation mergers: turns clusters of
    matched annotations into single merged annotations."""

    def merge_clusters(self, clusters):
        """Return a list of merged annotations, one per input cluster."""
        raise NotImplementedError()
|
| 784 |
+
|
| 785 |
+
@attrs(kw_only=True)
class LabelMerger(AnnotationMerger, LabelMatcher):
    """Merges the single cluster of label annotations by majority voting."""

    # minimal number of votes a label needs to be accepted
    quorum = attrib(converter=int, default=0)

    def merge_clusters(self, clusters):
        """Vote per label name; labels below the quorum are reported as
        FailedLabelVotingError item errors instead of being merged.

        Fixes: get_ann_source lives on the merging context, not on the
        merger itself (the old self.get_ann_source raised AttributeError),
        and the blame filter iterated a single annotation (`for l in a`)
        instead of comparing that annotation's own label.
        """
        assert len(clusters) <= 1
        if len(clusters) == 0:
            return []

        votes = {} # label -> score
        for ann in clusters[0]:
            label = self._context._get_src_label_name(ann, ann.label)
            votes[label] = 1 + votes.get(label, 0)

        merged = []
        for label, count in votes.items():
            if count < self.quorum:
                # blame the sources whose annotation did not vote for
                # this label
                sources = set(self._context.get_ann_source(id(a))
                    for a in clusters[0]
                    if self._context._get_src_label_name(a, a.label) != label)
                sources = [self._context._dataset_map[s][1] for s in sources]
                self._context.add_item_error(FailedLabelVotingError,
                    sources, votes)
                continue

            merged.append(Label(self._context._get_label_id(label), attributes={
                'score': count / len(self._context._dataset_map)
            }))

        return merged
|
| 815 |
+
|
| 816 |
+
@attrs(kw_only=True)
class _ShapeMerger(AnnotationMerger, _ShapeMatcher):
    """Merges clusters of spatial annotations: votes on the label and picks
    the member shape closest to the cluster's mean bounding box."""

    # minimal number of label votes required to accept a label
    quorum = attrib(converter=int, default=0)

    def merge_clusters(self, clusters):
        """Return one merged shape per cluster, with voted label and a
        combined confidence score in attributes['score']."""
        merged = []
        for cluster in clusters:
            label, label_score = self.find_cluster_label(cluster)
            shape, shape_score = self.merge_cluster_shape(cluster)

            # the merged shape takes the topmost z_order of the cluster
            shape.z_order = max(cluster, key=lambda a: a.z_order).z_order
            shape.label = label
            shape.attributes['score'] = label_score * shape_score \
                if label is not None else shape_score

            merged.append(shape)

        return merged

    def find_cluster_label(self, cluster):
        """Score-weighted vote on the cluster's label.

        Returns (label_id, score); on quorum failure a
        FailedLabelVotingError is recorded and the label is dropped.
        """
        votes = {}
        for s in cluster:
            label = self._context._get_src_label_name(s, s.label)
            state = votes.setdefault(label, [0, 0])
            state[0] += s.attributes.get('score', 1.0)
            state[1] += 1

        label, (score, count) = max(votes.items(), key=lambda e: e[1][0])
        if count < self.quorum:
            self._context.add_item_error(FailedLabelVotingError, votes)
            label = None
        score = score / len(self._context._dataset_map)
        # NOTE(review): when voting failed, label is None here, so this
        # relies on _get_label_id(None) yielding None - confirm
        label = self._context._get_label_id(label)
        return label, score

    @staticmethod
    def _merge_cluster_shape_mean_box_nearest(cluster):
        # pick the cluster member with the highest IoU against the mean bbox
        mbbox = Bbox(*mean_bbox(cluster))
        dist = (segment_iou(mbbox, s) for s in cluster)
        nearest_pos, _ = max(enumerate(dist), key=lambda e: e[1])
        return cluster[nearest_pos]

    def merge_cluster_shape(self, cluster):
        """Return (shape, score): the representative member shape and its
        mean similarity to the rest of the cluster."""
        shape = self._merge_cluster_shape_mean_box_nearest(cluster)
        shape_score = sum(max(0, self.distance(shape, s))
            for s in cluster) / len(cluster)
        return shape, shape_score
|
| 863 |
+
|
| 864 |
+
@attrs
class BboxMerger(_ShapeMerger, BboxMatcher):
    """Merges bounding-box clusters with the generic shape-merging logic."""
    pass
|
| 867 |
+
|
| 868 |
+
@attrs
class PolygonMerger(_ShapeMerger, PolygonMatcher):
    """Merges polygon clusters with the generic shape-merging logic."""
    pass
|
| 871 |
+
|
| 872 |
+
@attrs
class MaskMerger(_ShapeMerger, MaskMatcher):
    """Merges mask clusters with the generic shape-merging logic."""
    pass
|
| 875 |
+
|
| 876 |
+
@attrs
class PointsMerger(_ShapeMerger, PointsMatcher):
    """Merges keypoint clusters, matching by OKS distance."""
    pass
|
| 879 |
+
|
| 880 |
+
@attrs
class LineMerger(_ShapeMerger, LineMatcher):
    """Merges polyline clusters, matching by the line-area distance."""
    pass
|
| 883 |
+
|
| 884 |
+
@attrs
class CaptionsMerger(AnnotationMerger, CaptionsMatcher):
    """Caption merging; matching is not implemented yet (see matcher)."""
    pass
|
| 887 |
+
|
| 888 |
+
def match_segments(a_segms, b_segms, distance=segment_iou, dist_thresh=1.0,
        label_matcher=lambda a, b: a.label == b.label):
    """Greedily match two sets of segments by a distance measure.

    NOTE: sorts both input lists IN PLACE by descending 'score' attribute,
    so higher-confidence segments are matched first.

    Returns (matches, mispred, a_unmatched, b_unmatched):
    - matches: matched (a, b) pairs whose labels agree
    - mispred: matched (a, b) pairs whose labels differ
    - a_unmatched / b_unmatched: segments that found no partner
    """
    assert callable(distance), distance
    assert callable(label_matcher), label_matcher

    a_segms.sort(key=lambda ann: 1 - ann.attributes.get('score', 1))
    b_segms.sort(key=lambda ann: 1 - ann.attributes.get('score', 1))

    # a_matches: indices of b_segms matched to a bboxes
    # b_matches: indices of a_segms matched to b bboxes
    a_matches = -np.ones(len(a_segms), dtype=int)
    b_matches = -np.ones(len(b_segms), dtype=int)

    distances = np.array([[distance(a, b) for b in b_segms] for a in a_segms])

    # matches: boxes we succeeded to match completely
    # mispred: boxes we succeeded to match, having label mismatch
    matches = []
    mispred = []

    for a_idx, a_segm in enumerate(a_segms):
        if len(b_segms) == 0:
            break
        matched_b = -1
        max_dist = -1
        b_indices = np.argsort([not label_matcher(a_segm, b_segm)
                for b_segm in b_segms],
            kind='stable') # prioritize those with same label, keep score order
        for b_idx in b_indices:
            if 0 <= b_matches[b_idx]: # assign a_segm with max conf
                continue
            d = distances[a_idx, b_idx]
            if d < dist_thresh or d <= max_dist:
                continue
            max_dist = d
            matched_b = b_idx

        if matched_b < 0:
            continue
        a_matches[a_idx] = matched_b
        b_matches[matched_b] = a_idx

        b_segm = b_segms[matched_b]

        if label_matcher(a_segm, b_segm):
            matches.append( (a_segm, b_segm) )
        else:
            mispred.append( (a_segm, b_segm) )

    # *_umatched: boxes of (*) we failed to match
    a_unmatched = [a_segms[i] for i, m in enumerate(a_matches) if m < 0]
    b_unmatched = [b_segms[i] for i, m in enumerate(b_matches) if m < 0]

    return matches, mispred, a_unmatched, b_unmatched
|
| 942 |
+
|
| 943 |
+
def mean_std(dataset):
    """
    Computes unbiased mean and std. dev. for dataset images, channel-wise.
    """
    # Use an online algorithm to:
    #   - handle different image sizes
    #   - avoid cancellation problem
    if len(dataset) == 0:
        return [0, 0, 0], [0, 0, 0]

    # per-item (mean, variance) over 3 channels, plus per-item pixel counts
    stats = np.empty((len(dataset), 2, 3), dtype=np.double)
    counts = np.empty(len(dataset), dtype=np.uint32)

    mean = lambda i, s: s[i][0]
    var = lambda i, s: s[i][1]

    for i, item in enumerate(dataset):
        counts[i] = np.prod(item.image.size)

        image = item.image.data
        if len(image.shape) == 2:
            # grayscale: give it a channel axis
            image = image[:, :, np.newaxis]
        else:
            # keep only the first 3 channels (drop alpha)
            image = image[:, :, :3]
        # opencv is much faster than numpy here
        # cv2.meanStdDev fills the provided output arrays in place
        cv2.meanStdDev(image.astype(np.double) / 255,
            mean=mean(i, stats), stddev=var(i, stats))

    # make variance unbiased
    # NOTE(review): divides by (count - 1); a one-pixel image would cause
    # a division by zero here - confirm inputs are always larger
    np.multiply(np.square(stats[:, 1]),
        (counts / (counts - 1))[:, np.newaxis],
        out=stats[:, 1])

    _, mean, var = StatsCounter().compute_stats(stats, counts, mean, var)
    return mean * 255, np.sqrt(var) * 255
|
| 978 |
+
|
| 979 |
+
class StatsCounter:
    """Online, pairwise combination of sample count/mean/variance."""
    # Implements online parallel computation of sample variance
    # https://en.wikipedia.org/wiki/Algorithms_for_calculating_variance#Parallel_algorithm

    # Needed do avoid catastrophic cancellation in floating point computations
    @staticmethod
    def pairwise_stats(count_a, mean_a, var_a, count_b, mean_b, var_b):
        """Combine (count, mean, unbiased variance) of two samples.

        Fix: the combined mean must be weighted by the sample sizes
        (mean_a + delta * count_b / (count_a + count_b), per Chan et al.);
        the previous plain average of the two means was only correct for
        equally-sized samples.
        """
        delta = mean_b - mean_a
        m_a = var_a * (count_a - 1)
        m_b = var_b * (count_b - 1)
        M2 = m_a + m_b + delta ** 2 * count_a * count_b / (count_a + count_b)
        return (
            count_a + count_b,
            mean_a + delta * count_b / (count_a + count_b),
            M2 / (count_a + count_b - 1)
        )

    # stats = float array of shape N, 2 * d, d = dimensions of values
    # count = integer array of shape N
    # mean_accessor = function(idx, stats) to retrieve element mean
    # variance_accessor = function(idx, stats) to retrieve element variance
    # Recursively computes total count, mean and variance, does O(log(N)) calls
    @staticmethod
    def compute_stats(stats, counts, mean_accessor, variance_accessor):
        """Reduce per-element stats to a single (count, mean, variance)."""
        m = mean_accessor
        v = variance_accessor
        n = len(stats)
        if n == 1:
            return counts[0], m(0, stats), v(0, stats)
        if n == 2:
            return __class__.pairwise_stats(
                counts[0], m(0, stats), v(0, stats),
                counts[1], m(1, stats), v(1, stats)
            )
        # divide and conquer: combine the two halves pairwise
        h = n // 2
        return __class__.pairwise_stats(
            *__class__.compute_stats(stats[:h], counts[:h], m, v),
            *__class__.compute_stats(stats[h:], counts[h:], m, v)
        )
|
| 1018 |
+
|
| 1019 |
+
def compute_image_statistics(dataset):
    """Compute image count and channel-wise mean/std for a dataset.

    Returns a dict with dataset-wide stats under 'dataset' and per-subset
    stats under 'subsets'. Mean/std are reported as 'n/a' when any item
    lacks image data.

    Fix: use log.warning - log.warn is a deprecated alias.
    """
    stats = {
        'dataset': {},
        'subsets': {}
    }

    def _extractor_stats(extractor):
        # mean/std can only be computed when every item has image data
        available = True
        for item in extractor:
            if not (item.has_image and item.image.has_data):
                available = False
                log.warning(
                    "Item %s has no image. Image stats won't be computed",
                    item.id)
                break

        stats = {
            'images count': len(extractor),
        }

        if available:
            mean, std = mean_std(extractor)
            stats.update({
                # channels are reversed - presumably BGR (OpenCV order)
                # to RGB; confirm intended order
                'image mean': [float(n) for n in mean[::-1]],
                'image std': [float(n) for n in std[::-1]],
            })
        else:
            stats.update({
                'image mean': 'n/a',
                'image std': 'n/a',
            })
        return stats

    stats['dataset'].update(_extractor_stats(dataset))

    subsets = dataset.subsets() or [None]
    if subsets and 0 < len([s for s in subsets if s]):
        for subset_name in subsets:
            stats['subsets'][subset_name] = _extractor_stats(
                dataset.get_subset(subset_name))

    return stats
|
| 1060 |
+
|
| 1061 |
+
def compute_ann_statistics(dataset):
    """Compute annotation statistics for a dataset.

    Returns a dict with overall counts, per-type counts, label and
    attribute value distributions, and segment area/pixel statistics.
    """
    labels = dataset.categories().get(AnnotationType.label)
    def get_label(ann):
        # resolve an annotation's label id to its name (None-safe)
        return labels.items[ann.label].name if ann.label is not None else None

    stats = {
        'images count': len(dataset),
        'annotations count': 0,
        'unannotated images count': 0,
        'unannotated images': [],
        'annotations by type': { t.name: {
            'count': 0,
        } for t in AnnotationType },
        'annotations': {},
    }
    by_type = stats['annotations by type']

    attr_template = {
        'count': 0,
        'values count': 0,
        'values present': set(),
        'distribution': {}, # value -> (count, total%)
    }
    label_stat = {
        'count': 0,
        'distribution': { l.name: [0, 0] for l in labels.items
        }, # label -> (count, total%)

        'attributes': {},
    }
    stats['annotations']['labels'] = label_stat
    segm_stat = {
        'avg. area': 0,
        'area distribution': [], # a histogram with 10 bins
        # (min, min+10%), ..., (min+90%, max) -> (count, total%)

        'pixel distribution': { l.name: [0, 0] for l in labels.items
        }, # label -> (count, total%)
    }
    stats['annotations']['segments'] = segm_stat
    segm_areas = []
    pixel_dist = segm_stat['pixel distribution']
    total_pixels = 0

    for item in dataset:
        if len(item.annotations) == 0:
            stats['unannotated images'].append(item.id)
            continue

        for ann in item.annotations:
            by_type[ann.type.name]['count'] += 1

            # label-based stats only apply to labeled annotations
            if not hasattr(ann, 'label') or ann.label is None:
                continue

            if ann.type in {AnnotationType.mask,
                    AnnotationType.polygon, AnnotationType.bbox}:
                area = ann.get_area()
                segm_areas.append(area)
                pixel_dist[get_label(ann)][0] += int(area)

            label_stat['count'] += 1
            label_stat['distribution'][get_label(ann)][0] += 1

            for name, value in ann.attributes.items():
                # skip service attributes
                if name.lower() in { 'occluded', 'visibility', 'score',
                        'id', 'track_id' }:
                    continue
                attrs_stat = label_stat['attributes'].setdefault(name,
                    deepcopy(attr_template))
                attrs_stat['count'] += 1
                attrs_stat['values present'].add(str(value))
                attrs_stat['distribution'] \
                    .setdefault(str(value), [0, 0])[0] += 1

    stats['annotations count'] = sum(t['count'] for t in
        stats['annotations by type'].values())
    stats['unannotated images count'] = len(stats['unannotated images'])

    # turn raw counts into (count, share) pairs
    for label_info in label_stat['distribution'].values():
        label_info[1] = label_info[0] / (label_stat['count'] or 1)

    for label_attr in label_stat['attributes'].values():
        label_attr['values count'] = len(label_attr['values present'])
        label_attr['values present'] = sorted(label_attr['values present'])
        for attr_info in label_attr['distribution'].values():
            attr_info[1] = attr_info[0] / (label_attr['count'] or 1)

    # numpy.sum might be faster, but could overflow with large datasets.
    # Python's int can transparently mutate to be of indefinite precision (long)
    total_pixels = sum(int(a) for a in segm_areas)

    segm_stat['avg. area'] = total_pixels / (len(segm_areas) or 1.0)

    for label_info in segm_stat['pixel distribution'].values():
        label_info[1] = label_info[0] / (total_pixels or 1)

    if len(segm_areas) != 0:
        hist, bins = np.histogram(segm_areas)
        segm_stat['area distribution'] = [{
            'min': float(bin_min), 'max': float(bin_max),
            'count': int(c), 'percent': int(c) / len(segm_areas)
        } for c, (bin_min, bin_max) in zip(hist, zip(bins[:-1], bins[1:]))]

    return stats
|
| 1166 |
+
|
| 1167 |
+
@attrs
class DistanceComparator:
    """Compares two datasets by matching their items' annotations with
    per-type, distance-based (IoU-like) matchers."""

    # minimal distance for two spatial annotations to be considered a match
    iou_threshold = attrib(converter=float, default=0.5)

    @staticmethod
    def match_datasets(a, b):
        """Match dataset items by their (id, subset) keys."""
        a_items = set((item.id, item.subset) for item in a)
        b_items = set((item.id, item.subset) for item in b)

        matches = a_items & b_items
        a_unmatched = a_items - b_items
        b_unmatched = b_items - a_items
        return matches, a_unmatched, b_unmatched

    @staticmethod
    def match_classes(a, b):
        """Match the two datasets' label categories by label name."""
        a_label_cat = a.categories().get(AnnotationType.label, LabelCategories())
        b_label_cat = b.categories().get(AnnotationType.label, LabelCategories())

        a_labels = set(c.name for c in a_label_cat)
        b_labels = set(c.name for c in b_label_cat)

        matches = a_labels & b_labels
        a_unmatched = a_labels - b_labels
        b_unmatched = b_labels - a_labels
        return matches, a_unmatched, b_unmatched

    def match_annotations(self, item_a, item_b):
        """Match annotations of every type between two items.

        Fix: the original body read `{ t: ... }` with `t` unbound, which
        raised NameError; iterate over all annotation types instead.
        """
        return { t: self._match_ann_type(t, item_a, item_b)
            for t in AnnotationType }

    def _match_ann_type(self, t, *args):
        # dispatch to the matcher for the given annotation type
        # pylint: disable=no-value-for-parameter
        if t == AnnotationType.label:
            return self.match_labels(*args)
        elif t == AnnotationType.bbox:
            return self.match_boxes(*args)
        elif t == AnnotationType.polygon:
            return self.match_polygons(*args)
        elif t == AnnotationType.mask:
            return self.match_masks(*args)
        elif t == AnnotationType.points:
            return self.match_points(*args)
        elif t == AnnotationType.polyline:
            return self.match_lines(*args)
        # pylint: enable=no-value-for-parameter
        else:
            raise NotImplementedError("Unexpected annotation type %s" % t)

    @staticmethod
    def _get_ann_type(t, item):
        # all annotations of type `t` in the item
        return get_ann_type(item.annotations, t)

    def match_labels(self, item_a, item_b):
        """Match two items' label annotations by label id."""
        a_labels = set(a.label for a in
            self._get_ann_type(AnnotationType.label, item_a))
        b_labels = set(a.label for a in
            self._get_ann_type(AnnotationType.label, item_b))

        matches = a_labels & b_labels
        a_unmatched = a_labels - b_labels
        b_unmatched = b_labels - a_labels
        return matches, a_unmatched, b_unmatched

    def _match_segments(self, t, item_a, item_b):
        a_boxes = self._get_ann_type(t, item_a)
        b_boxes = self._get_ann_type(t, item_b)
        return match_segments(a_boxes, b_boxes, dist_thresh=self.iou_threshold)

    def match_polygons(self, item_a, item_b):
        return self._match_segments(AnnotationType.polygon, item_a, item_b)

    def match_masks(self, item_a, item_b):
        return self._match_segments(AnnotationType.mask, item_a, item_b)

    def match_boxes(self, item_a, item_b):
        return self._match_segments(AnnotationType.bbox, item_a, item_b)

    def match_points(self, item_a, item_b):
        """Match keypoint annotations by OKS, normalized per instance."""
        a_points = self._get_ann_type(AnnotationType.points, item_a)
        b_points = self._get_ann_type(AnnotationType.points, item_b)

        # group annotations into instances so OKS can be normalized by
        # the instance bounding box
        instance_map = {}
        for s in [item_a.annotations, item_b.annotations]:
            s_instances = find_instances(s)
            for inst in s_instances:
                inst_bbox = max_bbox(inst)
                for ann in inst:
                    instance_map[id(ann)] = [inst, inst_bbox]
        matcher = PointsMatcher(instance_map=instance_map)

        return match_segments(a_points, b_points,
            dist_thresh=self.iou_threshold, distance=matcher.distance)

    def match_lines(self, item_a, item_b):
        """Match polyline annotations by the line-area distance."""
        a_lines = self._get_ann_type(AnnotationType.polyline, item_a)
        b_lines = self._get_ann_type(AnnotationType.polyline, item_b)

        matcher = LineMatcher()

        return match_segments(a_lines, b_lines,
            dist_thresh=self.iou_threshold, distance=matcher.distance)
|
| 1268 |
+
|
| 1269 |
+
def match_items_by_id(a, b):
    """Match items of two datasets by their (id, subset) keys.

    Returns (matches, a_unmatched, b_unmatched): matches is a list of
    ([key], [key]) pairs, the unmatched results are sets of keys.
    """
    keys_a = { (item.id, item.subset) for item in a }
    keys_b = { (item.id, item.subset) for item in b }

    shared = keys_a & keys_b
    paired = [([key], [key]) for key in shared]
    only_a = keys_a - keys_b
    only_b = keys_b - keys_a
    return paired, only_a, only_b
|
| 1278 |
+
|
| 1279 |
+
def match_items_by_image_hash(a, b):
    """Match items of two datasets by the MD5 hash of their image data.

    Items without image data cannot match anything and end up unmatched.
    Returns (matches, a_unmatched, b_unmatched): matches pairs the lists
    of (id, subset) keys that share one image hash; the unmatched results
    are sets of keys.
    """
    def _hash(item):
        if not item.image.has_data:
            log.warning("Image (%s, %s) has no image "
                "data, counted as unmatched", item.id, item.subset)
            return None
        return hashlib.md5(item.image.data.tobytes()).hexdigest()

    def _build_hashmap(source):
        hashmap = {}
        for item in source:
            key = _hash(item)
            if key is None:
                key = str(id(item)) # anything unique
            hashmap.setdefault(key, []).append((item.id, item.subset))
        return hashmap

    hashes_a = _build_hashmap(a)
    hashes_b = _build_hashmap(b)

    shared = set(hashes_a) & set(hashes_b)
    only_a = set(hashes_a) - set(hashes_b)
    only_b = set(hashes_b) - set(hashes_a)

    matches = [(hashes_a[h], hashes_b[h]) for h in shared]
    a_unmatched = { key for h in only_a for key in hashes_a[h] }
    b_unmatched = { key for h in only_b for key in hashes_b[h] }

    return matches, a_unmatched, b_unmatched
|
| 1311 |
+
|
| 1312 |
+
@attrs
class ExactComparator:
    """Compares two datasets for exact, field-by-field equality.

    Annotations either match completely (after dropping the configured
    ignored fields/attributes) or not at all.  Results are collected as
    plain dicts, suitable for JSON-style reporting.
    """

    # When True, items are paired by MD5 of their image data instead of
    # by their (id, subset) key.
    match_images = attrib(kw_only=True, type=bool, default=False)
    # Annotation fields excluded from comparison (e.g. 'group').
    ignored_fields = attrib(kw_only=True,
        factory=set, validator=default_if_none(set))
    # Annotation attributes excluded from comparison.
    ignored_attrs = attrib(kw_only=True,
        factory=set, validator=default_if_none(set))
    # Item-level attributes excluded from comparison.
    ignored_item_attrs = attrib(kw_only=True,
        factory=set, validator=default_if_none(set))

    # unittest.TestCase is used purely for its readable assertEqual diffs.
    _test = attrib(init=False, type=TestCase)
    # Populated by compare_datasets(); a list of {'type', 'message', ...} dicts.
    errors = attrib(init=False, type=list)

    def __attrs_post_init__(self):
        self._test = TestCase()
        self._test.maxDiff = None  # never truncate assertEqual diff output


    def _match_items(self, a, b):
        """Pair items of the two datasets; see match_items_by_* for the shape."""
        if self.match_images:
            return match_items_by_image_hash(a, b)
        else:
            return match_items_by_id(a, b)

    def _compare_categories(self, a, b):
        """Compare dataset categories; mismatches are appended to self.errors."""
        test = self._test
        errors = self.errors

        # Compare the sets of category kinds first (keyed by enum value).
        try:
            test.assertEqual(
                sorted(a, key=lambda t: t.value),
                sorted(b, key=lambda t: t.value)
            )
        except AssertionError as e:
            errors.append({'type': 'categories', 'message': str(e)})

        # Then compare the contents of each category kind present in `a`.
        if AnnotationType.label in a:
            try:
                test.assertEqual(
                    a[AnnotationType.label].items,
                    b[AnnotationType.label].items,
                )
            except AssertionError as e:
                errors.append({'type': 'labels', 'message': str(e)})
        if AnnotationType.mask in a:
            try:
                test.assertEqual(
                    a[AnnotationType.mask].colormap,
                    b[AnnotationType.mask].colormap,
                )
            except AssertionError as e:
                errors.append({'type': 'colormap', 'message': str(e)})
        if AnnotationType.points in a:
            try:
                test.assertEqual(
                    a[AnnotationType.points].items,
                    b[AnnotationType.points].items,
                )
            except AssertionError as e:
                errors.append({'type': 'points', 'message': str(e)})

    def _compare_annotations(self, a, b):
        """Return True if two annotations are equal, ignoring the configured
        fields and attributes (compared via copies with those fields blanked)."""
        ignored_fields = self.ignored_fields
        ignored_attrs = self.ignored_attrs

        # Blank out the ignored fields on both sides before comparing.
        a_fields = { k: None for k in vars(a) if k in ignored_fields }
        b_fields = { k: None for k in vars(b) if k in ignored_fields }
        if 'attributes' not in ignored_fields:
            a_fields['attributes'] = filter_dict(a.attributes, ignored_attrs)
            b_fields['attributes'] = filter_dict(b.attributes, ignored_attrs)

        result = a.wrap(**a_fields) == b.wrap(**b_fields)

        return result

    def _compare_items(self, item_a, item_b):
        """Compare two items' attributes and annotations.

        Returns (matched, unmatched, errors) — lists of report dicts.
        """
        test = self._test

        a_id = (item_a.id, item_a.subset)
        b_id = (item_b.id, item_b.subset)

        matched = []
        unmatched = []
        errors = []

        # Item-level attributes (minus the ignored ones).
        try:
            test.assertEqual(
                filter_dict(item_a.attributes, self.ignored_item_attrs),
                filter_dict(item_b.attributes, self.ignored_item_attrs)
            )
        except AssertionError as e:
            errors.append({'type': 'item_attr',
                'a_item': a_id, 'b_item': b_id, 'message': str(e)})

        # Greedy matching: each annotation of `a` takes the first equal,
        # same-type, not-yet-consumed annotation of `b`.
        b_annotations = item_b.annotations[:]
        for ann_a in item_a.annotations:
            ann_b_candidates = [x for x in item_b.annotations
                if x.type == ann_a.type]

            ann_b = find(enumerate(self._compare_annotations(ann_a, x)
                for x in ann_b_candidates), lambda x: x[1])
            if ann_b is None:
                unmatched.append({
                    'item': a_id, 'source': 'a', 'ann': str(ann_a),
                })
                continue
            else:
                ann_b = ann_b_candidates[ann_b[0]]

            b_annotations.remove(ann_b) # avoid repeats
            matched.append({'a_item': a_id, 'b_item': b_id,
                'a': str(ann_a), 'b': str(ann_b)})

        # Whatever remains on the `b` side was never claimed.
        for ann_b in b_annotations:
            unmatched.append({'item': b_id, 'source': 'b', 'ann': str(ann_b)})

        return matched, unmatched, errors

    def compare_datasets(self, a, b):
        """Compare two datasets.

        Returns (matched, unmatched, a_unmatched, b_unmatched, errors):
        matched/unmatched are annotation-level report dicts, a_/b_unmatched
        are sets of (id, subset) item keys, errors is the list also stored
        in self.errors.
        """
        self.errors = []
        errors = self.errors

        self._compare_categories(a.categories(), b.categories())

        matched = []
        unmatched = []

        matches, a_unmatched, b_unmatched = self._match_items(a, b)

        # Annotation comparison is meaningless with different label sets.
        if a.categories().get(AnnotationType.label) != \
                b.categories().get(AnnotationType.label):
            return matched, unmatched, a_unmatched, b_unmatched, errors

        # "Distance" between two items = number of unmatched anns + errors.
        _dist = lambda s: len(s[1]) + len(s[2])
        for a_ids, b_ids in matches:
            # build distance matrix
            match_status = {} # (a_id, b_id): [matched, unmatched, errors]
            a_matches = { a_id: None for a_id in a_ids }
            b_matches = { b_id: None for b_id in b_ids }

            for a_id in a_ids:
                item_a = a.get(*a_id)
                candidates = {}

                for b_id in b_ids:
                    item_b = b.get(*b_id)

                    i_m, i_um, i_err = self._compare_items(item_a, item_b)
                    candidates[b_id] = [i_m, i_um, i_err]

                    # A perfect match short-circuits the search for this item.
                    if len(i_um) == 0:
                        a_matches[a_id] = b_id
                        b_matches[b_id] = a_id
                        matched.extend(i_m)
                        errors.extend(i_err)
                        break

                match_status[a_id] = candidates

            # assign
            for a_id in a_ids:
                if len(b_ids) == 0:
                    break

                # find the closest, ignore already assigned
                matched_b = a_matches[a_id]
                if matched_b is not None:
                    continue
                min_dist = -1
                for b_id in b_ids:
                    if b_matches[b_id] is not None:
                        continue
                    d = _dist(match_status[a_id][b_id])
                    # NOTE(review): this skips candidates with a SMALLER
                    # distance once min_dist is set, i.e. it keeps the
                    # farthest candidate — which contradicts the
                    # "find the closest" intent above. Confirm whether the
                    # condition should be `0 <= min_dist and min_dist <= d`.
                    if d < min_dist and 0 <= min_dist:
                        continue
                    min_dist = d
                    matched_b = b_id

                if matched_b is None:
                    continue
                a_matches[a_id] = matched_b
                b_matches[matched_b] = a_id

                m = match_status[a_id][matched_b]
                matched.extend(m[0])
                unmatched.extend(m[1])
                errors.extend(m[2])

            # Items in this hash group that never got a partner.
            a_unmatched |= set(a_id for a_id, m in a_matches.items() if not m)
            b_unmatched |= set(b_id for b_id, m in b_matches.items() if not m)

        return matched, unmatched, a_unmatched, b_unmatched, errors
|
testbed/openvinotoolkit__datumaro/datumaro/plugins/accuracy_checker_plugin/__init__.py
ADDED
|
@@ -0,0 +1,4 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright (C) 2020 Intel Corporation
|
| 2 |
+
#
|
| 3 |
+
# SPDX-License-Identifier: MIT
|
| 4 |
+
|
testbed/openvinotoolkit__datumaro/datumaro/plugins/accuracy_checker_plugin/details/ac.py
ADDED
|
@@ -0,0 +1,116 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
|
| 2 |
+
# Copyright (C) 2020 Intel Corporation
|
| 3 |
+
#
|
| 4 |
+
# SPDX-License-Identifier: MIT
|
| 5 |
+
|
| 6 |
+
from datumaro.util.tf_util import import_tf
|
| 7 |
+
import_tf() # prevent TF loading and potential interpreter crash
|
| 8 |
+
|
| 9 |
+
from itertools import groupby
|
| 10 |
+
|
| 11 |
+
from accuracy_checker.adapters import create_adapter
|
| 12 |
+
from accuracy_checker.data_readers import DataRepresentation
|
| 13 |
+
from accuracy_checker.launcher import InputFeeder, create_launcher
|
| 14 |
+
from accuracy_checker.postprocessor import PostprocessingExecutor
|
| 15 |
+
from accuracy_checker.preprocessor import PreprocessingExecutor
|
| 16 |
+
from accuracy_checker.utils import extract_image_representations
|
| 17 |
+
|
| 18 |
+
from datumaro.components.extractor import AnnotationType, LabelCategories
|
| 19 |
+
|
| 20 |
+
from .representation import import_predictions
|
| 21 |
+
|
| 22 |
+
|
| 23 |
+
class _FakeDataset:
|
| 24 |
+
def __init__(self, metadata=None):
|
| 25 |
+
self.metadata = metadata or {}
|
| 26 |
+
|
| 27 |
+
class GenericAcLauncher:
    """Runs inference through an Accuracy Checker launcher and converts the
    resulting predictions to Datumaro annotations."""

    @staticmethod
    def from_config(config):
        """Build a launcher from an Accuracy Checker-style config dict.

        Expects a 'launcher' section; 'adapter', 'preprocessing' and
        'postprocessing' sections are optional.
        """
        launcher_config = config['launcher']
        launcher = create_launcher(launcher_config)

        # The adapter may carry a label map; expose it through a fake
        # dataset object, as AC adapters read labels from dataset metadata.
        dataset = _FakeDataset()
        adapter_config = config.get('adapter') or launcher_config.get('adapter')
        label_config = adapter_config.get('labels') \
            if isinstance(adapter_config, dict) else None
        if label_config:
            assert isinstance(label_config, (list, dict))
            if isinstance(label_config, list):
                # A plain list means labels are indexed by position.
                label_config = dict(enumerate(label_config))

            dataset.metadata = {'label_map': {
                int(key): label for key, label in label_config.items()
            }}
        adapter = create_adapter(adapter_config, launcher, dataset)

        preproc_config = config.get('preprocessing')
        preproc = None
        if preproc_config:
            preproc = PreprocessingExecutor(preproc_config,
                dataset_meta=dataset.metadata,
                input_shapes=launcher.inputs_info_for_meta()
            )

        postproc_config = config.get('postprocessing')
        postproc = None
        if postproc_config:
            postproc = PostprocessingExecutor(postproc_config,
                dataset_meta=dataset.metadata,
            )

        return __class__(launcher,
            adapter=adapter, preproc=preproc, postproc=postproc)

    def __init__(self, launcher, adapter=None,
            preproc=None, postproc=None, input_feeder=None):
        self._launcher = launcher
        # Default feeder maps raw inputs onto the launcher's input layers.
        self._input_feeder = input_feeder or InputFeeder(
            launcher.config.get('inputs', []), launcher.inputs,
            launcher.fit_to_input, launcher.default_layout
        )
        self._adapter = adapter
        self._preproc = preproc
        self._postproc = postproc

        self._categories = self._init_categories()

    def launch_raw(self, inputs):
        """Run the full AC pipeline (preproc -> feed -> predict -> adapt ->
        postproc) on a batch of raw inputs; returns AC prediction objects
        (or raw launcher output when no adapter is configured)."""
        # Identifiers are just batch positions; the adapter uses them to
        # associate outputs with inputs.
        ids = range(len(inputs))
        inputs = [DataRepresentation(inp, identifier=id)
            for id, inp in zip(ids, inputs)]
        _, batch_meta = extract_image_representations(inputs)

        if self._preproc:
            inputs = self._preproc.process(inputs)

        inputs = self._input_feeder.fill_inputs(inputs)
        outputs = self._launcher.predict(inputs, batch_meta)

        if self._adapter:
            outputs = self._adapter.process(outputs, ids, batch_meta)

        if self._postproc:
            outputs = self._postproc.process(outputs)

        return outputs

    def launch(self, inputs):
        """Run inference and convert predictions to Datumaro annotations,
        one list of annotations per input (grouped by prediction identifier).

        NOTE(review): groupby only merges *adjacent* equal identifiers —
        assumes launch_raw returns outputs ordered by identifier; confirm.
        """
        outputs = self.launch_raw(inputs)
        return [import_predictions(g) for _, g in
            groupby(outputs, key=lambda o: o.identifier)]

    def categories(self):
        """Return Datumaro categories derived from the adapter's label map,
        or None if no adapter/label map is available."""
        return self._categories

    def _init_categories(self):
        if self._adapter is None or self._adapter.label_map is None:
            return None

        # Sort by numeric label id so category indices are stable.
        label_map = sorted(self._adapter.label_map.items(), key=lambda e: e[0])

        label_cat = LabelCategories()
        for _, label in label_map:
            label_cat.add(label)

        return { AnnotationType.label: label_cat }
|
testbed/openvinotoolkit__datumaro/datumaro/plugins/accuracy_checker_plugin/details/representation.py
ADDED
|
@@ -0,0 +1,62 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
|
| 2 |
+
# Copyright (C) 2020 Intel Corporation
|
| 3 |
+
#
|
| 4 |
+
# SPDX-License-Identifier: MIT
|
| 5 |
+
|
| 6 |
+
from datumaro.util.tf_util import import_tf
|
| 7 |
+
import_tf() # prevent TF loading and potential interpreter crash
|
| 8 |
+
|
| 9 |
+
import accuracy_checker.representation as ac
|
| 10 |
+
|
| 11 |
+
import datumaro.components.extractor as dm
|
| 12 |
+
from datumaro.util.annotation_util import softmax
|
| 13 |
+
|
| 14 |
+
def import_predictions(predictions):
    """Convert a sequence of Accuracy Checker predictions into a flat list
    of Datumaro annotations (each prediction may yield several)."""
    return [ann
        for pred in predictions
        for ann in import_prediction(pred)]
|
| 24 |
+
def import_prediction(pred):
    """Convert a single Accuracy Checker prediction to Datumaro annotations.

    Returns an iterable of annotations (a tuple or a generator).

    Raises:
        NotImplementedError: for prediction types that have no Datumaro
            counterpart (e.g. HitRatio, MultiLabelRecognition,
            MachineTranslation, QuestionAnswering, PoseEstimation[3d],
            Regression predictions).
    """
    if isinstance(pred, ac.ClassificationPrediction):
        # Raw scores are normalized to probabilities; one Label per class.
        scores = softmax(pred.scores)
        return (dm.Label(label_id, attributes={'score': float(score)})
            for label_id, score in enumerate(scores))
    elif isinstance(pred, ac.ArgMaxClassificationPrediction):
        return (dm.Label(int(pred.label)), )
    elif isinstance(pred, ac.CharacterRecognitionPrediction):
        return (dm.Label(int(pred.label)), )
    elif isinstance(pred, (ac.DetectionPrediction, ac.ActionDetectionPrediction)):
        # BUGFIX: the box label was referenced as `label_id` while the loop
        # variable was named `label`, raising NameError for every detection.
        return (dm.Bbox(x0, y0, x1 - x0, y1 - y0, int(label),
                attributes={'score': float(score)})
            for label, score, x0, y0, x1, y1 in zip(pred.labels, pred.scores,
                pred.x_mins, pred.y_mins, pred.x_maxs, pred.y_maxs)
        )
    elif isinstance(pred, ac.DepthEstimationPrediction):
        return (dm.Mask(pred.depth_map), ) # 2d floating point mask
    elif isinstance(pred, ac.ImageInpaintingPrediction):
        return (dm.Mask(pred.value), ) # an image
    else:
        raise NotImplementedError("Can't convert %s" % type(pred))
|
| 59 |
+
|
| 60 |
+
|
| 61 |
+
|
| 62 |
+
|
testbed/openvinotoolkit__datumaro/datumaro/plugins/coco_format/__init__.py
ADDED
|
File without changes
|
testbed/openvinotoolkit__datumaro/datumaro/plugins/coco_format/converter.py
ADDED
|
@@ -0,0 +1,597 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
|
| 2 |
+
# Copyright (C) 2020 Intel Corporation
|
| 3 |
+
#
|
| 4 |
+
# SPDX-License-Identifier: MIT
|
| 5 |
+
|
| 6 |
+
import json
|
| 7 |
+
import logging as log
|
| 8 |
+
import os
|
| 9 |
+
import os.path as osp
|
| 10 |
+
from enum import Enum
|
| 11 |
+
from itertools import groupby
|
| 12 |
+
|
| 13 |
+
import pycocotools.mask as mask_utils
|
| 14 |
+
|
| 15 |
+
import datumaro.util.annotation_util as anno_tools
|
| 16 |
+
import datumaro.util.mask_tools as mask_tools
|
| 17 |
+
from datumaro.components.converter import Converter
|
| 18 |
+
from datumaro.components.extractor import (_COORDINATE_ROUNDING_DIGITS,
|
| 19 |
+
DEFAULT_SUBSET_NAME, AnnotationType, Points)
|
| 20 |
+
from datumaro.util import cast, find, str_to_bool
|
| 21 |
+
|
| 22 |
+
from .format import CocoPath, CocoTask
|
| 23 |
+
|
| 24 |
+
SegmentationMode = Enum('SegmentationMode', ['guess', 'polygons', 'mask'])
|
| 25 |
+
|
| 26 |
+
class _TaskConverter:
|
| 27 |
+
def __init__(self, context):
|
| 28 |
+
self._min_ann_id = 1
|
| 29 |
+
self._context = context
|
| 30 |
+
|
| 31 |
+
data = {
|
| 32 |
+
'licenses': [],
|
| 33 |
+
'info': {},
|
| 34 |
+
'categories': [],
|
| 35 |
+
'images': [],
|
| 36 |
+
'annotations': []
|
| 37 |
+
}
|
| 38 |
+
|
| 39 |
+
data['licenses'].append({
|
| 40 |
+
'name': '',
|
| 41 |
+
'id': 0,
|
| 42 |
+
'url': ''
|
| 43 |
+
})
|
| 44 |
+
|
| 45 |
+
data['info'] = {
|
| 46 |
+
'contributor': '',
|
| 47 |
+
'date_created': '',
|
| 48 |
+
'description': '',
|
| 49 |
+
'url': '',
|
| 50 |
+
'version': '',
|
| 51 |
+
'year': ''
|
| 52 |
+
}
|
| 53 |
+
self._data = data
|
| 54 |
+
|
| 55 |
+
def is_empty(self):
|
| 56 |
+
return len(self._data['annotations']) == 0
|
| 57 |
+
|
| 58 |
+
def _get_image_id(self, item):
|
| 59 |
+
return self._context._get_image_id(item)
|
| 60 |
+
|
| 61 |
+
def save_image_info(self, item, filename):
|
| 62 |
+
if item.has_image:
|
| 63 |
+
h, w = item.image.size
|
| 64 |
+
else:
|
| 65 |
+
h = 0
|
| 66 |
+
w = 0
|
| 67 |
+
|
| 68 |
+
self._data['images'].append({
|
| 69 |
+
'id': self._get_image_id(item),
|
| 70 |
+
'width': int(w),
|
| 71 |
+
'height': int(h),
|
| 72 |
+
'file_name': cast(filename, str, ''),
|
| 73 |
+
'license': 0,
|
| 74 |
+
'flickr_url': '',
|
| 75 |
+
'coco_url': '',
|
| 76 |
+
'date_captured': 0,
|
| 77 |
+
})
|
| 78 |
+
|
| 79 |
+
def save_categories(self, dataset):
|
| 80 |
+
raise NotImplementedError()
|
| 81 |
+
|
| 82 |
+
def save_annotations(self, item):
|
| 83 |
+
raise NotImplementedError()
|
| 84 |
+
|
| 85 |
+
def write(self, path):
|
| 86 |
+
next_id = self._min_ann_id
|
| 87 |
+
for ann in self.annotations:
|
| 88 |
+
if not ann['id']:
|
| 89 |
+
ann['id'] = next_id
|
| 90 |
+
next_id += 1
|
| 91 |
+
|
| 92 |
+
with open(path, 'w') as outfile:
|
| 93 |
+
json.dump(self._data, outfile)
|
| 94 |
+
|
| 95 |
+
@property
|
| 96 |
+
def annotations(self):
|
| 97 |
+
return self._data['annotations']
|
| 98 |
+
|
| 99 |
+
@property
|
| 100 |
+
def categories(self):
|
| 101 |
+
return self._data['categories']
|
| 102 |
+
|
| 103 |
+
def _get_ann_id(self, annotation):
|
| 104 |
+
ann_id = 0 if self._context._reindex else annotation.id
|
| 105 |
+
if ann_id:
|
| 106 |
+
self._min_ann_id = max(ann_id, self._min_ann_id)
|
| 107 |
+
return ann_id
|
| 108 |
+
|
| 109 |
+
@staticmethod
|
| 110 |
+
def _convert_attributes(ann):
|
| 111 |
+
return { k: v for k, v in ann.attributes.items()
|
| 112 |
+
if k not in {'is_crowd', 'score'}
|
| 113 |
+
}
|
| 114 |
+
|
| 115 |
+
class _ImageInfoConverter(_TaskConverter):
    """Writes only image metadata; no categories or annotations."""

    def is_empty(self):
        # For this task emptiness is judged by image records,
        # not by annotations.
        return not self._data['images']

    def save_categories(self, dataset):
        pass  # image-info files carry no categories

    def save_annotations(self, item):
        pass  # image-info files carry no annotations
|
| 125 |
+
class _CaptionsConverter(_TaskConverter):
    """Writes COCO 'captions' annotations."""

    def save_categories(self, dataset):
        pass  # captions have no categories

    def save_annotations(self, item):
        """Append one COCO record per caption annotation of the item."""
        for ann_idx, ann in enumerate(item.annotations):
            if ann.type != AnnotationType.caption:
                continue

            elem = {
                'id': self._get_ann_id(ann),
                'image_id': self._get_image_id(item),
                'category_id': 0, # NOTE: workaround for a bug in cocoapi
                'caption': ann.caption,
            }
            if 'score' in ann.attributes:
                try:
                    elem['score'] = float(ann.attributes['score'])
                except Exception as e:
                    # BUGFIX: the message used '%e' (a float-only conversion)
                    # for the exception object, which made the warning itself
                    # raise TypeError; '%s' formats the exception correctly.
                    log.warning("Item '%s', ann #%s: failed to convert "
                        "attribute 'score': %s" % (item.id, ann_idx, e))
            if self._context._allow_attributes:
                elem['attributes'] = self._convert_attributes(ann)

            self.annotations.append(elem)
|
| 151 |
+
class _InstancesConverter(_TaskConverter):
    """Writes COCO 'instances' annotations (boxes, polygons, masks)."""

    def save_categories(self, dataset):
        """Fill 'categories' from the dataset's label categories
        (COCO category ids are 1-based)."""
        label_categories = dataset.categories().get(AnnotationType.label)
        if label_categories is None:
            return

        for idx, cat in enumerate(label_categories.items):
            self.categories.append({
                'id': 1 + idx,
                'name': cast(cat.name, str, ''),
                'supercategory': cast(cat.parent, str, ''),
            })

    @classmethod
    def crop_segments(cls, instances, img_width, img_height):
        """Crop overlapping segments by z-order so instances don't cover
        each other; updates the polygons/mask slots of each instance
        in place and returns the (re-sorted) list."""
        instances = sorted(instances, key=lambda x: x[0].z_order)

        # Flatten all segments into one list, remembering which
        # instance each came from.
        segment_map = []
        segments = []
        for inst_idx, (_, polygons, mask, _) in enumerate(instances):
            if polygons:
                segment_map.extend(inst_idx for p in polygons)
                segments.extend(polygons)
            elif mask is not None:
                segment_map.append(inst_idx)
                segments.append(mask)

        segments = mask_tools.crop_covered_segments(
            segments, img_width, img_height)

        # Distribute the cropped segments back to their instances.
        for inst_idx, inst in enumerate(instances):
            new_segments = [s for si_id, s in zip(segment_map, segments)
                if si_id == inst_idx]

            if not new_segments:
                # Fully covered: the instance keeps its bbox but loses
                # its segmentation.
                inst[1] = []
                inst[2] = None
                continue

            if inst[1]:
                # Polygon-based instance: concatenate the cropped polygons.
                inst[1] = sum(new_segments, [])
            else:
                # Mask-based instance: merge and re-encode as RLE.
                mask = mask_tools.merge_masks(new_segments)
                inst[2] = mask_tools.mask_to_rle(mask)

        return instances

    def find_instance_parts(self, group, img_width, img_height):
        """Collapse one annotation group into [leader, polygons, mask, bbox],
        converting between polygon and mask form per the segmentation mode."""
        boxes = [a for a in group if a.type == AnnotationType.bbox]
        polygons = [a for a in group if a.type == AnnotationType.polygon]
        masks = [a for a in group if a.type == AnnotationType.mask]

        anns = boxes + polygons + masks
        leader = anno_tools.find_group_leader(anns)
        bbox = anno_tools.max_bbox(anns)
        mask = None
        polygons = [p.points for p in polygons]

        if self._context._segmentation_mode == SegmentationMode.guess:
            # 'is_crowd' may hold a non-bool value, hence the explicit
            # comparison with True; when absent, the presence of a mask
            # with the leader's label decides.
            use_masks = True == leader.attributes.get('is_crowd',
                find(masks, lambda x: x.label == leader.label) is not None)
        elif self._context._segmentation_mode == SegmentationMode.polygons:
            use_masks = False
        elif self._context._segmentation_mode == SegmentationMode.mask:
            use_masks = True
        else:
            raise NotImplementedError("Unexpected segmentation mode '%s'" % \
                self._context._segmentation_mode)

        if use_masks:
            if polygons:
                mask = mask_tools.rles_to_mask(polygons, img_width, img_height)

            if masks:
                # NOTE(review): `mask` here is a plain array while `masks`
                # holds annotation objects with an `.image` attribute —
                # confirm merge_masks handles the mixed list.
                if mask is not None:
                    masks += [mask]
                mask = mask_tools.merge_masks([m.image for m in masks])

            if mask is not None:
                mask = mask_tools.mask_to_rle(mask)
            polygons = []
        else:
            if masks:
                mask = mask_tools.merge_masks([m.image for m in masks])
                polygons += mask_tools.mask_to_polygons(mask)
            mask = None

        return [leader, polygons, mask, bbox]

    @staticmethod
    def find_instance_anns(annotations):
        """Annotations that can form an instance: boxes, polygons, masks."""
        return [a for a in annotations
            if a.type in { AnnotationType.bbox,
                AnnotationType.polygon, AnnotationType.mask }
        ]

    @classmethod
    def find_instances(cls, annotations):
        """Group instance-forming annotations into instances."""
        return anno_tools.find_instances(cls.find_instance_anns(annotations))

    def save_annotations(self, item):
        """Convert and append all instances of the item."""
        instances = self.find_instances(item.annotations)
        if not instances:
            return

        if not item.has_image:
            # BUGFIX: log.warn is deprecated; use log.warning.
            log.warning("Item '%s': skipping writing instances "
                "since no image info available" % item.id)
            return
        h, w = item.image.size
        instances = [self.find_instance_parts(i, w, h) for i in instances]

        if self._context._crop_covered:
            instances = self.crop_segments(instances, w, h)

        for instance in instances:
            elem = self.convert_instance(instance, item)
            if elem:
                self.annotations.append(elem)

    def convert_instance(self, instance, item):
        """Build a COCO annotation dict from one [ann, polygons, mask, bbox]
        instance; masks become crowd RLE, polygons stay polygons."""
        ann, polygons, mask, bbox = instance

        is_crowd = mask is not None
        if is_crowd:
            segmentation = {
                'counts': list(int(c) for c in mask['counts']),
                'size': list(int(c) for c in mask['size'])
            }
        else:
            segmentation = [list(map(float, p)) for p in polygons]

        area = 0
        if segmentation:
            if item.has_image:
                h, w = item.image.size
            else:
                # NOTE: here we can guess the image size as
                # it is only needed for the area computation
                w = bbox[0] + bbox[2]
                h = bbox[1] + bbox[3]

            rles = mask_utils.frPyObjects(segmentation, h, w)
            if is_crowd:
                rles = [rles]
            else:
                rles = mask_utils.merge(rles)
            area = mask_utils.area(rles)
        else:
            _, _, w, h = bbox
            segmentation = []
            area = w * h

        elem = {
            'id': self._get_ann_id(ann),
            'image_id': self._get_image_id(item),
            'category_id': cast(ann.label, int, -1) + 1,
            'segmentation': segmentation,
            'area': float(area),
            'bbox': [round(float(n), _COORDINATE_ROUNDING_DIGITS) for n in bbox],
            'iscrowd': int(is_crowd),
        }
        if 'score' in ann.attributes:
            try:
                elem['score'] = float(ann.attributes['score'])
            except Exception as e:
                # BUGFIX: '%e' (a float-only conversion) was applied to the
                # exception object, making the warning raise TypeError;
                # '%s' formats it correctly.
                log.warning("Item '%s': failed to convert attribute "
                    "'score': %s" % (item.id, e))
        if self._context._allow_attributes:
            elem['attributes'] = self._convert_attributes(ann)

        return elem
|
| 324 |
+
class _KeypointsConverter(_InstancesConverter):
    """Writes COCO 'person_keypoints' annotations: instance records
    augmented with 'keypoints' / 'num_keypoints' fields."""

    def save_categories(self, dataset):
        """Fill 'categories' with label entries, attaching keypoint names
        and skeleton edges when point metadata is available."""
        label_categories = dataset.categories().get(AnnotationType.label)
        if label_categories is None:
            return
        point_categories = dataset.categories().get(AnnotationType.points)

        for idx, label_cat in enumerate(label_categories.items):
            cat = {
                'id': 1 + idx,  # COCO category ids are 1-based
                'name': cast(label_cat.name, str, ''),
                'supercategory': cast(label_cat.parent, str, ''),
                'keypoints': [],
                'skeleton': [],
            }

            if point_categories is not None:
                kp_cat = point_categories.items.get(idx)
                if kp_cat is not None:
                    cat.update({
                        'keypoints': [str(l) for l in kp_cat.labels],
                        'skeleton': [list(map(int, j)) for j in kp_cat.joints],
                    })
            self.categories.append(cat)

    def save_annotations(self, item):
        """Append keypoint records for the item, covering both solitary
        keypoints and keypoints attached to complete instances."""
        point_annotations = [a for a in item.annotations
            if a.type == AnnotationType.points]
        if not point_annotations:
            return

        # Create annotations for solitary keypoints annotations
        for points in self.find_solitary_points(item.annotations):
            instance = [points, [], None, points.get_bbox()]
            elem = super().convert_instance(instance, item)
            elem.update(self.convert_points_object(points))
            self.annotations.append(elem)

        # Create annotations for complete instance + keypoints annotations
        super().save_annotations(item)

    @classmethod
    def find_solitary_points(cls, annotations):
        """Return Points annotations that do not belong to an instance:
        ungrouped points, or groups without any instance-type annotation."""
        annotations = sorted(annotations, key=lambda a: a.group)
        solitary_points = []

        for g_id, group in groupby(annotations, lambda a: a.group):
            # groupby() yields a shared, single-pass iterator. Materialize
            # it first: previously find_instance_anns() consumed the
            # iterator, so the points filter below saw an exhausted
            # sequence and solitary points of grouped annotations were
            # silently dropped.
            group = list(group)
            if not g_id or not cls.find_instance_anns(group):
                group = [a for a in group if a.type == AnnotationType.points]
                solitary_points.extend(group)

        return solitary_points

    @staticmethod
    def convert_points_object(ann):
        """Convert a Points annotation to COCO keypoint fields: a flat
        [x, y, visibility, ...] list plus the count of annotated points."""
        keypoints = []
        points = ann.points
        visibility = ann.visibility
        for index in range(0, len(points), 2):
            kp = points[index : index + 2]
            state = visibility[index // 2].value
            keypoints.extend([*kp, state])

        num_annotated = len([v for v in visibility
            if v != Points.Visibility.absent])

        return {
            'keypoints': keypoints,
            'num_keypoints': num_annotated,
        }

    def convert_instance(self, instance, item):
        """Build an instance record and merge in the keypoints of the
        Points annotation sharing the instance's group, if any."""
        points_ann = find(item.annotations, lambda x: \
            x.type == AnnotationType.points and \
            instance[0].group and x.group == instance[0].group)
        if not points_ann:
            return None

        elem = super().convert_instance(instance, item)
        elem.update(self.convert_points_object(points_ann))

        return elem
class _LabelsConverter(_TaskConverter):
    """Writes COCO 'labels' (image-level classification) annotations."""

    def save_categories(self, dataset):
        """Fill the 'categories' section from the dataset's label catalog."""
        labels = dataset.categories().get(AnnotationType.label)
        if labels is None:
            return

        self.categories.extend({
            'id': 1 + index,
            'name': cast(entry.name, str, ''),
            'supercategory': cast(entry.parent, str, ''),
        } for index, entry in enumerate(labels.items))

    def save_annotations(self, item):
        """Append one record per Label annotation of the item."""
        label_anns = (a for a in item.annotations
            if a.type == AnnotationType.label)

        for ann in label_anns:
            record = {
                'id': self._get_ann_id(ann),
                'image_id': self._get_image_id(item),
                'category_id': int(ann.label) + 1,
            }
            if 'score' in ann.attributes:
                # A non-numeric score is logged and skipped, not fatal.
                try:
                    record['score'] = float(ann.attributes['score'])
                except Exception as e:
                    log.warning("Item '%s': failed to convert attribute "
                        "'score': %e" % (item.id, e))
            if self._context._allow_attributes:
                record['attributes'] = self._convert_attributes(ann)

            self.annotations.append(record)
class CocoConverter(Converter):
    """Exports a dataset in MS COCO format: one JSON file per
    (task, subset) pair under 'annotations/' and, optionally, images
    under 'images/'."""

    @staticmethod
    def _split_tasks_string(s):
        # Parse a comma-separated CLI value like "instances,captions"
        # into a list of CocoTask enum members.
        return [CocoTask[i.strip()] for i in s.split(',')]

    @classmethod
    def build_cmdline_parser(cls, **kwargs):
        """Extend the base converter's CLI parser with COCO options."""
        parser = super().build_cmdline_parser(**kwargs)
        parser.add_argument('--segmentation-mode',
            choices=[m.name for m in SegmentationMode],
            default=SegmentationMode.guess.name,
            help="""
                Save mode for instance segmentation:|n
                - '{sm.guess.name}': guess the mode for each instance,|n
                |s|suse 'is_crowd' attribute as hint|n
                - '{sm.polygons.name}': save polygons,|n
                |s|smerge and convert masks, prefer polygons|n
                - '{sm.mask.name}': save masks,|n
                |s|smerge and convert polygons, prefer masks|n
                Default: %(default)s.
                """.format(sm=SegmentationMode))
        parser.add_argument('--crop-covered', action='store_true',
            help="Crop covered segments so that background objects' "
                "segmentation was more accurate (default: %(default)s)")
        parser.add_argument('--allow-attributes',
            type=str_to_bool, default=True,
            help="Allow export of attributes (default: %(default)s)")
        parser.add_argument('--reindex', action='store_true',
            help="Assign new indices to images and annotations "
                "(default: %(default)s)")
        parser.add_argument('--tasks', type=cls._split_tasks_string,
            help="COCO task filter, comma-separated list of {%s} "
                "(default: all)" % ', '.join(t.name for t in CocoTask))
        return parser

    DEFAULT_IMAGE_EXT = CocoPath.IMAGE_EXT

    # Maps each COCO task to the writer class producing its JSON file.
    _TASK_CONVERTER = {
        CocoTask.image_info: _ImageInfoConverter,
        CocoTask.instances: _InstancesConverter,
        CocoTask.person_keypoints: _KeypointsConverter,
        CocoTask.captions: _CaptionsConverter,
        CocoTask.labels: _LabelsConverter,
    }

    def __init__(self, extractor, save_dir,
            tasks=None, segmentation_mode=None, crop_covered=False,
            allow_attributes=True, reindex=False, **kwargs):
        """
        Parameters:
            extractor: source dataset to export
            save_dir: output directory
            tasks: CocoTask, task name, or a list of either;
                None means "all supported tasks"
            segmentation_mode: SegmentationMode member or its name;
                None defaults to 'guess'
            crop_covered: crop occluded parts of background segments
            allow_attributes: export annotation attributes
            reindex: assign fresh numeric ids instead of reusing the
                'id' attributes of source items
        """
        super().__init__(extractor, save_dir, **kwargs)

        # Normalize 'tasks' to a list of CocoTask members (or None = all).
        assert tasks is None or isinstance(tasks, (CocoTask, list, str))
        if isinstance(tasks, CocoTask):
            tasks = [tasks]
        elif isinstance(tasks, str):
            tasks = [CocoTask[tasks]]
        elif tasks:
            for i, t in enumerate(tasks):
                if isinstance(t, str):
                    tasks[i] = CocoTask[t]
                else:
                    assert t in CocoTask, t
        self._tasks = tasks

        # Normalize 'segmentation_mode' to a SegmentationMode member.
        assert segmentation_mode is None or \
            isinstance(segmentation_mode, str) or \
            segmentation_mode in SegmentationMode
        if segmentation_mode is None:
            segmentation_mode = SegmentationMode.guess
        if isinstance(segmentation_mode, str):
            segmentation_mode = SegmentationMode[segmentation_mode]
        self._segmentation_mode = segmentation_mode

        self._crop_covered = crop_covered
        self._allow_attributes = allow_attributes
        self._reindex = reindex

        # item id (string) -> exported numeric image id
        self._image_ids = {}

    def _make_dirs(self):
        # Create the standard COCO output layout: images/ and annotations/.
        self._images_dir = osp.join(self._save_dir, CocoPath.IMAGES_DIR)
        os.makedirs(self._images_dir, exist_ok=True)

        self._ann_dir = osp.join(self._save_dir, CocoPath.ANNOTATIONS_DIR)
        os.makedirs(self._ann_dir, exist_ok=True)

    def _make_task_converter(self, task):
        # Instantiate the per-task writer; unknown tasks are unsupported.
        if task not in self._TASK_CONVERTER:
            raise NotImplementedError()
        return self._TASK_CONVERTER[task](self)

    def _make_task_converters(self):
        # Build writers for the requested tasks (all tasks by default).
        return { task: self._make_task_converter(task)
            for task in (self._tasks or self._TASK_CONVERTER) }

    def _get_image_id(self, item):
        """Return a stable numeric id for the item, allocating one on
        first use. Without 'reindex', the item's 'id' attribute is
        preferred when it casts to int; otherwise a sequential id is
        assigned."""
        image_id = self._image_ids.get(item.id)
        if image_id is None:
            if not self._reindex:
                image_id = cast(item.attributes.get('id'), int,
                    len(self._image_ids) + 1)
            else:
                image_id = len(self._image_ids) + 1
            self._image_ids[item.id] = image_id
        return image_id

    def _save_image(self, item, path=None):
        # NOTE(review): the 'path' argument is accepted but ignored here —
        # the image is always written under the converter's images dir.
        # Confirm against the base Converter contract before honoring it.
        super()._save_image(item,
            osp.join(self._images_dir, self._make_image_filename(item)))

    def apply(self):
        """Run the export: for every subset, save categories, items
        (with optional images), and one JSON file per non-empty task."""
        self._make_dirs()

        for subset_name, subset in self._extractor.subsets().items():
            task_converters = self._make_task_converters()
            for task_conv in task_converters.values():
                task_conv.save_categories(subset)
            for item in subset:
                if self._save_images:
                    if item.has_image:
                        self._save_image(item)
                    else:
                        log.debug("Item '%s' has no image info", item.id)
                for task_conv in task_converters.values():
                    task_conv.save_image_info(item,
                        self._make_image_filename(item))
                    task_conv.save_annotations(item)

            for task, task_conv in task_converters.items():
                # Skip empty files unless the task was requested explicitly.
                if task_conv.is_empty() and not self._tasks:
                    continue
                task_conv.write(osp.join(self._ann_dir,
                    '%s_%s.json' % (task.name, subset_name)))
class CocoInstancesConverter(CocoConverter):
    """CocoConverter fixed to the 'instances' task only."""

    def __init__(self, *args, **kwargs):
        kwargs.update(tasks=CocoTask.instances)
        super().__init__(*args, **kwargs)
class CocoImageInfoConverter(CocoConverter):
    """CocoConverter fixed to the 'image_info' task only."""

    def __init__(self, *args, **kwargs):
        kwargs.update(tasks=CocoTask.image_info)
        super().__init__(*args, **kwargs)
class CocoPersonKeypointsConverter(CocoConverter):
    """CocoConverter fixed to the 'person_keypoints' task only."""

    def __init__(self, *args, **kwargs):
        kwargs.update(tasks=CocoTask.person_keypoints)
        super().__init__(*args, **kwargs)
class CocoCaptionsConverter(CocoConverter):
    """CocoConverter fixed to the 'captions' task only."""

    def __init__(self, *args, **kwargs):
        kwargs.update(tasks=CocoTask.captions)
        super().__init__(*args, **kwargs)
class CocoLabelsConverter(CocoConverter):
    """CocoConverter fixed to the 'labels' task only."""

    def __init__(self, *args, **kwargs):
        kwargs.update(tasks=CocoTask.labels)
        super().__init__(*args, **kwargs)
|
testbed/openvinotoolkit__datumaro/datumaro/plugins/coco_format/extractor.py
ADDED
|
@@ -0,0 +1,251 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
|
| 2 |
+
# Copyright (C) 2019-2020 Intel Corporation
|
| 3 |
+
#
|
| 4 |
+
# SPDX-License-Identifier: MIT
|
| 5 |
+
|
| 6 |
+
from collections import OrderedDict
|
| 7 |
+
import logging as log
|
| 8 |
+
import os.path as osp
|
| 9 |
+
|
| 10 |
+
from pycocotools.coco import COCO
|
| 11 |
+
import pycocotools.mask as mask_utils
|
| 12 |
+
|
| 13 |
+
from datumaro.components.extractor import (SourceExtractor,
|
| 14 |
+
DEFAULT_SUBSET_NAME, DatasetItem,
|
| 15 |
+
AnnotationType, Label, RleMask, Points, Polygon, Bbox, Caption,
|
| 16 |
+
LabelCategories, PointsCategories
|
| 17 |
+
)
|
| 18 |
+
from datumaro.util.image import Image
|
| 19 |
+
|
| 20 |
+
from .format import CocoTask, CocoPath
|
| 21 |
+
|
| 22 |
+
|
| 23 |
+
class _CocoExtractor(SourceExtractor):
    """Reads one COCO annotation JSON file (one subset) for a given task."""

    def __init__(self, path, task, merge_instance_polygons=False):
        """
        Parameters:
            path: path to a '<task>_<subset>.json' annotation file
            task: CocoTask to parse annotations for
            merge_instance_polygons: merge an instance's polygon parts
                into a single RLE mask instead of separate polygons
        """
        assert osp.isfile(path), path

        # The subset name is encoded in the file name: '<task>_<subset>.json'.
        subset = osp.splitext(osp.basename(path))[0].rsplit('_', maxsplit=1)
        subset = subset[1] if len(subset) == 2 else None
        super().__init__(subset=subset)

        # Derive the dataset root and the matching images directory from
        # the standard layout: <root>/annotations/<file>.json with images
        # in <root>/images[/<subset>].
        rootpath = ''
        if path.endswith(osp.join(CocoPath.ANNOTATIONS_DIR, osp.basename(path))):
            rootpath = path.rsplit(CocoPath.ANNOTATIONS_DIR, maxsplit=1)[0]
        images_dir = ''
        if rootpath and osp.isdir(osp.join(rootpath, CocoPath.IMAGES_DIR)):
            images_dir = osp.join(rootpath, CocoPath.IMAGES_DIR)
            if osp.isdir(osp.join(images_dir, subset or DEFAULT_SUBSET_NAME)):
                images_dir = osp.join(images_dir, subset or DEFAULT_SUBSET_NAME)
        self._images_dir = images_dir
        self._task = task

        self._merge_instance_polygons = merge_instance_polygons

        loader = self._make_subset_loader(path)
        self._load_categories(loader)
        self._items = list(self._load_items(loader).values())

    @staticmethod
    def _make_subset_loader(path):
        # COCO API has an 'unclosed file' warning, so the JSON is loaded
        # manually and handed to an empty COCO() instance instead.
        coco_api = COCO()
        with open(path, 'r') as f:
            import json
            dataset = json.load(f)

        coco_api.dataset = dataset
        coco_api.createIndex()
        return coco_api

    def _load_categories(self, loader):
        """Populate self._categories for the current task."""
        self._categories = {}

        if self._task in [CocoTask.instances, CocoTask.labels,
                CocoTask.person_keypoints,
                # TODO: Task.stuff, CocoTask.panoptic
                ]:
            label_categories, label_map = self._load_label_categories(loader)
            self._categories[AnnotationType.label] = label_categories
            # COCO category id -> 0-based label index
            self._label_map = label_map

        if self._task == CocoTask.person_keypoints:
            person_kp_categories = self._load_person_kp_categories(loader)
            self._categories[AnnotationType.points] = person_kp_categories

    # pylint: disable=no-self-use
    def _load_label_categories(self, loader):
        """Build label categories plus a COCO id -> label index map."""
        catIds = loader.getCatIds()
        cats = loader.loadCats(catIds)

        categories = LabelCategories()
        label_map = {}
        for idx, cat in enumerate(cats):
            label_map[cat['id']] = idx
            categories.add(name=cat['name'], parent=cat.get('supercategory'))

        return categories, label_map
    # pylint: enable=no-self-use

    def _load_person_kp_categories(self, loader):
        """Build keypoint categories (point names and skeletons) per label."""
        catIds = loader.getCatIds()
        cats = loader.loadCats(catIds)

        categories = PointsCategories()
        for cat in cats:
            label_id = self._label_map[cat['id']]
            categories.add(label_id=label_id,
                labels=cat['keypoints'], joints=cat['skeleton']
            )

        return categories

    def _load_items(self, loader):
        """Read all images with their annotations into DatasetItems,
        keyed and ordered by COCO image id."""
        items = OrderedDict()

        for img_id in loader.getImgIds():
            image_info = loader.loadImgs(img_id)[0]
            image_path = osp.join(self._images_dir, image_info['file_name'])
            # Use the declared size only when both dimensions are present.
            image_size = (image_info.get('height'), image_info.get('width'))
            if all(image_size):
                image_size = (int(image_size[0]), int(image_size[1]))
            else:
                image_size = None
            image = Image(path=image_path, size=image_size)

            anns = loader.getAnnIds(imgIds=img_id)
            anns = loader.loadAnns(anns)
            # One COCO record can expand into several annotations.
            anns = sum((self._load_annotations(a, image_info) for a in anns), [])

            items[img_id] = DatasetItem(
                id=osp.splitext(image_info['file_name'])[0],
                subset=self._subset, image=image, annotations=anns,
                attributes={'id': img_id})

        return items

    def _get_label_id(self, ann):
        # Category id 0 / missing maps to "no label".
        cat_id = ann.get('category_id')
        if cat_id in [0, None]:
            return None
        return self._label_map[cat_id]

    def _load_annotations(self, ann, image_info=None):
        """Convert one COCO annotation record into a list of annotations
        (e.g. an instance may yield points, polygons/mask, or a bbox)."""
        parsed_annotations = []

        ann_id = ann.get('id')

        attributes = {}
        if 'attributes' in ann:
            try:
                attributes.update(ann['attributes'])
            except Exception as e:
                log.debug("item #%s: failed to read annotation attributes: %s",
                    image_info['id'], e)
        if 'score' in ann:
            attributes['score'] = ann['score']

        group = ann_id # make sure all tasks' annotations are merged

        if self._task in [CocoTask.instances, CocoTask.person_keypoints]:
            x, y, w, h = ann['bbox']
            label_id = self._get_label_id(ann)

            is_crowd = bool(ann['iscrowd'])
            attributes['is_crowd'] = is_crowd

            if self._task is CocoTask.person_keypoints:
                # keypoints come as a flat [x, y, visibility, ...] list
                keypoints = ann['keypoints']
                points = [p for i, p in enumerate(keypoints) if i % 3 != 2]
                visibility = keypoints[2::3]
                parsed_annotations.append(
                    Points(points, visibility, label=label_id,
                        id=ann_id, attributes=attributes, group=group)
                )

            segmentation = ann.get('segmentation')
            if segmentation and segmentation != [[]]:
                rle = None

                if isinstance(segmentation, list):
                    if not self._merge_instance_polygons:
                        # polygon - a single object can consist of multiple parts
                        for polygon_points in segmentation:
                            parsed_annotations.append(Polygon(
                                points=polygon_points, label=label_id,
                                id=ann_id, attributes=attributes, group=group
                            ))
                    else:
                        # merge all parts into a single mask RLE
                        img_h = image_info['height']
                        img_w = image_info['width']
                        rles = mask_utils.frPyObjects(segmentation, img_h, img_w)
                        rle = mask_utils.merge(rles)
                elif isinstance(segmentation['counts'], list):
                    # uncompressed RLE
                    img_h = image_info['height']
                    img_w = image_info['width']
                    mask_h, mask_w = segmentation['size']
                    if img_h == mask_h and img_w == mask_w:
                        rle = mask_utils.frPyObjects(
                            [segmentation], mask_h, mask_w)[0]
                    else:
                        log.warning("item #%s: mask #%s "
                            "does not match image size: %s vs. %s. "
                            "Skipping this annotation.",
                            image_info['id'], ann_id,
                            (mask_h, mask_w), (img_h, img_w)
                        )
                else:
                    # compressed RLE
                    rle = segmentation

                if rle is not None:
                    parsed_annotations.append(RleMask(rle=rle, label=label_id,
                        id=ann_id, attributes=attributes, group=group
                    ))
            else:
                # no segmentation - fall back to the bounding box
                parsed_annotations.append(
                    Bbox(x, y, w, h, label=label_id,
                        id=ann_id, attributes=attributes, group=group)
                )
        elif self._task is CocoTask.labels:
            label_id = self._get_label_id(ann)
            parsed_annotations.append(
                Label(label=label_id,
                    id=ann_id, attributes=attributes, group=group)
            )
        elif self._task is CocoTask.captions:
            caption = ann['caption']
            parsed_annotations.append(
                Caption(caption,
                    id=ann_id, attributes=attributes, group=group)
            )
        else:
            raise NotImplementedError()

        return parsed_annotations
class CocoImageInfoExtractor(_CocoExtractor):
    """_CocoExtractor fixed to 'image_info' annotation files."""

    def __init__(self, path, **kwargs):
        kwargs.update(task=CocoTask.image_info)
        super().__init__(path, **kwargs)
class CocoCaptionsExtractor(_CocoExtractor):
    """_CocoExtractor fixed to 'captions' annotation files."""

    def __init__(self, path, **kwargs):
        kwargs.update(task=CocoTask.captions)
        super().__init__(path, **kwargs)
class CocoInstancesExtractor(_CocoExtractor):
    """_CocoExtractor fixed to 'instances' annotation files."""

    def __init__(self, path, **kwargs):
        kwargs.update(task=CocoTask.instances)
        super().__init__(path, **kwargs)
class CocoPersonKeypointsExtractor(_CocoExtractor):
    """_CocoExtractor fixed to 'person_keypoints' annotation files."""

    def __init__(self, path, **kwargs):
        kwargs.update(task=CocoTask.person_keypoints)
        super().__init__(path, **kwargs)
class CocoLabelsExtractor(_CocoExtractor):
    """_CocoExtractor fixed to 'labels' annotation files."""

    def __init__(self, path, **kwargs):
        kwargs.update(task=CocoTask.labels)
        super().__init__(path, **kwargs)
|
testbed/openvinotoolkit__datumaro/datumaro/plugins/coco_format/format.py
ADDED
|
@@ -0,0 +1,23 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
|
| 2 |
+
# Copyright (C) 2019-2020 Intel Corporation
|
| 3 |
+
#
|
| 4 |
+
# SPDX-License-Identifier: MIT
|
| 5 |
+
|
| 6 |
+
from enum import Enum
|
| 7 |
+
|
| 8 |
+
|
| 9 |
+
class CocoTask(Enum):
    """COCO annotation task types handled by the format plugins.

    Values are explicit 1-based integers, matching what the functional
    Enum('CocoTask', [...]) API would assign.
    """
    instances = 1
    person_keypoints = 2
    captions = 3
    labels = 4  # extension, does not exist in the original COCO format
    image_info = 5
    # 'panoptic' and 'stuff' are not supported yet
|
| 18 |
+
|
| 19 |
+
class CocoPath:
    """Constants of the standard COCO directory layout."""
    # Images live in <root>/images, annotation JSONs in <root>/annotations.
    IMAGES_DIR = 'images'
    ANNOTATIONS_DIR = 'annotations'

    # Default extension used when exporting images.
    IMAGE_EXT = '.jpg'
|
testbed/openvinotoolkit__datumaro/datumaro/plugins/coco_format/importer.py
ADDED
|
@@ -0,0 +1,95 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
|
| 2 |
+
# Copyright (C) 2019-2020 Intel Corporation
|
| 3 |
+
#
|
| 4 |
+
# SPDX-License-Identifier: MIT
|
| 5 |
+
|
| 6 |
+
from collections import defaultdict
|
| 7 |
+
from glob import glob
|
| 8 |
+
import logging as log
|
| 9 |
+
import os.path as osp
|
| 10 |
+
|
| 11 |
+
from datumaro.components.extractor import Importer
|
| 12 |
+
from datumaro.util.log_utils import logging_disabled
|
| 13 |
+
|
| 14 |
+
from .format import CocoTask
|
| 15 |
+
|
| 16 |
+
|
| 17 |
+
class CocoImporter(Importer):
    """Discovers COCO annotation files at a path and registers each as
    a source of a new project."""

    # Maps a COCO task to the name of the extractor plugin that reads it.
    _COCO_EXTRACTORS = {
        CocoTask.instances: 'coco_instances',
        CocoTask.person_keypoints: 'coco_person_keypoints',
        CocoTask.captions: 'coco_captions',
        CocoTask.labels: 'coco_labels',
        CocoTask.image_info: 'coco_image_info',
    }

    @classmethod
    def detect(cls, path):
        """Return True when 'path' contains a recognizable COCO dataset."""
        with logging_disabled(log.WARN):
            return len(cls.find_sources(path)) != 0

    def __call__(self, path, **extra_params):
        """Build a project with one source per discovered annotation file.

        Raises:
            Exception: if no COCO annotation files are found at 'path'.
        """
        from datumaro.components.project import Project # cyclic import
        project = Project()

        subsets = self.find_sources(path)

        if not subsets:
            raise Exception("Failed to find 'coco' dataset at '%s'" % path)

        # TODO: should be removed when proper label merging is implemented
        conflicting_types = {CocoTask.instances,
            CocoTask.person_keypoints, CocoTask.labels}
        ann_types = set(t for s in subsets.values() for t in s) \
            & conflicting_types
        # Initialize explicitly so the loop below never sees an unbound
        # name, even though conflicting files imply a non-empty ann_types.
        selected_ann_type = None
        if ann_types:
            selected_ann_type = sorted(ann_types, key=lambda x: x.name)[0]
        if 1 < len(ann_types):
            log.warning("Not implemented: "
                "Found potentially conflicting source types with labels: %s. "
                "Only one type will be used: %s",
                ", ".join(t.name for t in ann_types), selected_ann_type.name)

        for ann_files in subsets.values():
            for ann_type, ann_file in ann_files.items():
                if ann_type in conflicting_types:
                    if ann_type is not selected_ann_type:
                        log.warning("Not implemented: "
                            "conflicting source '%s' is skipped.", ann_file)
                        continue
                log.info("Found a dataset at '%s'", ann_file)

                source_name = osp.splitext(osp.basename(ann_file))[0]
                project.add_source(source_name, {
                    'url': ann_file,
                    'format': self._COCO_EXTRACTORS[ann_type],
                    'options': dict(extra_params),
                })

        return project

    @staticmethod
    def find_sources(path):
        """Find '<task>_<subset>.json' files under 'path' (a single .json
        file is also accepted) and group them as {subset: {task: path}}."""
        if path.endswith('.json') and osp.isfile(path):
            subset_paths = [path]
        else:
            subset_paths = glob(osp.join(path, '**', '*_*.json'),
                recursive=True)

        subsets = defaultdict(dict)
        for subset_path in subset_paths:
            name_parts = osp.splitext(osp.basename(subset_path))[0] \
                .rsplit('_', maxsplit=1)

            ann_type = name_parts[0]
            try:
                ann_type = CocoTask[ann_type]
            except KeyError:
                # log.warn is a deprecated alias of log.warning
                log.warning("Skipping '%s': unknown subset "
                    "type '%s', the only known are: %s",
                    subset_path, ann_type,
                    ', '.join(e.name for e in CocoTask))
                continue
            subset_name = name_parts[1]
            subsets[subset_name][ann_type] = subset_path
        return dict(subsets)
|
testbed/openvinotoolkit__datumaro/datumaro/plugins/cvat_format/__init__.py
ADDED
|
File without changes
|
testbed/openvinotoolkit__datumaro/datumaro/plugins/cvat_format/converter.py
ADDED
|
@@ -0,0 +1,332 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
|
| 2 |
+
# Copyright (C) 2019-2020 Intel Corporation
|
| 3 |
+
#
|
| 4 |
+
# SPDX-License-Identifier: MIT
|
| 5 |
+
|
| 6 |
+
import logging as log
|
| 7 |
+
import os
|
| 8 |
+
import os.path as osp
|
| 9 |
+
from collections import OrderedDict
|
| 10 |
+
from xml.sax.saxutils import XMLGenerator
|
| 11 |
+
|
| 12 |
+
from datumaro.components.converter import Converter
|
| 13 |
+
from datumaro.components.extractor import DEFAULT_SUBSET_NAME, AnnotationType
|
| 14 |
+
from datumaro.util import cast, pairs
|
| 15 |
+
|
| 16 |
+
from .format import CvatPath
|
| 17 |
+
|
| 18 |
+
|
| 19 |
+
class XmlAnnotationWriter:
|
| 20 |
+
VERSION = '1.1'
|
| 21 |
+
|
| 22 |
+
def __init__(self, f):
|
| 23 |
+
self.xmlgen = XMLGenerator(f, 'utf-8')
|
| 24 |
+
self._level = 0
|
| 25 |
+
|
| 26 |
+
def _indent(self, newline = True):
|
| 27 |
+
if newline:
|
| 28 |
+
self.xmlgen.ignorableWhitespace('\n')
|
| 29 |
+
self.xmlgen.ignorableWhitespace(' ' * self._level)
|
| 30 |
+
|
| 31 |
+
def _add_version(self):
|
| 32 |
+
self._indent()
|
| 33 |
+
self.xmlgen.startElement('version', {})
|
| 34 |
+
self.xmlgen.characters(self.VERSION)
|
| 35 |
+
self.xmlgen.endElement('version')
|
| 36 |
+
|
| 37 |
+
def open_root(self):
|
| 38 |
+
self.xmlgen.startDocument()
|
| 39 |
+
self.xmlgen.startElement('annotations', {})
|
| 40 |
+
self._level += 1
|
| 41 |
+
self._add_version()
|
| 42 |
+
|
| 43 |
+
def _add_meta(self, meta):
|
| 44 |
+
self._level += 1
|
| 45 |
+
for k, v in meta.items():
|
| 46 |
+
if isinstance(v, OrderedDict):
|
| 47 |
+
self._indent()
|
| 48 |
+
self.xmlgen.startElement(k, {})
|
| 49 |
+
self._add_meta(v)
|
| 50 |
+
self._indent()
|
| 51 |
+
self.xmlgen.endElement(k)
|
| 52 |
+
elif isinstance(v, list):
|
| 53 |
+
self._indent()
|
| 54 |
+
self.xmlgen.startElement(k, {})
|
| 55 |
+
for tup in v:
|
| 56 |
+
self._add_meta(OrderedDict([tup]))
|
| 57 |
+
self._indent()
|
| 58 |
+
self.xmlgen.endElement(k)
|
| 59 |
+
else:
|
| 60 |
+
self._indent()
|
| 61 |
+
self.xmlgen.startElement(k, {})
|
| 62 |
+
self.xmlgen.characters(v)
|
| 63 |
+
self.xmlgen.endElement(k)
|
| 64 |
+
self._level -= 1
|
| 65 |
+
|
| 66 |
+
def write_meta(self, meta):
|
| 67 |
+
self._indent()
|
| 68 |
+
self.xmlgen.startElement('meta', {})
|
| 69 |
+
self._add_meta(meta)
|
| 70 |
+
self._indent()
|
| 71 |
+
self.xmlgen.endElement('meta')
|
| 72 |
+
|
| 73 |
+
def open_track(self, track):
|
| 74 |
+
self._indent()
|
| 75 |
+
self.xmlgen.startElement('track', track)
|
| 76 |
+
self._level += 1
|
| 77 |
+
|
| 78 |
+
def open_image(self, image):
|
| 79 |
+
self._indent()
|
| 80 |
+
self.xmlgen.startElement('image', image)
|
| 81 |
+
self._level += 1
|
| 82 |
+
|
| 83 |
+
def open_box(self, box):
|
| 84 |
+
self._indent()
|
| 85 |
+
self.xmlgen.startElement('box', box)
|
| 86 |
+
self._level += 1
|
| 87 |
+
|
| 88 |
+
def open_polygon(self, polygon):
|
| 89 |
+
self._indent()
|
| 90 |
+
self.xmlgen.startElement('polygon', polygon)
|
| 91 |
+
self._level += 1
|
| 92 |
+
|
| 93 |
+
def open_polyline(self, polyline):
|
| 94 |
+
self._indent()
|
| 95 |
+
self.xmlgen.startElement('polyline', polyline)
|
| 96 |
+
self._level += 1
|
| 97 |
+
|
| 98 |
+
def open_points(self, points):
|
| 99 |
+
self._indent()
|
| 100 |
+
self.xmlgen.startElement('points', points)
|
| 101 |
+
self._level += 1
|
| 102 |
+
|
| 103 |
+
def open_tag(self, tag):
|
| 104 |
+
self._indent()
|
| 105 |
+
self.xmlgen.startElement("tag", tag)
|
| 106 |
+
self._level += 1
|
| 107 |
+
|
| 108 |
+
def add_attribute(self, attribute):
|
| 109 |
+
self._indent()
|
| 110 |
+
self.xmlgen.startElement('attribute', {'name': attribute['name']})
|
| 111 |
+
self.xmlgen.characters(attribute['value'])
|
| 112 |
+
self.xmlgen.endElement('attribute')
|
| 113 |
+
|
| 114 |
+
def _close_element(self, element):
|
| 115 |
+
self._level -= 1
|
| 116 |
+
self._indent()
|
| 117 |
+
self.xmlgen.endElement(element)
|
| 118 |
+
|
| 119 |
+
    def close_box(self):
        """Close the currently open <box> element."""
        self._close_element('box')
    def close_polygon(self):
        """Close the currently open <polygon> element."""
        self._close_element('polygon')
    def close_polyline(self):
        """Close the currently open <polyline> element."""
        self._close_element('polyline')
    def close_points(self):
        """Close the currently open <points> element."""
        self._close_element('points')
    def close_tag(self):
        """Close the currently open <tag> element."""
        self._close_element('tag')
    def close_image(self):
        """Close the currently open <image> element."""
        self._close_element('image')
    def close_track(self):
        """Close the currently open <track> element."""
        self._close_element('track')
    def close_root(self):
        """Close the top-level <annotations> element and end the document."""
        self._close_element('annotations')
        self.xmlgen.endDocument()
class _SubsetWriter:
    """Serializes one subset of a dataset into a single CVAT XML file."""

    def __init__(self, file, name, extractor, context):
        # *context* is the owning converter; this class reads its
        # _reindex, _save_images, _images_dir and _save_image() members.
        self._writer = XmlAnnotationWriter(file)
        self._name = name
        self._extractor = extractor
        self._context = context

    def write(self):
        """Write the whole subset: root element, meta section, every item."""
        self._writer.open_root()
        self._write_meta()

        for index, item in enumerate(self._extractor):
            self._write_item(item, index)

        self._writer.close_root()

    def _write_item(self, item, index):
        """Write one dataset item as an <image> element with its annotations."""
        # Unless reindexing was requested, prefer the frame number stored
        # in the item's 'frame' attribute over the enumeration index.
        if not self._context._reindex:
            index = cast(item.attributes.get('frame'), int, index)
        image_info = OrderedDict([ ("id", str(index)), ])
        filename = item.id + CvatPath.IMAGE_EXT
        image_info["name"] = filename
        if item.has_image:
            size = item.image.size
            if size:
                # Image size is stored as (height, width).
                h, w = size
                image_info["width"] = str(w)
                image_info["height"] = str(h)

            if self._context._save_images:
                self._context._save_image(item,
                    osp.join(self._context._images_dir, filename))
        else:
            log.debug("Item '%s' has no image info", item.id)
        self._writer.open_image(image_info)

        # Only shapes and labels are representable in CVAT XML;
        # any other annotation type is silently skipped.
        for ann in item.annotations:
            if ann.type in {AnnotationType.points, AnnotationType.polyline,
                    AnnotationType.polygon, AnnotationType.bbox}:
                self._write_shape(ann)
            elif ann.type == AnnotationType.label:
                self._write_tag(ann)
            else:
                continue

        self._writer.close_image()

    def _write_meta(self):
        """Write the CVAT <meta> task description, including the label list."""
        label_cat = self._extractor.categories()[AnnotationType.label]
        # OrderedDicts and lists of pairs mirror the nesting produced by
        # XmlAnnotationWriter.write_meta().
        meta = OrderedDict([
            ("task", OrderedDict([
                ("id", ""),
                ("name", self._name),
                ("size", str(len(self._extractor))),
                ("mode", "annotation"),
                ("overlap", ""),
                ("start_frame", "0"),
                ("stop_frame", str(len(self._extractor))),
                ("frame_filter", ""),
                ("z_order", "True"),

                ("labels", [
                    ("label", OrderedDict([
                        ("name", label.name),
                        ("attributes", [
                            ("attribute", OrderedDict([
                                ("name", attr),
                                ("mutable", "True"),
                                ("input_type", "text"),
                                ("default_value", ""),
                                ("values", ""),
                            ])) for attr in label.attributes
                        ])
                    ])) for label in label_cat.items
                ]),
            ])),
        ])
        self._writer.write_meta(meta)

    def _get_label(self, label_id):
        """Return the label category entry for the given numeric label id."""
        label_cat = self._extractor.categories()[AnnotationType.label]
        return label_cat.items[label_id]

    def _write_shape(self, shape):
        """Write one geometric annotation (box/polygon/polyline/points)."""
        # Unlabeled shapes cannot be expressed in CVAT XML; skip them.
        if shape.label is None:
            return

        shape_data = OrderedDict([
            ("label", self._get_label(shape.label).name),
            ("occluded", str(int(shape.attributes.get('occluded', False)))),
        ])

        if shape.type == AnnotationType.bbox:
            # Boxes use corner coordinates; other shapes use a point list.
            shape_data.update(OrderedDict([
                ("xtl", "{:.2f}".format(shape.points[0])),
                ("ytl", "{:.2f}".format(shape.points[1])),
                ("xbr", "{:.2f}".format(shape.points[2])),
                ("ybr", "{:.2f}".format(shape.points[3]))
            ]))
        else:
            shape_data.update(OrderedDict([
                ("points", ';'.join((
                    ','.join((
                        "{:.2f}".format(x),
                        "{:.2f}".format(y)
                    )) for x, y in pairs(shape.points))
                )),
            ]))

        shape_data['z_order'] = str(int(shape.z_order))
        if shape.group:
            shape_data['group_id'] = str(shape.group)

        if shape.type == AnnotationType.bbox:
            self._writer.open_box(shape_data)
        elif shape.type == AnnotationType.polygon:
            self._writer.open_polygon(shape_data)
        elif shape.type == AnnotationType.polyline:
            self._writer.open_polyline(shape_data)
        elif shape.type == AnnotationType.points:
            self._writer.open_points(shape_data)
        else:
            raise NotImplementedError("unknown shape type")

        # Only attributes declared for this label in the categories are
        # written; booleans become 'true'/'false' strings.
        for attr_name, attr_value in shape.attributes.items():
            if isinstance(attr_value, bool):
                attr_value = 'true' if attr_value else 'false'
            if attr_name in self._get_label(shape.label).attributes:
                self._writer.add_attribute(OrderedDict([
                    ("name", str(attr_name)),
                    ("value", str(attr_value)),
                ]))

        if shape.type == AnnotationType.bbox:
            self._writer.close_box()
        elif shape.type == AnnotationType.polygon:
            self._writer.close_polygon()
        elif shape.type == AnnotationType.polyline:
            self._writer.close_polyline()
        elif shape.type == AnnotationType.points:
            self._writer.close_points()
        else:
            raise NotImplementedError("unknown shape type")

    def _write_tag(self, label):
        """Write one Label annotation as a CVAT <tag> element."""
        if label.label is None:
            return

        tag_data = OrderedDict([
            ('label', self._get_label(label.label).name),
        ])
        if label.group:
            tag_data['group_id'] = str(label.group)
        self._writer.open_tag(tag_data)

        # Same attribute filtering/encoding as for shapes.
        for attr_name, attr_value in label.attributes.items():
            if isinstance(attr_value, bool):
                attr_value = 'true' if attr_value else 'false'
            if attr_name in self._get_label(label.label).attributes:
                self._writer.add_attribute(OrderedDict([
                    ("name", str(attr_name)),
                    ("value", str(attr_value)),
                ]))

        self._writer.close_tag()
class CvatConverter(Converter):
    """Writes a dataset in CVAT XML format, one annotation file per subset."""

    DEFAULT_IMAGE_EXT = CvatPath.IMAGE_EXT

    @classmethod
    def build_cmdline_parser(cls, **kwargs):
        """Extend the base converter CLI parser with CVAT-specific options."""
        parser = super().build_cmdline_parser(**kwargs)
        parser.add_argument('--reindex', action='store_true',
            help="Assign new indices to frames (default: %(default)s)")
        return parser

    def __init__(self, extractor, save_dir, reindex=False, **kwargs):
        """
        reindex: when True, frame ids are assigned sequentially instead of
            being taken from each item's 'frame' attribute.
        """
        super().__init__(extractor, save_dir, **kwargs)

        self._reindex = reindex

    def apply(self):
        """Export every subset to '<save_dir>/<subset>.xml' (+ images dir)."""
        self._images_dir = osp.join(self._save_dir, CvatPath.IMAGES_DIR)
        os.makedirs(self._images_dir, exist_ok=True)

        for subset_name, subset in self._extractor.subsets().items():
            path = osp.join(self._save_dir, '%s.xml' % subset_name)
            # XMLGenerator declares UTF-8 in the XML prolog, so the file must
            # be opened with UTF-8 explicitly; relying on the platform default
            # encoding corrupts non-ASCII label/attribute text on some systems.
            with open(path, 'w', encoding='utf-8') as f:
                writer = _SubsetWriter(f, subset_name, subset, self)
                writer.write()
testbed/openvinotoolkit__datumaro/datumaro/plugins/cvat_format/extractor.py
ADDED
|
@@ -0,0 +1,311 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
|
| 2 |
+
# Copyright (C) 2019-2020 Intel Corporation
|
| 3 |
+
#
|
| 4 |
+
# SPDX-License-Identifier: MIT
|
| 5 |
+
|
| 6 |
+
from collections import OrderedDict
|
| 7 |
+
import os.path as osp
|
| 8 |
+
from defusedxml import ElementTree
|
| 9 |
+
|
| 10 |
+
from datumaro.components.extractor import (SourceExtractor, DatasetItem,
|
| 11 |
+
AnnotationType, Points, Polygon, PolyLine, Bbox, Label,
|
| 12 |
+
LabelCategories, Importer
|
| 13 |
+
)
|
| 14 |
+
from datumaro.util.image import Image
|
| 15 |
+
|
| 16 |
+
from .format import CvatPath
|
| 17 |
+
|
| 18 |
+
|
| 19 |
+
class CvatExtractor(SourceExtractor):
    """Reads a CVAT XML annotation file into dataset items."""

    # Geometric shape element names recognized inside <image>/<track>.
    _SUPPORTED_SHAPES = ('box', 'polygon', 'polyline', 'points')

    def __init__(self, path):
        assert osp.isfile(path), path
        rootpath = osp.dirname(path)
        # Images are looked up in a sibling images directory, if present.
        images_dir = ''
        if osp.isdir(osp.join(rootpath, CvatPath.IMAGES_DIR)):
            images_dir = osp.join(rootpath, CvatPath.IMAGES_DIR)
        self._images_dir = images_dir
        self._path = path

        # The subset name is the annotation file name without extension.
        super().__init__(subset=osp.splitext(osp.basename(path))[0])

        items, categories = self._parse(path)
        self._items = list(self._load_items(items).values())
        self._categories = categories

    @classmethod
    def _parse(cls, path):
        """Stream-parse the XML file; return ({frame: desc}, categories)."""
        context = ElementTree.iterparse(path, events=("start", "end"))
        context = iter(context)

        # Consumes the <meta> header first; leaves the iterator positioned
        # at the annotation body.
        categories, frame_size = cls._parse_meta(context)

        items = OrderedDict()

        # Current parse state: the enclosing <track>/<image>, the shape or
        # tag being built, and its attribute accumulator.
        track = None
        shape = None
        tag = None
        attributes = None
        image = None
        for ev, el in context:
            if ev == 'start':
                if el.tag == 'track':
                    track = {
                        'id': el.attrib['id'],
                        'label': el.attrib.get('label'),
                        'group': int(el.attrib.get('group_id', 0)),
                        # Tracks carry no size of their own; use the task's
                        # original frame size from the meta section.
                        'height': frame_size[0],
                        'width': frame_size[1],
                    }
                elif el.tag == 'image':
                    image = {
                        'name': el.attrib.get('name'),
                        'frame': el.attrib['id'],
                        'width': el.attrib.get('width'),
                        'height': el.attrib.get('height'),
                    }
                elif el.tag in cls._SUPPORTED_SHAPES and (track or image):
                    attributes = {}
                    shape = {
                        'type': None,
                        'attributes': attributes,
                    }
                    if track:
                        shape.update(track)
                        shape['track_id'] = int(track['id'])
                    if image:
                        shape.update(image)
                elif el.tag == 'tag' and image:
                    attributes = {}
                    tag = {
                        'frame': image['frame'],
                        'attributes': attributes,
                        'group': int(el.attrib.get('group_id', 0)),
                        'label': el.attrib['label'],
                    }
            elif ev == 'end':
                if el.tag == 'attribute' and attributes is not None:
                    # Attribute values are decoded as bool, then float,
                    # falling back to the raw string.
                    attr_value = el.text or ''
                    if el.text in ['true', 'false']:
                        attr_value = attr_value == 'true'
                    else:
                        try:
                            attr_value = float(attr_value)
                        except ValueError:
                            pass
                    attributes[el.attrib['name']] = attr_value
                elif el.tag in cls._SUPPORTED_SHAPES:
                    # Track shapes carry per-frame interpolation flags;
                    # image shapes carry their own label/group.
                    if track is not None:
                        shape['frame'] = el.attrib['frame']
                        shape['outside'] = (el.attrib.get('outside') == '1')
                        shape['keyframe'] = (el.attrib.get('keyframe') == '1')
                    if image is not None:
                        shape['label'] = el.attrib.get('label')
                        shape['group'] = int(el.attrib.get('group_id', 0))

                    shape['type'] = el.tag
                    shape['occluded'] = (el.attrib.get('occluded') == '1')
                    shape['z_order'] = int(el.attrib.get('z_order', 0))

                    if el.tag == 'box':
                        shape['points'] = list(map(float, [
                            el.attrib['xtl'], el.attrib['ytl'],
                            el.attrib['xbr'], el.attrib['ybr'],
                        ]))
                    else:
                        # 'x1,y1;x2,y2;...' -> flat [x1, y1, x2, y2, ...]
                        shape['points'] = []
                        for pair in el.attrib['points'].split(';'):
                            shape['points'].extend(map(float, pair.split(',')))

                    frame_desc = items.get(shape['frame'], {'annotations': []})
                    frame_desc['annotations'].append(
                        cls._parse_shape_ann(shape, categories))
                    items[shape['frame']] = frame_desc
                    shape = None

                elif el.tag == 'tag':
                    frame_desc = items.get(tag['frame'], {'annotations': []})
                    frame_desc['annotations'].append(
                        cls._parse_tag_ann(tag, categories))
                    items[tag['frame']] = frame_desc
                    tag = None
                elif el.tag == 'track':
                    track = None
                elif el.tag == 'image':
                    frame_desc = items.get(image['frame'], {'annotations': []})
                    frame_desc.update({
                        'name': image.get('name'),
                        'height': image.get('height'),
                        'width': image.get('width'),
                    })
                    items[image['frame']] = frame_desc
                    image = None
                # Free the element's children to keep memory bounded while
                # streaming large files.
                el.clear()

        return items, categories

    @staticmethod
    def _parse_meta(context):
        """Parse the <meta> header; return (categories, frame_size).

        Consumes events from *context* until the meta section ends or the
        first annotation element is encountered.
        """
        ev, el = next(context)
        if not (ev == 'start' and el.tag == 'annotations'):
            raise Exception("Unexpected token ")

        categories = {}

        frame_size = None
        mode = None
        labels = OrderedDict()
        label = None

        # Recursive descent parser
        # States are tracked on an explicit stack; accepted() pushes on a
        # matching start event, consumed() pops on the matching end event.
        el = None
        states = ['annotations']
        def accepted(expected_state, tag, next_state=None):
            state = states[-1]
            if state == expected_state and el is not None and el.tag == tag:
                if not next_state:
                    next_state = tag
                states.append(next_state)
                return True
            return False
        def consumed(expected_state, tag):
            state = states[-1]
            if state == expected_state and el is not None and el.tag == tag:
                states.pop()
                return True
            return False

        for ev, el in context:
            if ev == 'start':
                if accepted('annotations', 'meta'): pass
                elif accepted('meta', 'task'): pass
                elif accepted('task', 'mode'): pass
                elif accepted('task', 'original_size'):
                    frame_size = [None, None]
                elif accepted('original_size', 'height', next_state='frame_height'): pass
                elif accepted('original_size', 'width', next_state='frame_width'): pass
                elif accepted('task', 'labels'): pass
                elif accepted('labels', 'label'):
                    label = { 'name': None, 'attributes': set() }
                elif accepted('label', 'name', next_state='label_name'): pass
                elif accepted('label', 'attributes'): pass
                elif accepted('attributes', 'attribute'): pass
                elif accepted('attribute', 'name', next_state='attr_name'): pass
                elif accepted('annotations', 'image') or \
                     accepted('annotations', 'track') or \
                     accepted('annotations', 'tag'):
                    # No meta section (or it ended implicitly) -- stop here
                    # so the caller can parse the annotation body.
                    break
                else:
                    pass
            elif ev == 'end':
                if consumed('meta', 'meta'):
                    break
                elif consumed('task', 'task'): pass
                elif consumed('mode', 'mode'):
                    mode = el.text
                elif consumed('original_size', 'original_size'): pass
                elif consumed('frame_height', 'height'):
                    frame_size[0] = int(el.text)
                elif consumed('frame_width', 'width'):
                    frame_size[1] = int(el.text)
                elif consumed('label_name', 'name'):
                    label['name'] = el.text
                elif consumed('attr_name', 'name'):
                    label['attributes'].add(el.text)
                elif consumed('attribute', 'attribute'): pass
                elif consumed('attributes', 'attributes'): pass
                elif consumed('label', 'label'):
                    labels[label['name']] = label['attributes']
                    label = None
                elif consumed('labels', 'labels'): pass
                else:
                    pass

        assert len(states) == 1 and states[0] == 'annotations', \
            "Expected 'meta' section in the annotation file, path: %s" % states

        # Attributes common to every label; interpolation mode adds
        # track-related flags.
        common_attrs = ['occluded']
        if mode == 'interpolation':
            common_attrs.append('keyframe')
            common_attrs.append('outside')
            common_attrs.append('track_id')

        label_cat = LabelCategories(attributes=common_attrs)
        for label, attrs in labels.items():
            label_cat.add(label, attributes=attrs)

        categories[AnnotationType.label] = label_cat

        return categories, frame_size

    @classmethod
    def _parse_shape_ann(cls, ann, categories):
        """Convert a parsed shape dict into an annotation object."""
        ann_id = ann.get('id', 0)
        ann_type = ann['type']

        attributes = ann.get('attributes') or {}
        # Only forward flags that the categories declared as attributes.
        if 'occluded' in categories[AnnotationType.label].attributes:
            attributes['occluded'] = ann.get('occluded', False)
        if 'outside' in ann:
            attributes['outside'] = ann['outside']
        if 'keyframe' in ann:
            attributes['keyframe'] = ann['keyframe']
        if 'track_id' in ann:
            attributes['track_id'] = ann['track_id']

        group = ann.get('group')

        label = ann.get('label')
        label_id = categories[AnnotationType.label].find(label)[0]

        z_order = ann.get('z_order', 0)
        points = ann.get('points', [])

        if ann_type == 'polyline':
            return PolyLine(points, label=label_id, z_order=z_order,
                id=ann_id, attributes=attributes, group=group)

        elif ann_type == 'polygon':
            return Polygon(points, label=label_id, z_order=z_order,
                id=ann_id, attributes=attributes, group=group)

        elif ann_type == 'points':
            return Points(points, label=label_id, z_order=z_order,
                id=ann_id, attributes=attributes, group=group)

        elif ann_type == 'box':
            # CVAT stores corner coordinates; Bbox wants (x, y, w, h).
            x, y = points[0], points[1]
            w, h = points[2] - x, points[3] - y
            return Bbox(x, y, w, h, label=label_id, z_order=z_order,
                id=ann_id, attributes=attributes, group=group)

        else:
            raise NotImplementedError("Unknown annotation type '%s'" % ann_type)

    @classmethod
    def _parse_tag_ann(cls, ann, categories):
        """Convert a parsed tag dict into a Label annotation."""
        label = ann.get('label')
        label_id = categories[AnnotationType.label].find(label)[0]
        group = ann.get('group')
        attributes = ann.get('attributes')
        return Label(label_id, attributes=attributes, group=group)

    def _load_items(self, parsed):
        """Replace each frame description in *parsed* with a DatasetItem."""
        for frame_id, item_desc in parsed.items():
            # Fall back to a synthetic name when the <image> had none.
            name = item_desc.get('name', 'frame_%06d.png' % int(frame_id))
            image = osp.join(self._images_dir, name)
            image_size = (item_desc.get('height'), item_desc.get('width'))
            if all(image_size):
                image = Image(path=image, size=tuple(map(int, image_size)))

            parsed[frame_id] = DatasetItem(id=osp.splitext(name)[0],
                subset=self._subset, image=image,
                annotations=item_desc.get('annotations'),
                attributes={'frame': int(frame_id)})
        return parsed
class CvatImporter(Importer):
    """Detects CVAT datasets by their .xml annotation files."""

    @classmethod
    def find_sources(cls, path):
        """Return a source config for each CVAT XML file found under *path*."""
        return cls._find_sources_recursive(path, '.xml', 'cvat')
testbed/openvinotoolkit__datumaro/datumaro/plugins/datumaro_format/extractor.py
ADDED
|
@@ -0,0 +1,152 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
|
| 2 |
+
# Copyright (C) 2019-2020 Intel Corporation
|
| 3 |
+
#
|
| 4 |
+
# SPDX-License-Identifier: MIT
|
| 5 |
+
|
| 6 |
+
import json
|
| 7 |
+
import os.path as osp
|
| 8 |
+
|
| 9 |
+
from datumaro.components.extractor import (SourceExtractor, DatasetItem,
|
| 10 |
+
AnnotationType, Label, RleMask, Points, Polygon, PolyLine, Bbox, Caption,
|
| 11 |
+
LabelCategories, MaskCategories, PointsCategories, Importer
|
| 12 |
+
)
|
| 13 |
+
from datumaro.util.image import Image
|
| 14 |
+
|
| 15 |
+
from .format import DatumaroPath
|
| 16 |
+
|
| 17 |
+
|
| 18 |
+
class DatumaroExtractor(SourceExtractor):
    """Reads a Datumaro-format JSON annotation file into dataset items."""

    def __init__(self, path):
        assert osp.isfile(path), path
        # If the file lives in the standard annotations directory, the
        # dataset root (and hence the images directory) can be derived.
        rootpath = ''
        if path.endswith(osp.join(DatumaroPath.ANNOTATIONS_DIR, osp.basename(path))):
            rootpath = path.rsplit(DatumaroPath.ANNOTATIONS_DIR, maxsplit=1)[0]
        images_dir = ''
        if rootpath and osp.isdir(osp.join(rootpath, DatumaroPath.IMAGES_DIR)):
            images_dir = osp.join(rootpath, DatumaroPath.IMAGES_DIR)
        self._images_dir = images_dir

        # Subset name comes from the annotation file name.
        super().__init__(subset=osp.splitext(osp.basename(path))[0])

        with open(path, 'r') as f:
            parsed_anns = json.load(f)
        self._categories = self._load_categories(parsed_anns)
        self._items = self._load_items(parsed_anns)

    @staticmethod
    def _load_categories(parsed):
        """Build {AnnotationType: categories} from the 'categories' section."""
        categories = {}

        parsed_label_cat = parsed['categories'].get(AnnotationType.label.name)
        if parsed_label_cat:
            label_categories = LabelCategories()
            for item in parsed_label_cat['labels']:
                label_categories.add(item['name'], parent=item['parent'])

            categories[AnnotationType.label] = label_categories

        parsed_mask_cat = parsed['categories'].get(AnnotationType.mask.name)
        if parsed_mask_cat:
            # Colormap maps a label id to an (r, g, b) tuple.
            colormap = {}
            for item in parsed_mask_cat['colormap']:
                colormap[int(item['label_id'])] = \
                    (item['r'], item['g'], item['b'])

            mask_categories = MaskCategories(colormap=colormap)
            categories[AnnotationType.mask] = mask_categories

        parsed_points_cat = parsed['categories'].get(AnnotationType.points.name)
        if parsed_points_cat:
            point_categories = PointsCategories()
            for item in parsed_points_cat['items']:
                point_categories.add(int(item['label_id']),
                    item['labels'], joints=item['joints'])

            categories[AnnotationType.points] = point_categories

        return categories

    def _load_items(self, parsed):
        """Convert every entry of the 'items' section into a DatasetItem."""
        items = []
        for item_desc in parsed['items']:
            item_id = item_desc['id']

            image = None
            image_info = item_desc.get('image')
            if image_info:
                # Fall back to '<id><default ext>' when no path is recorded.
                image_path = image_info.get('path') or \
                    item_id + DatumaroPath.IMAGE_EXT
                image_path = osp.join(self._images_dir, image_path)
                image = Image(path=image_path, size=image_info.get('size'))

            annotations = self._load_annotations(item_desc)

            item = DatasetItem(id=item_id, subset=self._subset,
                annotations=annotations, image=image,
                attributes=item_desc.get('attr'))

            items.append(item)

        return items

    @staticmethod
    def _load_annotations(item):
        """Convert the item's 'annotations' list into annotation objects."""
        parsed = item['annotations']
        loaded = []

        for ann in parsed:
            ann_id = ann.get('id')
            ann_type = AnnotationType[ann['type']]
            attributes = ann.get('attributes')
            group = ann.get('group')

            label_id = ann.get('label_id')
            z_order = ann.get('z_order')
            points = ann.get('points')

            if ann_type == AnnotationType.label:
                loaded.append(Label(label=label_id,
                    id=ann_id, attributes=attributes, group=group))

            elif ann_type == AnnotationType.mask:
                rle = ann['rle']
                # COCO-style RLE counts are stored as a string in JSON but
                # expected as bytes by the mask implementation.
                rle['counts'] = rle['counts'].encode('ascii')
                loaded.append(RleMask(rle=rle, label=label_id,
                    id=ann_id, attributes=attributes, group=group,
                    z_order=z_order))

            elif ann_type == AnnotationType.polyline:
                loaded.append(PolyLine(points, label=label_id,
                    id=ann_id, attributes=attributes, group=group,
                    z_order=z_order))

            elif ann_type == AnnotationType.polygon:
                loaded.append(Polygon(points, label=label_id,
                    id=ann_id, attributes=attributes, group=group,
                    z_order=z_order))

            elif ann_type == AnnotationType.bbox:
                x, y, w, h = ann['bbox']
                loaded.append(Bbox(x, y, w, h, label=label_id,
                    id=ann_id, attributes=attributes, group=group,
                    z_order=z_order))

            elif ann_type == AnnotationType.points:
                loaded.append(Points(points, label=label_id,
                    id=ann_id, attributes=attributes, group=group,
                    z_order=z_order))

            elif ann_type == AnnotationType.caption:
                caption = ann.get('caption')
                loaded.append(Caption(caption,
                    id=ann_id, attributes=attributes, group=group))

            else:
                raise NotImplementedError()

        return loaded
class DatumaroImporter(Importer):
    """Detects Datumaro datasets by their .json annotation files."""

    @classmethod
    def find_sources(cls, path):
        """Return a source config for each Datumaro JSON file under *path*."""
        return cls._find_sources_recursive(path, '.json', 'datumaro')
testbed/openvinotoolkit__datumaro/datumaro/plugins/tf_detection_api_format/__init__.py
ADDED
|
File without changes
|
testbed/openvinotoolkit__datumaro/datumaro/plugins/tf_detection_api_format/format.py
ADDED
|
@@ -0,0 +1,13 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
|
| 2 |
+
# Copyright (C) 2019-2020 Intel Corporation
|
| 3 |
+
#
|
| 4 |
+
# SPDX-License-Identifier: MIT
|
| 5 |
+
|
| 6 |
+
class DetectionApiPath:
    """Layout constants for the TF Detection API dataset format."""

    # Subdirectory names inside a dataset root.
    IMAGES_DIR = 'images'
    ANNOTATIONS_DIR = 'annotations'

    # Extension used when no image extension is known, and the mapping
    # from a file extension to the image format name.
    DEFAULT_IMAGE_EXT = '.jpg'
    IMAGE_EXT_FORMAT = {'.jpg': 'jpeg', '.jpeg': 'jpeg', '.png': 'png'}

    # Label map file name (protobuf text format).
    LABELMAP_FILE = 'label_map.pbtxt'
testbed/openvinotoolkit__datumaro/datumaro/plugins/voc_format/extractor.py
ADDED
|
@@ -0,0 +1,296 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
|
| 2 |
+
# Copyright (C) 2019-2020 Intel Corporation
|
| 3 |
+
#
|
| 4 |
+
# SPDX-License-Identifier: MIT
|
| 5 |
+
|
| 6 |
+
from collections import defaultdict
|
| 7 |
+
import logging as log
|
| 8 |
+
import numpy as np
|
| 9 |
+
import os.path as osp
|
| 10 |
+
from defusedxml import ElementTree
|
| 11 |
+
|
| 12 |
+
from datumaro.components.extractor import (SourceExtractor, DatasetItem,
|
| 13 |
+
AnnotationType, Label, Mask, Bbox, CompiledMask
|
| 14 |
+
)
|
| 15 |
+
from datumaro.util import dir_items
|
| 16 |
+
from datumaro.util.image import Image
|
| 17 |
+
from datumaro.util.mask_tools import lazy_mask, invert_colormap
|
| 18 |
+
|
| 19 |
+
from .format import (
|
| 20 |
+
VocTask, VocPath, VocInstColormap, parse_label_map, make_voc_categories
|
| 21 |
+
)
|
| 22 |
+
|
| 23 |
+
|
| 24 |
+
_inverse_inst_colormap = invert_colormap(VocInstColormap)
|
| 25 |
+
|
| 26 |
+
class _VocExtractor(SourceExtractor):
    """Base extractor for Pascal VOC subsets.

    Reads a subset list file (e.g. ImageSets/Main/train.txt), loads the
    dataset categories (from an optional label map file) and provides
    helpers shared by the task-specific extractors.
    """

    def __init__(self, path):
        assert osp.isfile(path), path
        self._path = path
        # The subset file lives at <root>/ImageSets/<task>/<subset>.txt,
        # so the dataset root is three directory levels up.
        self._dataset_dir = osp.dirname(osp.dirname(osp.dirname(path)))

        super().__init__(subset=osp.splitext(osp.basename(path))[0])

        self._categories = self._load_categories(self._dataset_dir)

        label_color = lambda label_idx: \
            self._categories[AnnotationType.mask].colormap.get(label_idx, None)
        # Lazy %-style args: the message is only rendered when DEBUG is on.
        log.debug("Loaded labels: %s", ', '.join(
            "'%s' %s" % (l.name, ('(%s, %s, %s)' % c) if c else '')
            for i, l, c in ((i, l, label_color(i)) for i, l in enumerate(
                self._categories[AnnotationType.label].items
            ))
        ))
        self._items = self._load_subset_list(path)

    def _get_label_id(self, label):
        """Return the index of `label`, failing loudly on unknown names."""
        label_id, _ = self._categories[AnnotationType.label].find(label)
        assert label_id is not None, label
        return label_id

    @staticmethod
    def _load_categories(dataset_path):
        """Build VOC categories, honoring labelmap.txt when present."""
        label_map = None
        label_map_path = osp.join(dataset_path, VocPath.LABELMAP_FILE)
        if osp.isfile(label_map_path):
            label_map = parse_label_map(label_map_path)
        return make_voc_categories(label_map)

    @staticmethod
    def _load_subset_list(subset_path):
        """Read item ids (the first token of each line) from a subset file.

        Blank or whitespace-only lines are skipped — previously they
        caused an IndexError from `line.split()[0]`.
        """
        with open(subset_path) as f:
            return [line.split()[0] for line in f if line.strip()]
|
| 63 |
+
|
| 64 |
+
class VocClassificationExtractor(_VocExtractor):
    """Reads VOC classification annotations (Main/<label>_<subset>.txt)."""

    def __iter__(self):
        labels_by_item = self._load_annotations()
        images_dir = osp.join(self._dataset_dir, VocPath.IMAGES_DIR)
        for item_id in self._items:
            log.debug("Reading item '%s'" % item_id)
            yield DatasetItem(id=item_id, subset=self._subset,
                image=osp.join(images_dir, item_id + VocPath.IMAGE_EXT),
                annotations=self._parse_annotations(labels_by_item, item_id))

    def _load_annotations(self):
        """Collect positive label ids per item from per-label list files."""
        annotations = defaultdict(list)
        task_dir = osp.dirname(self._path)
        subset_filename = osp.basename(self._path)
        for ann_filename in dir_items(task_dir, '.txt'):
            # Only files named "<label>_<subset>.txt" belong to this subset
            if not ann_filename.endswith('_' + subset_filename):
                continue
            with open(osp.join(task_dir, ann_filename)) as f:
                label_id = self._get_label_id(
                    ann_filename[:ann_filename.rfind('_')])
                for line in f:
                    item, present = line.split()
                    if present == '1':
                        annotations[item].append(label_id)

        return dict(annotations)

    @staticmethod
    def _parse_annotations(raw_anns, item_id):
        """Wrap the collected label ids of an item into Label annotations."""
        return [Label(label_id) for label_id in raw_anns.get(item_id, [])]
|
| 94 |
+
|
| 95 |
+
class _VocXmlExtractor(_VocExtractor):
    """Base for VOC tasks stored as per-item XML files (Annotations/*.xml)."""

    def __init__(self, path, task):
        super().__init__(path)
        self._task = task  # VocTask; controls filtering in _parse_annotations

    def __iter__(self):
        anno_dir = osp.join(self._dataset_dir, VocPath.ANNOTATIONS_DIR)

        for item_id in self._items:
            log.debug("Reading item '%s'" % item_id)
            image = item_id + VocPath.IMAGE_EXT
            height, width = 0, 0

            anns = []
            ann_file = osp.join(anno_dir, item_id + '.xml')
            if osp.isfile(ann_file):
                root_elem = ElementTree.parse(ann_file)
                height = root_elem.find('size/height')
                if height is not None:
                    height = int(height.text)
                width = root_elem.find('size/width')
                if width is not None:
                    width = int(width.text)
                # The XML may override the default image file name
                filename_elem = root_elem.find('filename')
                if filename_elem is not None:
                    image = filename_elem.text
                anns = self._parse_annotations(root_elem)

            image = osp.join(self._dataset_dir, VocPath.IMAGES_DIR, image)
            if height and width:
                # Pass the known size to avoid decoding the image to get it
                image = Image(path=image, size=(height, width))

            yield DatasetItem(id=item_id, subset=self._subset,
                image=image, annotations=anns)

    def _parse_annotations(self, root_elem):
        """Extract Bbox annotations (and layout parts) from an item's XML."""
        item_annotations = []

        for obj_id, object_elem in enumerate(root_elem.findall('object')):
            obj_id += 1  # VOC object ids are 1-based
            attributes = {}
            group = obj_id  # parts share their parent object's group

            obj_label_id = None
            label_elem = object_elem.find('name')
            if label_elem is not None:
                obj_label_id = self._get_label_id(label_elem.text)

            obj_bbox = self._parse_bbox(object_elem)

            # Objects without a label or a bounding box can't be represented
            if obj_label_id is None or obj_bbox is None:
                continue

            difficult_elem = object_elem.find('difficult')
            attributes['difficult'] = difficult_elem is not None and \
                difficult_elem.text == '1'

            truncated_elem = object_elem.find('truncated')
            attributes['truncated'] = truncated_elem is not None and \
                truncated_elem.text == '1'

            occluded_elem = object_elem.find('occluded')
            attributes['occluded'] = occluded_elem is not None and \
                occluded_elem.text == '1'

            pose_elem = object_elem.find('pose')
            if pose_elem is not None:
                attributes['pose'] = pose_elem.text

            point_elem = object_elem.find('point')
            if point_elem is not None:
                point_x = point_elem.find('x')
                point_y = point_elem.find('y')
                point = [float(point_x.text), float(point_y.text)]
                attributes['point'] = point

            # All actions declared for the label default to False; the
            # ones listed in the XML override that default.
            actions_elem = object_elem.find('actions')
            actions = {a: False
                for a in self._categories[AnnotationType.label] \
                    .items[obj_label_id].attributes}
            if actions_elem is not None:
                for action_elem in actions_elem:
                    actions[action_elem.tag] = (action_elem.text == '1')
            for action, present in actions.items():
                attributes[action] = present

            has_parts = False
            for part_elem in object_elem.findall('part'):
                part = part_elem.find('name').text
                part_label_id = self._get_label_id(part)
                part_bbox = self._parse_bbox(part_elem)

                # Parts are only kept for the person_layout task; for
                # other tasks the loop exits after the first part.
                if self._task is not VocTask.person_layout:
                    break
                if part_bbox is None:
                    continue
                has_parts = True
                item_annotations.append(Bbox(*part_bbox, label=part_label_id,
                    group=group))

            attributes_elem = object_elem.find('attributes')
            if attributes_elem is not None:
                for attr_elem in attributes_elem.iter('attribute'):
                    attributes[attr_elem.find('name').text] = \
                        attr_elem.find('value').text

            # Task-specific filtering of objects
            if self._task is VocTask.person_layout and not has_parts:
                continue
            if self._task is VocTask.action_classification and not actions:
                continue

            item_annotations.append(Bbox(*obj_bbox, label=obj_label_id,
                attributes=attributes, id=obj_id, group=group))

        return item_annotations

    @staticmethod
    def _parse_bbox(object_elem):
        """Read a <bndbox> element as [x, y, w, h].

        Returns None when the element has no <bndbox> child — callers
        check for this; previously a missing <bndbox> raised an
        AttributeError instead of being skipped.
        """
        bbox_elem = object_elem.find('bndbox')
        if bbox_elem is None:
            return None
        xmin = float(bbox_elem.find('xmin').text)
        xmax = float(bbox_elem.find('xmax').text)
        ymin = float(bbox_elem.find('ymin').text)
        ymax = float(bbox_elem.find('ymax').text)
        return [xmin, ymin, xmax - xmin, ymax - ymin]
|
| 219 |
+
|
| 220 |
+
class VocDetectionExtractor(_VocXmlExtractor):
    """Extractor for the VOC object detection task."""

    def __init__(self, path):
        super().__init__(path, task=VocTask.detection)
|
| 223 |
+
|
| 224 |
+
class VocLayoutExtractor(_VocXmlExtractor):
    """Extractor for the VOC person layout task (objects with body parts)."""

    def __init__(self, path):
        super().__init__(path, task=VocTask.person_layout)
|
| 227 |
+
|
| 228 |
+
class VocActionExtractor(_VocXmlExtractor):
    """Extractor for the VOC action classification task."""

    def __init__(self, path):
        super().__init__(path, task=VocTask.action_classification)
|
| 231 |
+
|
| 232 |
+
class VocSegmentationExtractor(_VocExtractor):
    """Reads VOC segmentation annotations (class and instance masks)."""

    def __iter__(self):
        for item_id in self._items:
            log.debug("Reading item '%s'" % item_id)
            image = osp.join(self._dataset_dir, VocPath.IMAGES_DIR,
                item_id + VocPath.IMAGE_EXT)
            anns = self._load_annotations(item_id)
            yield DatasetItem(id=item_id, subset=self._subset,
                image=image, annotations=anns)

    @staticmethod
    def _lazy_extract_mask(mask, c):
        # Bind `mask` and `c` now; evaluate the comparison only on demand.
        return lambda: mask == c

    def _load_annotations(self, item_id):
        """Build Mask annotations for an item from the available mask files.

        Prefers instance masks (labeled via the class mask when it is
        present); falls back to one mask per class id when only the class
        segmentation exists.
        """
        item_annotations = []

        class_mask = None
        segm_path = osp.join(self._dataset_dir, VocPath.SEGMENTATION_DIR,
            item_id + VocPath.SEGM_EXT)
        if osp.isfile(segm_path):
            inverse_cls_colormap = \
                self._categories[AnnotationType.mask].inverse_colormap
            class_mask = lazy_mask(segm_path, inverse_cls_colormap)

        instances_mask = None
        inst_path = osp.join(self._dataset_dir, VocPath.INSTANCES_DIR,
            item_id + VocPath.SEGM_EXT)
        if osp.isfile(inst_path):
            instances_mask = lazy_mask(inst_path, _inverse_inst_colormap)

        if instances_mask is not None:
            compiled_mask = CompiledMask(class_mask, instances_mask)

            if class_mask is not None:
                label_cat = self._categories[AnnotationType.label]
                instance_labels = compiled_mask.get_instance_labels()
            else:
                # No class mask: instances exist but are unlabeled
                instance_labels = {i: None
                    for i in range(compiled_mask.instance_count)}

            for instance_id, label_id in instance_labels.items():
                image = compiled_mask.lazy_extract(instance_id)

                attributes = {}
                if label_id is not None:
                    # Pre-fill all actions declared for this label as absent
                    actions = {a: False
                        for a in label_cat.items[label_id].attributes
                    }
                    attributes.update(actions)

                item_annotations.append(Mask(
                    image=image, label=label_id,
                    attributes=attributes, group=instance_id
                ))
        elif class_mask is not None:
            # log.warn is a deprecated alias; log.warning is the
            # documented method.
            log.warning("item '%s': has only class segmentation, "
                "instance masks will not be available" % item_id)
            class_mask = class_mask()
            classes = np.unique(class_mask)
            for label_id in classes:
                image = self._lazy_extract_mask(class_mask, label_id)
                item_annotations.append(Mask(image=image, label=label_id))

        return item_annotations
|
testbed/openvinotoolkit__datumaro/datumaro/plugins/yolo_format/converter.py
ADDED
|
@@ -0,0 +1,105 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
|
| 2 |
+
# Copyright (C) 2019-2020 Intel Corporation
|
| 3 |
+
#
|
| 4 |
+
# SPDX-License-Identifier: MIT
|
| 5 |
+
|
| 6 |
+
import logging as log
|
| 7 |
+
import os
|
| 8 |
+
import os.path as osp
|
| 9 |
+
from collections import OrderedDict
|
| 10 |
+
|
| 11 |
+
from datumaro.components.converter import Converter
|
| 12 |
+
from datumaro.components.extractor import AnnotationType, DEFAULT_SUBSET_NAME
|
| 13 |
+
|
| 14 |
+
from .format import YoloPath
|
| 15 |
+
|
| 16 |
+
|
| 17 |
+
def _make_yolo_bbox(img_size, box):
|
| 18 |
+
# https://github.com/pjreddie/darknet/blob/master/scripts/voc_label.py
|
| 19 |
+
# <x> <y> <width> <height> - values relative to width and height of image
|
| 20 |
+
# <x> <y> - are center of rectangle
|
| 21 |
+
x = (box[0] + box[2]) / 2 / img_size[0]
|
| 22 |
+
y = (box[1] + box[3]) / 2 / img_size[1]
|
| 23 |
+
w = (box[2] - box[0]) / img_size[0]
|
| 24 |
+
h = (box[3] - box[1]) / img_size[1]
|
| 25 |
+
return x, y, w, h
|
| 26 |
+
|
| 27 |
+
class YoloConverter(Converter):
    """Writes a dataset in YOLO (darknet) format.

    Layout: obj.data, obj.names, obj_<subset>_data/ with one .txt of
    normalized bboxes per image, and one image list per subset.
    https://github.com/AlexeyAB/darknet#how-to-train-to-detect-your-custom-objects
    """
    DEFAULT_IMAGE_EXT = '.jpg'

    def apply(self):
        extractor = self._extractor
        save_dir = self._save_dir

        os.makedirs(save_dir, exist_ok=True)

        label_categories = extractor.categories()[AnnotationType.label]
        label_ids = {label.name: idx
            for idx, label in enumerate(label_categories.items)}
        # obj.names lists label names ordered by their numeric id
        with open(osp.join(save_dir, 'obj.names'), 'w') as f:
            f.writelines('%s\n' % l[0]
                for l in sorted(label_ids.items(), key=lambda x: x[1]))

        subset_lists = OrderedDict()

        for subset_name, subset in self._extractor.subsets().items():
            if not subset_name or subset_name == DEFAULT_SUBSET_NAME:
                subset_name = YoloPath.DEFAULT_SUBSET_NAME
            elif subset_name not in YoloPath.SUBSET_NAMES:
                # log.warn is a deprecated alias; log.warning is the
                # documented method.
                log.warning("Skipping subset export '%s'. "
                    "If specified, the only valid names are %s" % \
                    (subset_name, ', '.join(
                        "'%s'" % s for s in YoloPath.SUBSET_NAMES)))
                continue

            subset_dir = osp.join(save_dir, 'obj_%s_data' % subset_name)
            os.makedirs(subset_dir, exist_ok=True)

            image_paths = OrderedDict()

            for item in subset:
                if not item.has_image:
                    raise Exception("Failed to export item '%s': "
                        "item has no image info" % item.id)
                height, width = item.image.size

                image_name = self._make_image_filename(item)
                if self._save_images:
                    # has_image is guaranteed by the check above, so only
                    # the presence of pixel data needs to be tested here.
                    if item.image.has_data:
                        self._save_image(item, osp.join(subset_dir, image_name))
                    else:
                        log.warning("Item '%s' has no image" % item.id)
                image_paths[item.id] = osp.join('data',
                    osp.basename(subset_dir), image_name)

                yolo_annotation = ''
                for bbox in item.annotations:
                    if bbox.type is not AnnotationType.bbox:
                        continue
                    if bbox.label is None:
                        continue

                    yolo_bb = _make_yolo_bbox((width, height), bbox.points)
                    yolo_bb = ' '.join('%.6f' % p for p in yolo_bb)
                    yolo_annotation += '%s %s\n' % (bbox.label, yolo_bb)

                annotation_path = osp.join(subset_dir, '%s.txt' % item.id)
                os.makedirs(osp.dirname(annotation_path), exist_ok=True)
                with open(annotation_path, 'w') as f:
                    f.write(yolo_annotation)

            subset_list_name = '%s.txt' % subset_name
            subset_lists[subset_name] = subset_list_name
            with open(osp.join(save_dir, subset_list_name), 'w') as f:
                f.writelines('%s\n' % s for s in image_paths.values())

        # obj.data ties the label names and the subset lists together
        with open(osp.join(save_dir, 'obj.data'), 'w') as f:
            f.write('classes = %s\n' % len(label_ids))

            for subset_name, subset_list_name in subset_lists.items():
                f.write('%s = %s\n' % (subset_name,
                    osp.join('data', subset_list_name)))

            f.write('names = %s\n' % osp.join('data', 'obj.names'))
            f.write('backup = backup/\n')
|
testbed/openvinotoolkit__datumaro/tests/test_project.py
ADDED
|
@@ -0,0 +1,578 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import numpy as np
|
| 2 |
+
import os
|
| 3 |
+
import os.path as osp
|
| 4 |
+
|
| 5 |
+
from unittest import TestCase
|
| 6 |
+
|
| 7 |
+
from datumaro.components.project import Project, Environment, Dataset
|
| 8 |
+
from datumaro.components.config_model import Source, Model
|
| 9 |
+
from datumaro.components.launcher import Launcher, ModelTransform
|
| 10 |
+
from datumaro.components.extractor import (Extractor, DatasetItem,
|
| 11 |
+
Label, Mask, Points, Polygon, PolyLine, Bbox, Caption,
|
| 12 |
+
LabelCategories, AnnotationType
|
| 13 |
+
)
|
| 14 |
+
from datumaro.util.image import Image
|
| 15 |
+
from datumaro.components.config import Config, DefaultConfig, SchemaBuilder
|
| 16 |
+
from datumaro.components.dataset_filter import \
|
| 17 |
+
XPathDatasetFilter, XPathAnnotationsFilter, DatasetItemEncoder
|
| 18 |
+
from datumaro.util.test_utils import TestDir, compare_datasets
|
| 19 |
+
|
| 20 |
+
|
| 21 |
+
class ProjectTest(TestCase):
|
| 22 |
+
def test_project_generate(self):
|
| 23 |
+
src_config = Config({
|
| 24 |
+
'project_name': 'test_project',
|
| 25 |
+
'format_version': 1,
|
| 26 |
+
})
|
| 27 |
+
|
| 28 |
+
with TestDir() as test_dir:
|
| 29 |
+
project_path = test_dir
|
| 30 |
+
Project.generate(project_path, src_config)
|
| 31 |
+
|
| 32 |
+
self.assertTrue(osp.isdir(project_path))
|
| 33 |
+
|
| 34 |
+
result_config = Project.load(project_path).config
|
| 35 |
+
self.assertEqual(
|
| 36 |
+
src_config.project_name, result_config.project_name)
|
| 37 |
+
self.assertEqual(
|
| 38 |
+
src_config.format_version, result_config.format_version)
|
| 39 |
+
|
| 40 |
+
@staticmethod
|
| 41 |
+
def test_default_ctor_is_ok():
|
| 42 |
+
Project()
|
| 43 |
+
|
| 44 |
+
@staticmethod
|
| 45 |
+
def test_empty_config_is_ok():
|
| 46 |
+
Project(Config())
|
| 47 |
+
|
| 48 |
+
def test_add_source(self):
|
| 49 |
+
source_name = 'source'
|
| 50 |
+
origin = Source({
|
| 51 |
+
'url': 'path',
|
| 52 |
+
'format': 'ext'
|
| 53 |
+
})
|
| 54 |
+
project = Project()
|
| 55 |
+
|
| 56 |
+
project.add_source(source_name, origin)
|
| 57 |
+
|
| 58 |
+
added = project.get_source(source_name)
|
| 59 |
+
self.assertIsNotNone(added)
|
| 60 |
+
self.assertEqual(added, origin)
|
| 61 |
+
|
| 62 |
+
def test_added_source_can_be_saved(self):
|
| 63 |
+
source_name = 'source'
|
| 64 |
+
origin = Source({
|
| 65 |
+
'url': 'path',
|
| 66 |
+
})
|
| 67 |
+
project = Project()
|
| 68 |
+
project.add_source(source_name, origin)
|
| 69 |
+
|
| 70 |
+
saved = project.config
|
| 71 |
+
|
| 72 |
+
self.assertEqual(origin, saved.sources[source_name])
|
| 73 |
+
|
| 74 |
+
def test_added_source_can_be_dumped(self):
|
| 75 |
+
source_name = 'source'
|
| 76 |
+
origin = Source({
|
| 77 |
+
'url': 'path',
|
| 78 |
+
})
|
| 79 |
+
project = Project()
|
| 80 |
+
project.add_source(source_name, origin)
|
| 81 |
+
|
| 82 |
+
with TestDir() as test_dir:
|
| 83 |
+
project.save(test_dir)
|
| 84 |
+
|
| 85 |
+
loaded = Project.load(test_dir)
|
| 86 |
+
loaded = loaded.get_source(source_name)
|
| 87 |
+
self.assertEqual(origin, loaded)
|
| 88 |
+
|
| 89 |
+
def test_can_import_with_custom_importer(self):
|
| 90 |
+
class TestImporter:
|
| 91 |
+
def __call__(self, path, subset=None):
|
| 92 |
+
return Project({
|
| 93 |
+
'project_filename': path,
|
| 94 |
+
'subsets': [ subset ]
|
| 95 |
+
})
|
| 96 |
+
|
| 97 |
+
path = 'path'
|
| 98 |
+
importer_name = 'test_importer'
|
| 99 |
+
|
| 100 |
+
env = Environment()
|
| 101 |
+
env.importers.register(importer_name, TestImporter)
|
| 102 |
+
|
| 103 |
+
project = Project.import_from(path, importer_name, env,
|
| 104 |
+
subset='train')
|
| 105 |
+
|
| 106 |
+
self.assertEqual(path, project.config.project_filename)
|
| 107 |
+
self.assertListEqual(['train'], project.config.subsets)
|
| 108 |
+
|
| 109 |
+
def test_can_dump_added_model(self):
|
| 110 |
+
model_name = 'model'
|
| 111 |
+
|
| 112 |
+
project = Project()
|
| 113 |
+
saved = Model({ 'launcher': 'name' })
|
| 114 |
+
project.add_model(model_name, saved)
|
| 115 |
+
|
| 116 |
+
with TestDir() as test_dir:
|
| 117 |
+
project.save(test_dir)
|
| 118 |
+
|
| 119 |
+
loaded = Project.load(test_dir)
|
| 120 |
+
loaded = loaded.get_model(model_name)
|
| 121 |
+
self.assertEqual(saved, loaded)
|
| 122 |
+
|
| 123 |
+
def test_can_have_project_source(self):
|
| 124 |
+
with TestDir() as test_dir:
|
| 125 |
+
Project.generate(test_dir)
|
| 126 |
+
|
| 127 |
+
project2 = Project()
|
| 128 |
+
project2.add_source('project1', {
|
| 129 |
+
'url': test_dir,
|
| 130 |
+
})
|
| 131 |
+
dataset = project2.make_dataset()
|
| 132 |
+
|
| 133 |
+
self.assertTrue('project1' in dataset.sources)
|
| 134 |
+
|
| 135 |
+
def test_can_batch_launch_custom_model(self):
|
| 136 |
+
dataset = Dataset.from_iterable([
|
| 137 |
+
DatasetItem(id=i, subset='train', image=np.array([i]))
|
| 138 |
+
for i in range(5)
|
| 139 |
+
], categories=['label'])
|
| 140 |
+
|
| 141 |
+
class TestLauncher(Launcher):
|
| 142 |
+
def launch(self, inputs):
|
| 143 |
+
for i, inp in enumerate(inputs):
|
| 144 |
+
yield [ Label(0, attributes={'idx': i, 'data': inp.item()}) ]
|
| 145 |
+
|
| 146 |
+
model_name = 'model'
|
| 147 |
+
launcher_name = 'custom_launcher'
|
| 148 |
+
|
| 149 |
+
project = Project()
|
| 150 |
+
project.env.launchers.register(launcher_name, TestLauncher)
|
| 151 |
+
project.add_model(model_name, { 'launcher': launcher_name })
|
| 152 |
+
model = project.make_executable_model(model_name)
|
| 153 |
+
|
| 154 |
+
batch_size = 3
|
| 155 |
+
executor = ModelTransform(dataset, model, batch_size=batch_size)
|
| 156 |
+
|
| 157 |
+
for item in executor:
|
| 158 |
+
self.assertEqual(1, len(item.annotations))
|
| 159 |
+
self.assertEqual(int(item.id) % batch_size,
|
| 160 |
+
item.annotations[0].attributes['idx'])
|
| 161 |
+
self.assertEqual(int(item.id),
|
| 162 |
+
item.annotations[0].attributes['data'])
|
| 163 |
+
|
| 164 |
+
def test_can_do_transform_with_custom_model(self):
|
| 165 |
+
class TestExtractorSrc(Extractor):
|
| 166 |
+
def __iter__(self):
|
| 167 |
+
for i in range(2):
|
| 168 |
+
yield DatasetItem(id=i, image=np.ones([2, 2, 3]) * i,
|
| 169 |
+
annotations=[Label(i)])
|
| 170 |
+
|
| 171 |
+
def categories(self):
|
| 172 |
+
label_cat = LabelCategories()
|
| 173 |
+
label_cat.add('0')
|
| 174 |
+
label_cat.add('1')
|
| 175 |
+
return { AnnotationType.label: label_cat }
|
| 176 |
+
|
| 177 |
+
class TestLauncher(Launcher):
|
| 178 |
+
def launch(self, inputs):
|
| 179 |
+
for inp in inputs:
|
| 180 |
+
yield [ Label(inp[0, 0, 0]) ]
|
| 181 |
+
|
| 182 |
+
class TestExtractorDst(Extractor):
|
| 183 |
+
def __init__(self, url):
|
| 184 |
+
super().__init__()
|
| 185 |
+
self.items = [osp.join(url, p) for p in sorted(os.listdir(url))]
|
| 186 |
+
|
| 187 |
+
def __iter__(self):
|
| 188 |
+
for path in self.items:
|
| 189 |
+
with open(path, 'r') as f:
|
| 190 |
+
index = osp.splitext(osp.basename(path))[0]
|
| 191 |
+
label = int(f.readline().strip())
|
| 192 |
+
yield DatasetItem(id=index, annotations=[Label(label)])
|
| 193 |
+
|
| 194 |
+
model_name = 'model'
|
| 195 |
+
launcher_name = 'custom_launcher'
|
| 196 |
+
extractor_name = 'custom_extractor'
|
| 197 |
+
|
| 198 |
+
project = Project()
|
| 199 |
+
project.env.launchers.register(launcher_name, TestLauncher)
|
| 200 |
+
project.env.extractors.register(extractor_name, TestExtractorSrc)
|
| 201 |
+
project.add_model(model_name, { 'launcher': launcher_name })
|
| 202 |
+
project.add_source('source', { 'format': extractor_name })
|
| 203 |
+
|
| 204 |
+
with TestDir() as test_dir:
|
| 205 |
+
project.make_dataset().apply_model(model=model_name,
|
| 206 |
+
save_dir=test_dir)
|
| 207 |
+
|
| 208 |
+
result = Project.load(test_dir)
|
| 209 |
+
result.env.extractors.register(extractor_name, TestExtractorDst)
|
| 210 |
+
it = iter(result.make_dataset())
|
| 211 |
+
item1 = next(it)
|
| 212 |
+
item2 = next(it)
|
| 213 |
+
self.assertEqual(0, item1.annotations[0].label)
|
| 214 |
+
self.assertEqual(1, item2.annotations[0].label)
|
| 215 |
+
|
| 216 |
+
def test_source_datasets_can_be_merged(self):
|
| 217 |
+
class TestExtractor(Extractor):
|
| 218 |
+
def __init__(self, url, n=0, s=0):
|
| 219 |
+
super().__init__(length=n)
|
| 220 |
+
self.n = n
|
| 221 |
+
self.s = s
|
| 222 |
+
|
| 223 |
+
def __iter__(self):
|
| 224 |
+
for i in range(self.n):
|
| 225 |
+
yield DatasetItem(id=self.s + i, subset='train')
|
| 226 |
+
|
| 227 |
+
e_name1 = 'e1'
|
| 228 |
+
e_name2 = 'e2'
|
| 229 |
+
n1 = 2
|
| 230 |
+
n2 = 4
|
| 231 |
+
|
| 232 |
+
project = Project()
|
| 233 |
+
project.env.extractors.register(e_name1, lambda p: TestExtractor(p, n=n1))
|
| 234 |
+
project.env.extractors.register(e_name2, lambda p: TestExtractor(p, n=n2, s=n1))
|
| 235 |
+
project.add_source('source1', { 'format': e_name1 })
|
| 236 |
+
project.add_source('source2', { 'format': e_name2 })
|
| 237 |
+
|
| 238 |
+
dataset = project.make_dataset()
|
| 239 |
+
|
| 240 |
+
self.assertEqual(n1 + n2, len(dataset))
|
| 241 |
+
|
| 242 |
+
def test_cant_merge_different_categories(self):
|
| 243 |
+
class TestExtractor1(Extractor):
|
| 244 |
+
def __iter__(self):
|
| 245 |
+
return iter([])
|
| 246 |
+
|
| 247 |
+
def categories(self):
|
| 248 |
+
return { AnnotationType.label:
|
| 249 |
+
LabelCategories.from_iterable(['a', 'b']) }
|
| 250 |
+
|
| 251 |
+
class TestExtractor2(Extractor):
|
| 252 |
+
def __iter__(self):
|
| 253 |
+
return iter([])
|
| 254 |
+
|
| 255 |
+
def categories(self):
|
| 256 |
+
return { AnnotationType.label:
|
| 257 |
+
LabelCategories.from_iterable(['b', 'a']) }
|
| 258 |
+
|
| 259 |
+
e_name1 = 'e1'
|
| 260 |
+
e_name2 = 'e2'
|
| 261 |
+
|
| 262 |
+
project = Project()
|
| 263 |
+
project.env.extractors.register(e_name1, TestExtractor1)
|
| 264 |
+
project.env.extractors.register(e_name2, TestExtractor2)
|
| 265 |
+
project.add_source('source1', { 'format': e_name1 })
|
| 266 |
+
project.add_source('source2', { 'format': e_name2 })
|
| 267 |
+
|
| 268 |
+
with self.assertRaisesRegex(Exception, "different categories"):
|
| 269 |
+
project.make_dataset()
|
| 270 |
+
|
| 271 |
+
def test_project_filter_can_be_applied(self):
|
| 272 |
+
class TestExtractor(Extractor):
|
| 273 |
+
def __iter__(self):
|
| 274 |
+
for i in range(10):
|
| 275 |
+
yield DatasetItem(id=i, subset='train')
|
| 276 |
+
|
| 277 |
+
e_type = 'type'
|
| 278 |
+
project = Project()
|
| 279 |
+
project.env.extractors.register(e_type, TestExtractor)
|
| 280 |
+
project.add_source('source', { 'format': e_type })
|
| 281 |
+
|
| 282 |
+
dataset = project.make_dataset().filter('/item[id < 5]')
|
| 283 |
+
|
| 284 |
+
self.assertEqual(5, len(dataset))
|
| 285 |
+
|
| 286 |
+
def test_can_save_and_load_own_dataset(self):
|
| 287 |
+
with TestDir() as test_dir:
|
| 288 |
+
src_project = Project()
|
| 289 |
+
src_dataset = src_project.make_dataset()
|
| 290 |
+
item = DatasetItem(id=1)
|
| 291 |
+
src_dataset.put(item)
|
| 292 |
+
src_dataset.save(test_dir)
|
| 293 |
+
|
| 294 |
+
loaded_project = Project.load(test_dir)
|
| 295 |
+
loaded_dataset = loaded_project.make_dataset()
|
| 296 |
+
|
| 297 |
+
self.assertEqual(list(src_dataset), list(loaded_dataset))
|
| 298 |
+
|
| 299 |
+
def test_project_own_dataset_can_be_modified(self):
|
| 300 |
+
project = Project()
|
| 301 |
+
dataset = project.make_dataset()
|
| 302 |
+
|
| 303 |
+
item = DatasetItem(id=1)
|
| 304 |
+
dataset.put(item)
|
| 305 |
+
|
| 306 |
+
self.assertEqual(item, next(iter(dataset)))
|
| 307 |
+
|
| 308 |
+
def test_project_compound_child_can_be_modified_recursively(self):
|
| 309 |
+
with TestDir() as test_dir:
|
| 310 |
+
child1 = Project({
|
| 311 |
+
'project_dir': osp.join(test_dir, 'child1'),
|
| 312 |
+
})
|
| 313 |
+
child1.save()
|
| 314 |
+
|
| 315 |
+
child2 = Project({
|
| 316 |
+
'project_dir': osp.join(test_dir, 'child2'),
|
| 317 |
+
})
|
| 318 |
+
child2.save()
|
| 319 |
+
|
| 320 |
+
parent = Project()
|
| 321 |
+
parent.add_source('child1', {
|
| 322 |
+
'url': child1.config.project_dir
|
| 323 |
+
})
|
| 324 |
+
parent.add_source('child2', {
|
| 325 |
+
'url': child2.config.project_dir
|
| 326 |
+
})
|
| 327 |
+
dataset = parent.make_dataset()
|
| 328 |
+
|
| 329 |
+
item1 = DatasetItem(id='ch1', path=['child1'])
|
| 330 |
+
item2 = DatasetItem(id='ch2', path=['child2'])
|
| 331 |
+
dataset.put(item1)
|
| 332 |
+
dataset.put(item2)
|
| 333 |
+
|
| 334 |
+
self.assertEqual(2, len(dataset))
|
| 335 |
+
self.assertEqual(1, len(dataset.sources['child1']))
|
| 336 |
+
self.assertEqual(1, len(dataset.sources['child2']))
|
| 337 |
+
|
| 338 |
+
def test_project_can_merge_item_annotations(self):
|
| 339 |
+
class TestExtractor1(Extractor):
|
| 340 |
+
def __iter__(self):
|
| 341 |
+
yield DatasetItem(id=1, subset='train', annotations=[
|
| 342 |
+
Label(2, id=3),
|
| 343 |
+
Label(3, attributes={ 'x': 1 }),
|
| 344 |
+
])
|
| 345 |
+
|
| 346 |
+
class TestExtractor2(Extractor):
|
| 347 |
+
def __iter__(self):
|
| 348 |
+
yield DatasetItem(id=1, subset='train', annotations=[
|
| 349 |
+
Label(3, attributes={ 'x': 1 }),
|
| 350 |
+
Label(4, id=4),
|
| 351 |
+
])
|
| 352 |
+
|
| 353 |
+
project = Project()
|
| 354 |
+
project.env.extractors.register('t1', TestExtractor1)
|
| 355 |
+
project.env.extractors.register('t2', TestExtractor2)
|
| 356 |
+
project.add_source('source1', { 'format': 't1' })
|
| 357 |
+
project.add_source('source2', { 'format': 't2' })
|
| 358 |
+
|
| 359 |
+
merged = project.make_dataset()
|
| 360 |
+
|
| 361 |
+
self.assertEqual(1, len(merged))
|
| 362 |
+
|
| 363 |
+
item = next(iter(merged))
|
| 364 |
+
self.assertEqual(3, len(item.annotations))
|
| 365 |
+
|
| 366 |
+
class DatasetFilterTest(TestCase):
    @staticmethod
    def test_item_representations():
        """Smoke test: encoding must cope with every annotation kind."""
        item = DatasetItem(id=1, subset='subset', path=['a', 'b'],
            image=np.ones((5, 4, 3)),
            annotations=[
                Label(0, attributes={'a1': 1, 'a2': '2'}, id=1, group=2),
                Caption('hello', id=1),
                Caption('world', group=5),
                Label(2, id=3, attributes={ 'x': 1, 'y': '2' }),
                Bbox(1, 2, 3, 4, label=4, id=4, attributes={ 'a': 1.0 }),
                Bbox(5, 6, 7, 8, id=5, group=5),
                Points([1, 2, 2, 0, 1, 1], label=0, id=5),
                Mask(id=5, image=np.ones((3, 2))),
                Mask(label=3, id=5, image=np.ones((2, 3))),
                PolyLine([1, 2, 3, 4, 5, 6, 7, 8], id=11),
                Polygon([1, 2, 3, 4, 5, 6, 7, 8]),
            ]
        )

        # Both steps must complete without raising.
        DatasetItemEncoder.to_string(DatasetItemEncoder.encode(item))

    def test_item_filter_can_be_applied(self):
        """An XPath item filter drops whole items that do not match."""

        class TestExtractor(Extractor):
            def __iter__(self):
                for idx in range(4):
                    yield DatasetItem(id=idx, subset='train')

        # Of ids 0..3, only 2 and 3 satisfy the predicate.
        filtered = XPathDatasetFilter(TestExtractor(), '/item[id > 1]')

        self.assertEqual(2, len(filtered))

    def test_annotations_filter_can_be_applied(self):
        """An annotation filter strips annotations but keeps the items."""

        class SrcExtractor(Extractor):
            def __iter__(self):
                return iter([
                    DatasetItem(id=0),
                    DatasetItem(id=1, annotations=[
                        Label(0),
                        Label(1),
                    ]),
                    DatasetItem(id=2, annotations=[
                        Label(0),
                        Label(2),
                    ]),
                ])

        # Expected result: only label 0 annotations survive, and the
        # annotation-less item 0 is still present.
        class DstExtractor(Extractor):
            def __iter__(self):
                return iter([
                    DatasetItem(id=0),
                    DatasetItem(id=1, annotations=[
                        Label(0),
                    ]),
                    DatasetItem(id=2, annotations=[
                        Label(0),
                    ]),
                ])

        filtered = XPathAnnotationsFilter(SrcExtractor(),
            '/item/annotation[label_id = 0]')

        self.assertListEqual(list(filtered), list(DstExtractor()))

    def test_annotations_filter_can_remove_empty_items(self):
        """With remove_empty=True, items left without annotations vanish."""

        class SrcExtractor(Extractor):
            def __iter__(self):
                return iter([
                    DatasetItem(id=0),
                    DatasetItem(id=1, annotations=[
                        Label(0),
                        Label(1),
                    ]),
                    DatasetItem(id=2, annotations=[
                        Label(0),
                        Label(2),
                    ]),
                ])

        # Only item 2 carries a label 2 annotation; all other items end
        # up empty and are removed.
        class DstExtractor(Extractor):
            def __iter__(self):
                return iter([
                    DatasetItem(id=2, annotations=[
                        Label(2),
                    ]),
                ])

        filtered = XPathAnnotationsFilter(SrcExtractor(),
            '/item/annotation[label_id = 2]', remove_empty=True)

        self.assertListEqual(list(filtered), list(DstExtractor()))
|
| 464 |
+
|
| 465 |
+
class ConfigTest(TestCase):
    def test_can_produce_multilayer_config_from_dict(self):
        """Nested schemas turn a plain dict into layered Config objects."""
        # Three stacked schemas: top holds a container of mid entries,
        # each mid entry holds a low-level 'desc' with free-form options.
        schema_low = SchemaBuilder() \
            .add('options', dict) \
            .build()
        schema_mid = SchemaBuilder() \
            .add('desc', lambda: Config(schema=schema_low)) \
            .build()
        schema_top = SchemaBuilder() \
            .add('container', lambda: DefaultConfig(
                lambda v: Config(v, schema=schema_mid))) \
            .build()

        expected = 1
        source = Config(
            { 'container': { 'elem': { 'desc': { 'options': {
                'k': expected } } } } },
            schema=schema_top)

        # The value placed at the deepest level is reachable through the
        # typed attribute/subscript chain built by the schemas.
        self.assertEqual(expected, source.container['elem'].desc.options['k'])
|
| 492 |
+
|
| 493 |
+
class ExtractorTest(TestCase):
    def test_custom_extractor_can_be_created(self):
        """A registered custom extractor backs a project source verbatim."""

        class CustomExtractor(Extractor):
            def __iter__(self):
                # Three subsets: 'train' (ids 0..2), 'test' (ids 3..4)
                # and the default subset (ids 1..3).
                for idx in range(3):
                    yield DatasetItem(id=idx, subset='train')
                for idx in range(3, 5):
                    yield DatasetItem(id=idx, subset='test')
                for idx in range(1, 4):
                    yield DatasetItem(id=idx)

        extractor_name = 'ext1'
        project = Project()
        project.env.extractors.register(extractor_name, CustomExtractor)
        project.add_source('src1', {
            'url': 'path',
            'format': extractor_name,
        })

        # The project's dataset must reproduce the extractor's items.
        compare_datasets(self, CustomExtractor(), project.make_dataset())
|
| 521 |
+
|
| 522 |
+
class DatasetTest(TestCase):
    def test_create_from_extractors(self):
        """Dataset.from_extractors merges items and their annotations."""

        class SrcExtractor1(Extractor):
            def __iter__(self):
                yield DatasetItem(id=1, subset='train', annotations=[
                    Bbox(1, 2, 3, 4),
                    Label(4),
                ])
                yield DatasetItem(id=1, subset='val', annotations=[
                    Label(4),
                ])

        class SrcExtractor2(Extractor):
            def __iter__(self):
                yield DatasetItem(id=1, subset='val', annotations=[
                    Label(5),
                ])

        # The 'val' item appears in both sources, so its annotations are
        # combined; the 'train' item passes through unchanged.
        class DstExtractor(Extractor):
            def __iter__(self):
                yield DatasetItem(id=1, subset='train', annotations=[
                    Bbox(1, 2, 3, 4),
                    Label(4),
                ])
                yield DatasetItem(id=1, subset='val', annotations=[
                    Label(4),
                    Label(5),
                ])

        dataset = Dataset.from_extractors(SrcExtractor1(), SrcExtractor2())

        compare_datasets(self, DstExtractor(), dataset)
|
| 560 |
+
|
| 561 |
+
|
| 562 |
+
class DatasetItemTest(TestCase):
    def test_ctor_requires_id(self):
        """Constructing an item without an id must fail."""
        with self.assertRaises(Exception):
            # pylint: disable=no-value-for-parameter
            DatasetItem()
            # pylint: enable=no-value-for-parameter

    @staticmethod
    def test_ctors_with_image():
        """Every supported form of the 'image' argument is accepted."""
        for image in [
            None,                              # no image at all
            'path.jpg',                        # lazy path reference
            np.array([1, 2, 3]),               # in-memory array
            lambda f: np.array([1, 2, 3]),     # loader callable
            Image(data=np.array([1, 2, 3])),   # wrapped Image object
        ]:
            DatasetItem(id=0, image=image)
|