hc99 commited on
Commit
d21cb06
·
verified ·
1 Parent(s): 93d7919

Add files using upload-large-folder tool

Browse files
This view is limited to 50 files because it contains too many changes.   See raw diff
Files changed (50) hide show
  1. testbed/openvinotoolkit__datumaro/.bandit +8 -0
  2. testbed/openvinotoolkit__datumaro/.coveragerc +34 -0
  3. testbed/openvinotoolkit__datumaro/.gitattributes +29 -0
  4. testbed/openvinotoolkit__datumaro/.gitignore +57 -0
  5. testbed/openvinotoolkit__datumaro/.pylintrc +420 -0
  6. testbed/openvinotoolkit__datumaro/.travis.yml +37 -0
  7. testbed/openvinotoolkit__datumaro/CHANGELOG.md +145 -0
  8. testbed/openvinotoolkit__datumaro/CONTRIBUTING.md +99 -0
  9. testbed/openvinotoolkit__datumaro/LICENSE +22 -0
  10. testbed/openvinotoolkit__datumaro/README.md +230 -0
  11. testbed/openvinotoolkit__datumaro/datum.py +8 -0
  12. testbed/openvinotoolkit__datumaro/datumaro/__init__.py +4 -0
  13. testbed/openvinotoolkit__datumaro/datumaro/__main__.py +12 -0
  14. testbed/openvinotoolkit__datumaro/datumaro/cli/commands/merge.py +124 -0
  15. testbed/openvinotoolkit__datumaro/datumaro/plugins/__init__.py +0 -0
  16. testbed/openvinotoolkit__datumaro/datumaro/plugins/accuracy_checker_plugin/launcher.py +37 -0
  17. testbed/openvinotoolkit__datumaro/datumaro/plugins/camvid_format.py +344 -0
  18. testbed/openvinotoolkit__datumaro/datumaro/plugins/cvat_format/format.py +9 -0
  19. testbed/openvinotoolkit__datumaro/datumaro/plugins/datumaro_format/__init__.py +0 -0
  20. testbed/openvinotoolkit__datumaro/datumaro/plugins/datumaro_format/converter.py +258 -0
  21. testbed/openvinotoolkit__datumaro/datumaro/plugins/datumaro_format/format.py +12 -0
  22. testbed/openvinotoolkit__datumaro/datumaro/plugins/image_dir.py +52 -0
  23. testbed/openvinotoolkit__datumaro/datumaro/plugins/imagenet_format.py +90 -0
  24. testbed/openvinotoolkit__datumaro/datumaro/plugins/imagenet_txt_format.py +105 -0
  25. testbed/openvinotoolkit__datumaro/datumaro/plugins/labelme_format.py +393 -0
  26. testbed/openvinotoolkit__datumaro/datumaro/plugins/mot_format.py +271 -0
  27. testbed/openvinotoolkit__datumaro/datumaro/plugins/mots_format.py +145 -0
  28. testbed/openvinotoolkit__datumaro/datumaro/plugins/openvino_launcher.py +188 -0
  29. testbed/openvinotoolkit__datumaro/datumaro/plugins/splitter.py +522 -0
  30. testbed/openvinotoolkit__datumaro/datumaro/plugins/tf_detection_api_format/converter.py +212 -0
  31. testbed/openvinotoolkit__datumaro/datumaro/plugins/tf_detection_api_format/extractor.py +187 -0
  32. testbed/openvinotoolkit__datumaro/datumaro/plugins/transforms.py +559 -0
  33. testbed/openvinotoolkit__datumaro/datumaro/plugins/vgg_face2_format.py +139 -0
  34. testbed/openvinotoolkit__datumaro/datumaro/plugins/voc_format/__init__.py +0 -0
  35. testbed/openvinotoolkit__datumaro/datumaro/plugins/voc_format/converter.py +579 -0
  36. testbed/openvinotoolkit__datumaro/datumaro/plugins/voc_format/format.py +206 -0
  37. testbed/openvinotoolkit__datumaro/datumaro/plugins/voc_format/importer.py +77 -0
  38. testbed/openvinotoolkit__datumaro/datumaro/plugins/widerface_format.py +122 -0
  39. testbed/openvinotoolkit__datumaro/datumaro/plugins/yolo_format/__init__.py +0 -0
  40. testbed/openvinotoolkit__datumaro/datumaro/plugins/yolo_format/extractor.py +197 -0
  41. testbed/openvinotoolkit__datumaro/datumaro/plugins/yolo_format/format.py +11 -0
  42. testbed/openvinotoolkit__datumaro/datumaro/util/__init__.py +93 -0
  43. testbed/openvinotoolkit__datumaro/datumaro/util/annotation_util.py +212 -0
  44. testbed/openvinotoolkit__datumaro/datumaro/util/attrs_util.py +33 -0
  45. testbed/openvinotoolkit__datumaro/datumaro/util/command_targets.py +113 -0
  46. testbed/openvinotoolkit__datumaro/datumaro/util/image.py +295 -0
  47. testbed/openvinotoolkit__datumaro/datumaro/util/image_cache.py +42 -0
  48. testbed/openvinotoolkit__datumaro/datumaro/util/log_utils.py +16 -0
  49. testbed/openvinotoolkit__datumaro/datumaro/util/mask_tools.py +288 -0
  50. testbed/openvinotoolkit__datumaro/datumaro/util/os_util.py +17 -0
testbed/openvinotoolkit__datumaro/.bandit ADDED
@@ -0,0 +1,8 @@
 
 
 
 
 
 
 
 
 
1
+ [bandit]
2
+ # B101 : assert_used
3
+ # B102 : exec_used
4
+ # B320 : xml_bad_etree
5
+ # B404 : import_subprocess
6
+ # B406 : import_xml_sax
7
+ # B410 : import_lxml
8
+ skips: B101,B102,B320,B404,B406,B410
testbed/openvinotoolkit__datumaro/.coveragerc ADDED
@@ -0,0 +1,34 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ [run]
2
+ branch = true
3
+ # relative_files = true # does not work?
4
+
5
+ source =
6
+ datumaro/
7
+
8
+ omit =
9
+ datumaro/__main__.py
10
+ datumaro/version.py
11
+ tests/*
12
+
13
+ [report]
14
+ # Regexes for lines to exclude from consideration
15
+ exclude_lines =
16
+ # Have to re-enable the standard pragma
17
+ pragma: no cover
18
+
19
+ # Don't complain about missing debug-only code:
20
+ def __repr__
21
+ if\s+[\w\.()]+\.isEnabledFor\(log\.DEBUG\):
22
+
23
+ # Don't complain if tests don't hit defensive assertion code:
24
+ raise AssertionError
25
+ raise NotImplementedError
26
+
27
+ # Don't complain if non-runnable code isn't run:
28
+ if 0:
29
+ if __name__ == .__main__.:
30
+
31
+ # don't fail on the code that can be found
32
+ ignore_errors = true
33
+
34
+ skip_empty = true
testbed/openvinotoolkit__datumaro/.gitattributes ADDED
@@ -0,0 +1,29 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ * text=auto whitespace=trailing-space,space-before-tab,-indent-with-non-tab,tab-in-indent,tabwidth=4
2
+
3
+ .git* text export-ignore
4
+
5
+ *.txt text
6
+ *.htm text
7
+ *.html text
8
+ *.js text
9
+ *.py text
10
+ *.css text
11
+ *.md text
12
+ *.yml text
13
+ Dockerfile text
14
+ LICENSE text
15
+ *.conf text
16
+ *.mimetypes text
17
+ *.sh text eol=lf
18
+
19
+ *.avi binary
20
+ *.bmp binary
21
+ *.exr binary
22
+ *.ico binary
23
+ *.jpeg binary
24
+ *.jpg binary
25
+ *.png binary
26
+ *.gif binary
27
+ *.ttf binary
28
+ *.pdf binary
29
+
testbed/openvinotoolkit__datumaro/.gitignore ADDED
@@ -0,0 +1,57 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ .DS_Store
2
+
3
+ # Byte-compiled / optimized / DLL files
4
+ __pycache__/
5
+ *.py[cod]
6
+ *$py.class
7
+
8
+ # Environments
9
+ .env
10
+ .venv
11
+ env/
12
+ venv/
13
+ ENV/
14
+ env.bak/
15
+ venv.bak/
16
+
17
+ # Distribution / packaging
18
+ .Python
19
+ build/
20
+ develop-eggs/
21
+ dist/
22
+ downloads/
23
+ eggs/
24
+ .eggs/
25
+ lib/
26
+ lib64/
27
+ parts/
28
+ sdist/
29
+ var/
30
+ wheels/
31
+ share/python-wheels/
32
+ *.egg-info/
33
+ .installed.cfg
34
+ *.egg
35
+ MANIFEST
36
+
37
+ # Installer logs
38
+ pip-log.txt
39
+ pip-delete-this-directory.txt
40
+
41
+ # Unit test / coverage reports
42
+ htmlcov/
43
+ .tox/
44
+ .nox/
45
+ .coverage
46
+ .coverage.*
47
+ .cache
48
+ nosetests.xml
49
+ coverage.xml
50
+ *.cover
51
+ *.py,cover
52
+ .hypothesis/
53
+ .pytest_cache/
54
+ cover/
55
+
56
+ # Sphinx documentation
57
+ docs/_build/
testbed/openvinotoolkit__datumaro/.pylintrc ADDED
@@ -0,0 +1,420 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ [MASTER]
2
+
3
+ # Specify a configuration file.
4
+ #rcfile=
5
+
6
+ # Python code to execute, usually for sys.path manipulation such as
7
+ # pygtk.require().
8
+ #init-hook=
9
+
10
+ # Add files or directories to the blacklist. They should be base names, not
11
+ # paths.
12
+ ignore=CVS
13
+
14
+ # Add files or directories matching the regex patterns to the blacklist. The
15
+ # regex matches against base names, not paths.
16
+ ignore-patterns=
17
+
18
+ # Pickle collected data for later comparisons.
19
+ persistent=yes
20
+
21
+ # List of plugins (as comma separated values of python modules names) to load,
22
+ # usually to register additional checkers.
23
+ load-plugins=
24
+
25
+ # Use multiple processes to speed up Pylint.
26
+ jobs=1
27
+
28
+ # Allow loading of arbitrary C extensions. Extensions are imported into the
29
+ # active Python interpreter and may run arbitrary code.
30
+ unsafe-load-any-extension=no
31
+
32
+ # A comma-separated list of package or module names from where C extensions may
33
+ # be loaded. Extensions are loading into the active Python interpreter and may
34
+ # run arbitrary code
35
+ extension-pkg-whitelist=
36
+
37
+ # Allow optimization of some AST trees. This will activate a peephole AST
38
+ # optimizer, which will apply various small optimizations. For instance, it can
39
+ # be used to obtain the result of joining multiple strings with the addition
40
+ # operator. Joining a lot of strings can lead to a maximum recursion error in
41
+ # Pylint and this flag can prevent that. It has one side effect, the resulting
42
+ # AST will be different than the one from reality. This option is deprecated
43
+ # and it will be removed in Pylint 2.0.
44
+ optimize-ast=no
45
+
46
+
47
+ [MESSAGES CONTROL]
48
+
49
+ # Only show warnings with the listed confidence levels. Leave empty to show
50
+ # all. Valid levels: HIGH, INFERENCE, INFERENCE_FAILURE, UNDEFINED
51
+ confidence=
52
+
53
+ # Enable the message, report, category or checker with the given id(s). You can
54
+ # either give multiple identifier separated by comma (,) or put this option
55
+ # multiple time (only on the command line, not in the configuration file where
56
+ # it should appear only once). See also the "--disable" option for examples.
57
+ disable=all
58
+ enable= E0001,E0100,E0101,E0102,E0103,E0104,E0105,E0106,E0107,E0110,
59
+ E0113,E0114,E0115,E0116,E0117,E0108,E0202,E0203,E0211,E0236,
60
+ E0238,E0239,E0240,E0241,E0301,E0302,E0601,E0603,E0604,E0701,
61
+ E0702,E0703,E0704,E0710,E0711,E0712,E1003,E1102,E1111,E0112,
62
+ E1120,E1121,E1123,E1124,E1125,E1126,E1127,E1132,E1200,E1201,
63
+ E1205,E1206,E1300,E1301,E1302,E1303,E1304,E1305,E1306,
64
+ C0123,C0200,C0303,C1001,
65
+ W0101,W0102,W0104,W0105,W0106,W0107,W0108,W0109,W0110,W0120,
66
+ W0122,W0124,W0150,W0199,W0221,W0222,W0233,W0404,W0410,W0601,
67
+ W0602,W0604,W0611,W0612,W0622,W0623,W0702,W0705,W0711,W1300,
68
 + W1301,W1302,W1303,W1305,W1306,W1307,
69
+ R0102,R0202,R0203
70
+
71
+
72
+ # Disable the message, report, category or checker with the given id(s). You
73
+ # can either give multiple identifiers separated by comma (,) or put this
74
+ # option multiple times (only on the command line, not in the configuration
75
+ # file where it should appear only once).You can also use "--disable=all" to
76
+ # disable everything first and then reenable specific checks. For example, if
77
+ # you want to run only the similarities checker, you can use "--disable=all
78
+ # --enable=similarities". If you want to run only the classes checker, but have
79
+ # no Warning level messages displayed, use"--disable=all --enable=classes
80
+ # --disable=W"
81
+ #disable=old-octal-literal,basestring-builtin,no-absolute-import,old-division,coerce-method,long-suffix,reload-builtin,unichr-builtin,indexing-exception,raising-string,dict-iter-method,metaclass-assignment,filter-builtin-not-iterating,import-star-module-level,next-method-called,cmp-method,raw_input-builtin,old-raise-syntax,cmp-builtin,apply-builtin,getslice-method,input-builtin,backtick,coerce-builtin,range-builtin-not-iterating,xrange-builtin,using-cmp-argument,buffer-builtin,hex-method,execfile-builtin,unpacking-in-except,standarderror-builtin,round-builtin,nonzero-method,unicode-builtin,reduce-builtin,file-builtin,dict-view-method,old-ne-operator,print-statement,suppressed-message,oct-method,useless-suppression,delslice-method,long-builtin,setslice-method,zip-builtin-not-iterating,map-builtin-not-iterating,intern-builtin,parameter-unpacking
82
+
83
+
84
+ [REPORTS]
85
+
86
+ # Set the output format. Available formats are text, parseable, colorized, msvs
87
+ # (visual studio) and html. You can also give a reporter class, eg
88
+ # mypackage.mymodule.MyReporterClass.
89
+ output-format=text
90
+
91
+ # Put messages in a separate file for each module / package specified on the
92
+ # command line instead of printing them on stdout. Reports (if any) will be
93
+ # written in a file name "pylint_global.[txt|html]". This option is deprecated
94
+ # and it will be removed in Pylint 2.0.
95
+ files-output=no
96
+
97
+ # Tells whether to display a full report or only the messages
98
+ reports=yes
99
+
100
+ # Python expression which should return a note less than 10 (10 is the highest
101
+ # note). You have access to the variables errors warning, statement which
102
+ # respectively contain the number of errors / warnings messages and the total
103
+ # number of statements analyzed. This is used by the global evaluation report
104
+ # (RP0004).
105
+ evaluation=10.0 - ((float(5 * error + warning + refactor + convention) / statement) * 10)
106
+
107
+ # Template used to display messages. This is a python new-style format string
108
+ # used to format the message information. See doc for all details
109
+ #msg-template=
110
+
111
+
112
+ [BASIC]
113
+
114
+ # Good variable names which should always be accepted, separated by a comma
115
+ good-names=i,j,k,ex,Run,_
116
+
117
+ # Bad variable names which should always be refused, separated by a comma
118
+ bad-names=foo,bar,baz,toto,tutu,tata
119
+
120
+ # Colon-delimited sets of names that determine each other's naming style when
121
+ # the name regexes allow several styles.
122
+ name-group=
123
+
124
+ # Include a hint for the correct naming format with invalid-name
125
+ include-naming-hint=no
126
+
127
+ # List of decorators that produce properties, such as abc.abstractproperty. Add
128
+ # to this list to register other decorators that produce valid properties.
129
+ property-classes=abc.abstractproperty
130
+
131
+ # Regular expression matching correct constant names
132
+ const-rgx=(([A-Z_][A-Z0-9_]*)|(__.*__))$
133
+
134
+ # Naming hint for constant names
135
+ const-name-hint=(([A-Z_][A-Z0-9_]*)|(__.*__))$
136
+
137
+ # Regular expression matching correct class names
138
+ class-rgx=[A-Z_][a-zA-Z0-9]+$
139
+
140
+ # Naming hint for class names
141
+ class-name-hint=[A-Z_][a-zA-Z0-9]+$
142
+
143
+ # Regular expression matching correct argument names
144
+ argument-rgx=[a-z_][a-z0-9_]{2,30}$
145
+
146
+ # Naming hint for argument names
147
+ argument-name-hint=[a-z_][a-z0-9_]{2,30}$
148
+
149
+ # Regular expression matching correct variable names
150
+ variable-rgx=[a-z_][a-z0-9_]{2,30}$
151
+
152
+ # Naming hint for variable names
153
+ variable-name-hint=[a-z_][a-z0-9_]{2,30}$
154
+
155
+ # Regular expression matching correct class attribute names
156
+ class-attribute-rgx=([A-Za-z_][A-Za-z0-9_]{2,30}|(__.*__))$
157
+
158
+ # Naming hint for class attribute names
159
+ class-attribute-name-hint=([A-Za-z_][A-Za-z0-9_]{2,30}|(__.*__))$
160
+
161
+ # Regular expression matching correct method names
162
+ method-rgx=[a-z_][a-z0-9_]{2,30}$
163
+
164
+ # Naming hint for method names
165
+ method-name-hint=[a-z_][a-z0-9_]{2,30}$
166
+
167
+ # Regular expression matching correct module names
168
+ module-rgx=(([a-z_][a-z0-9_]*)|([A-Z][a-zA-Z0-9]+))$
169
+
170
+ # Naming hint for module names
171
+ module-name-hint=(([a-z_][a-z0-9_]*)|([A-Z][a-zA-Z0-9]+))$
172
+
173
+ # Regular expression matching correct function names
174
+ function-rgx=[a-z_][a-z0-9_]{2,30}$
175
+
176
+ # Naming hint for function names
177
+ function-name-hint=[a-z_][a-z0-9_]{2,30}$
178
+
179
+ # Regular expression matching correct attribute names
180
+ attr-rgx=[a-z_][a-z0-9_]{2,30}$
181
+
182
+ # Naming hint for attribute names
183
+ attr-name-hint=[a-z_][a-z0-9_]{2,30}$
184
+
185
+ # Regular expression matching correct inline iteration names
186
+ inlinevar-rgx=[A-Za-z_][A-Za-z0-9_]*$
187
+
188
+ # Naming hint for inline iteration names
189
+ inlinevar-name-hint=[A-Za-z_][A-Za-z0-9_]*$
190
+
191
+ # Regular expression which should only match function or class names that do
192
+ # not require a docstring.
193
+ no-docstring-rgx=^_
194
+
195
+ # Minimum line length for functions/classes that require docstrings, shorter
196
+ # ones are exempt.
197
+ docstring-min-length=-1
198
+
199
+
200
+ [ELIF]
201
+
202
+ # Maximum number of nested blocks for function / method body
203
+ max-nested-blocks=5
204
+
205
+
206
+ [FORMAT]
207
+
208
+ # Maximum number of characters on a single line.
209
+ max-line-length=80
210
+
211
+ # Regexp for a line that is allowed to be longer than the limit.
212
+ ignore-long-lines=^\s*(# )?<?https?://\S+>?$
213
+
214
+ # Allow the body of an if to be on the same line as the test if there is no
215
+ # else.
216
+ single-line-if-stmt=no
217
+
218
+ # List of optional constructs for which whitespace checking is disabled. `dict-
219
+ # separator` is used to allow tabulation in dicts, etc.: {1 : 1,\n222: 2}.
220
+ # `trailing-comma` allows a space between comma and closing bracket: (a, ).
221
+ # `empty-line` allows space-only lines.
222
+ no-space-check=trailing-comma,dict-separator
223
+
224
+ # Maximum number of lines in a module
225
+ max-module-lines=1000
226
+
227
+ # String used as indentation unit. This is usually " " (4 spaces) or "\t" (1
228
+ # tab).
229
+ indent-string=' '
230
+
231
+ # Number of spaces of indent required inside a hanging or continued line.
232
+ indent-after-paren=4
233
+
234
+ # Expected format of line ending, e.g. empty (any line ending), LF or CRLF.
235
+ expected-line-ending-format=
236
+
237
+
238
+ [LOGGING]
239
+
240
+ # Logging modules to check that the string format arguments are in logging
241
+ # function parameter format
242
+ logging-modules=logging
243
+
244
+
245
+ [MISCELLANEOUS]
246
+
247
+ # List of note tags to take in consideration, separated by a comma.
248
+ notes=FIXME,XXX,TODO
249
+
250
+
251
+ [SIMILARITIES]
252
+
253
+ # Minimum lines number of a similarity.
254
+ min-similarity-lines=4
255
+
256
+ # Ignore comments when computing similarities.
257
+ ignore-comments=yes
258
+
259
+ # Ignore docstrings when computing similarities.
260
+ ignore-docstrings=yes
261
+
262
+ # Ignore imports when computing similarities.
263
+ ignore-imports=no
264
+
265
+
266
+ [SPELLING]
267
+
268
+ # Spelling dictionary name. Available dictionaries: none. To make it working
269
+ # install python-enchant package.
270
+ spelling-dict=
271
+
272
+ # List of comma separated words that should not be checked.
273
+ spelling-ignore-words=
274
+
275
+ # A path to a file that contains private dictionary; one word per line.
276
+ spelling-private-dict-file=
277
+
278
+ # Tells whether to store unknown words to indicated private dictionary in
279
+ # --spelling-private-dict-file option instead of raising a message.
280
+ spelling-store-unknown-words=no
281
+
282
+
283
+ [TYPECHECK]
284
+
285
+ # Tells whether missing members accessed in mixin class should be ignored. A
286
+ # mixin class is detected if its name ends with "mixin" (case insensitive).
287
+ ignore-mixin-members=yes
288
+
289
+ # List of module names for which member attributes should not be checked
290
+ # (useful for modules/projects where namespaces are manipulated during runtime
291
+ # and thus existing member attributes cannot be deduced by static analysis. It
292
+ # supports qualified module names, as well as Unix pattern matching.
293
+ ignored-modules=
294
+
295
+ # List of class names for which member attributes should not be checked (useful
296
+ # for classes with dynamically set attributes). This supports the use of
297
+ # qualified names.
298
+ ignored-classes=optparse.Values,thread._local,_thread._local
299
+
300
+ # List of members which are set dynamically and missed by pylint inference
301
+ # system, and so shouldn't trigger E1101 when accessed. Python regular
302
+ # expressions are accepted.
303
+ generated-members=
304
+
305
+ # List of decorators that produce context managers, such as
306
+ # contextlib.contextmanager. Add to this list to register other decorators that
307
+ # produce valid context managers.
308
+ contextmanager-decorators=contextlib.contextmanager
309
+
310
+
311
+ [VARIABLES]
312
+
313
+ # Tells whether we should check for unused import in __init__ files.
314
+ init-import=no
315
+
316
+ # A regular expression matching the name of dummy variables (i.e. expectedly
317
+ # not used).
318
+ dummy-variables-rgx=(_+[a-zA-Z0-9]*?$)|dummy
319
+
320
+ # List of additional names supposed to be defined in builtins. Remember that
321
+ # you should avoid to define new builtins when possible.
322
+ additional-builtins=
323
+
324
+ # List of strings which can identify a callback function by name. A callback
325
+ # name must start or end with one of those strings.
326
+ callbacks=cb_,_cb
327
+
328
+ # List of qualified module names which can have objects that can redefine
329
+ # builtins.
330
+ redefining-builtins-modules=six.moves,future.builtins
331
+
332
+
333
+ [CLASSES]
334
+
335
+ # List of method names used to declare (i.e. assign) instance attributes.
336
+ defining-attr-methods=__init__,__new__,setUp
337
+
338
+ # List of valid names for the first argument in a class method.
339
+ valid-classmethod-first-arg=cls
340
+
341
+ # List of valid names for the first argument in a metaclass class method.
342
+ valid-metaclass-classmethod-first-arg=mcs
343
+
344
+ # List of member names, which should be excluded from the protected access
345
+ # warning.
346
+ exclude-protected=_asdict,_fields,_replace,_source,_make
347
+
348
+
349
+ [DESIGN]
350
+
351
+ # Maximum number of arguments for function / method
352
+ max-args=5
353
+
354
+ # Argument names that match this expression will be ignored. Default to name
355
+ # with leading underscore
356
+ ignored-argument-names=_.*
357
+
358
+ # Maximum number of locals for function / method body
359
+ max-locals=15
360
+
361
+ # Maximum number of return / yield for function / method body
362
+ max-returns=6
363
+
364
+ # Maximum number of branch for function / method body
365
+ max-branches=12
366
+
367
+ # Maximum number of statements in function / method body
368
+ max-statements=50
369
+
370
+ # Maximum number of parents for a class (see R0901).
371
+ max-parents=7
372
+
373
+ # Maximum number of attributes for a class (see R0902).
374
+ max-attributes=7
375
+
376
+ # Minimum number of public methods for a class (see R0903).
377
+ min-public-methods=2
378
+
379
+ # Maximum number of public methods for a class (see R0904).
380
+ max-public-methods=20
381
+
382
+ # Maximum number of boolean expressions in a if statement
383
+ max-bool-expr=5
384
+
385
+
386
+ [IMPORTS]
387
+
388
+ # Deprecated modules which should not be used, separated by a comma
389
+ deprecated-modules=optparse
390
+
391
+ # Create a graph of every (i.e. internal and external) dependencies in the
392
+ # given file (report RP0402 must not be disabled)
393
+ import-graph=
394
+
395
+ # Create a graph of external dependencies in the given file (report RP0402 must
396
+ # not be disabled)
397
+ ext-import-graph=
398
+
399
+ # Create a graph of internal dependencies in the given file (report RP0402 must
400
+ # not be disabled)
401
+ int-import-graph=
402
+
403
+ # Force import order to recognize a module as part of the standard
404
+ # compatibility libraries.
405
+ known-standard-library=
406
+
407
+ # Force import order to recognize a module as part of a third party library.
408
+ known-third-party=enchant
409
+
410
+ # Analyse import fallback blocks. This can be used to support both Python 2 and
411
+ # 3 compatible code, which means that the block might have code that exists
412
+ # only in one or another interpreter, leading to false positives when analysed.
413
+ analyse-fallback-blocks=no
414
+
415
+
416
+ [EXCEPTIONS]
417
+
418
+ # Exceptions that will emit a warning when being caught. Defaults to
419
+ # "Exception"
420
+ overgeneral-exceptions=Exception
testbed/openvinotoolkit__datumaro/.travis.yml ADDED
@@ -0,0 +1,37 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ language: python
2
+
3
+ cache: pip
4
+
5
+ python:
6
+ - '3.6'
7
+ - '3.7'
8
+ - '3.8'
9
+
10
+ matrix:
11
+ include:
12
+ - dist: xenial
13
+
14
+ # measure coverage here
15
+ - dist: bionic
16
+ python: '3.6'
17
+ before_install:
18
+ - pip install coverage
19
+ script:
20
+ - coverage run -m unittest discover -v
21
+ - coverage run -a datum.py -h
22
+ after_success:
23
+ - coverage xml
24
+ - bash <(curl -Ls https://coverage.codacy.com/get.sh) report -r coverage.xml
25
+
26
+ - dist: bionic
27
+ python: '3.7'
28
+ - dist: bionic
29
+ python: '3.8'
30
+
31
+ install:
32
+ - pip install -e ./
33
+ - pip install tensorflow
34
+
35
+ script:
36
+ - python -m unittest discover -v
37
+ - datum -h
testbed/openvinotoolkit__datumaro/CHANGELOG.md ADDED
@@ -0,0 +1,145 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Changelog
2
+
3
+ All notable changes to this project will be documented in this file.
4
+
5
+ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
6
+ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).
7
+
8
+
9
+ ## [Unreleased]
10
+ ### Added
11
+ - Task-specific Splitter (<https://github.com/openvinotoolkit/datumaro/pull/68>)
12
+ - `WiderFace` dataset format (<https://github.com/openvinotoolkit/datumaro/pull/65>)
13
+ - Function to transform annotations to labels (<https://github.com/openvinotoolkit/datumaro/pull/66>)
14
+ - `VGGFace2` dataset format (<https://github.com/openvinotoolkit/datumaro/pull/69>)
15
+
16
+ ### Changed
17
+ -
18
+
19
+ ### Deprecated
20
+ -
21
+
22
+ ### Removed
23
+ -
24
+
25
+ ### Fixed
26
+ -
27
+
28
+ ### Security
29
+ -
30
+
31
+ ## 12/10/2020 - Release v0.1.4
32
+ ### Added
33
+ - `CamVid` dataset format (<https://github.com/openvinotoolkit/datumaro/pull/57>)
34
+ - Ability to install `opencv-python-headless` dependency with `DATUMARO_HEADLESS=1`
35
 + environment variable instead of `opencv-python` (<https://github.com/openvinotoolkit/datumaro/pull/62>)
36
+
37
+ ### Changed
38
+ - Allow empty supercategory in COCO (<https://github.com/openvinotoolkit/datumaro/pull/54>)
39
+ - Allow Pascal VOC to search in subdirectories (<https://github.com/openvinotoolkit/datumaro/pull/50>)
40
+
41
+ ### Deprecated
42
+ -
43
+
44
+ ### Removed
45
+ -
46
+
47
+ ### Fixed
48
+ -
49
+
50
+ ### Security
51
+ -
52
+
53
+ ## 10/28/2020 - Release v0.1.3
54
+ ### Added
55
+ - `ImageNet` and `ImageNetTxt` dataset formats (<https://github.com/openvinotoolkit/datumaro/pull/41>)
56
+
57
+ ### Changed
58
+ -
59
+
60
+ ### Deprecated
61
+ -
62
+
63
+ ### Removed
64
+ -
65
+
66
+ ### Fixed
67
+ - Default `label-map` parameter value for VOC converter (<https://github.com/openvinotoolkit/datumaro/pull/34>)
68
+ - Randomness of random split transform (<https://github.com/openvinotoolkit/datumaro/pull/38>)
69
+ - `Transform.subsets()` method (<https://github.com/openvinotoolkit/datumaro/pull/38>)
70
+ - Supported unknown image formats in TF Detection API converter (<https://github.com/openvinotoolkit/datumaro/pull/40>)
71
+ - Supported empty attribute values in CVAT extractor (<https://github.com/openvinotoolkit/datumaro/pull/45>)
72
+
73
+ ### Security
74
+ -
75
+
76
+
77
+ ## 10/05/2020 - Release v0.1.2
78
+ ### Added
79
+ - `ByteImage` class to represent encoded images in memory and avoid recoding on save (<https://github.com/openvinotoolkit/datumaro/pull/27>)
80
+
81
+ ### Changed
82
+ - Implementation of format plugins simplified (<https://github.com/openvinotoolkit/datumaro/pull/22>)
83
+ - `default` is now a default subset name, instead of `None`. The values are interchangeable. (<https://github.com/openvinotoolkit/datumaro/pull/22>)
84
+ - Improved performance of transforms (<https://github.com/openvinotoolkit/datumaro/pull/22>)
85
+
86
+ ### Deprecated
87
+ -
88
+
89
+ ### Removed
90
+ - `image/depth` value from VOC export (<https://github.com/openvinotoolkit/datumaro/pull/27>)
91
+
92
+ ### Fixed
93
+ - Zero division errors in dataset statistics (<https://github.com/openvinotoolkit/datumaro/pull/31>)
94
+
95
+ ### Security
96
+ -
97
+
98
+
99
+ ## 09/24/2020 - Release v0.1.1
100
+ ### Added
101
+ - `reindex` option in COCO and CVAT converters (<https://github.com/openvinotoolkit/datumaro/pull/18>)
102
+ - Support for relative paths in LabelMe format (<https://github.com/openvinotoolkit/datumaro/pull/19>)
103
 + - MOTS png mask format support (<https://github.com/openvinotoolkit/datumaro/pull/21>)
104
+
105
+ ### Changed
106
+ -
107
+
108
+ ### Deprecated
109
+ -
110
+
111
+ ### Removed
112
+ -
113
+
114
+ ### Fixed
115
+ -
116
+
117
+ ### Security
118
+ -
119
+
120
+
121
+ ## 09/10/2020 - Release v0.1.0
122
+ ### Added
123
+ - Initial release
124
+
125
+ ## Template
126
+ ```
127
+ ## [Unreleased]
128
+ ### Added
129
+ -
130
+
131
+ ### Changed
132
+ -
133
+
134
+ ### Deprecated
135
+ -
136
+
137
+ ### Removed
138
+ -
139
+
140
+ ### Fixed
141
+ -
142
+
143
+ ### Security
144
+ -
145
+ ```
testbed/openvinotoolkit__datumaro/CONTRIBUTING.md ADDED
@@ -0,0 +1,99 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ## Table of Contents
2
+
3
+ - [Installation](#installation)
4
+ - [Usage](#usage)
5
+ - [Testing](#testing)
6
+ - [Design](#design-and-code-structure)
7
+
8
+ ## Installation
9
+
10
+ ### Prerequisites
11
+
12
+ - Python (3.5+)
13
+
14
+ ``` bash
15
+ git clone https://github.com/openvinotoolkit/datumaro
16
+ ```
17
+
18
+ Optionally, install a virtual environment (recommended):
19
+
20
+ ``` bash
21
+ python -m pip install virtualenv
22
+ python -m virtualenv venv
23
+ . venv/bin/activate
24
+ ```
25
+
26
+ Then install all dependencies:
27
+
28
+ ``` bash
29
+ while read -r p; do pip install $p; done < requirements.txt
30
+ ```
31
+
32
+ If you're working inside of a CVAT environment:
33
+ ``` bash
34
+ . .env/bin/activate
35
+ while read -r p; do pip install $p; done < datumaro/requirements.txt
36
+ ```
37
+
38
+ Install Datumaro:
39
+ ``` bash
40
+ pip install -e /path/to/the/cloned/repo/
41
+ ```
42
+
43
+ **Optional dependencies**
44
+
45
+ These components are only required for plugins and not installed by default:
46
+
47
+ - OpenVINO
48
+ - Accuracy Checker
49
+ - TensorFlow
50
+ - PyTorch
51
+ - MxNet
52
+ - Caffe
53
+
54
+ ## Usage
55
+
56
+ ``` bash
57
+ datum --help
58
+ python -m datumaro --help
59
+ python datumaro/ --help
60
+ python datum.py --help
61
+ ```
62
+
63
+ ``` python
64
+ import datumaro
65
+ ```
66
+
67
+ ## Testing
68
+
69
+ It is expected that all Datumaro functionality is covered and checked by
70
+ unit tests. Tests are placed in `tests/` directory.
71
+
72
+ To run tests use:
73
+
74
+ ``` bash
75
+ python -m unittest discover -s tests
76
+ ```
77
+
78
+ If you're working inside of a CVAT environment, you can also use:
79
+
80
+ ``` bash
81
+ python manage.py test datumaro/
82
+ ```
83
+
84
+ ## Design and code structure
85
+
86
+ - [Design document](docs/design.md)
87
+ - [Developer guide](docs/developer_guide.md)
88
+
89
+ ## Code style
90
+
91
+ Try to be readable and consistent with the existing codebase.
92
+ The project mostly follows PEP8 with little differences.
93
+ Continuation lines have a standard indentation step by default,
94
+ or any other, if it improves readability. For long conditionals use 2 steps.
95
+ No trailing whitespaces, 80 characters per line.
96
+
97
+ ## Environment
98
+
99
+ The recommended editor is VS Code with the Python plugin.
testbed/openvinotoolkit__datumaro/LICENSE ADDED
@@ -0,0 +1,22 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ MIT License
2
+
3
+ Copyright (C) 2019-2020 Intel Corporation
4
+  
5
+ Permission is hereby granted, free of charge, to any person obtaining a copy
6
+ of this software and associated documentation files (the "Software"),
7
+ to deal in the Software without restriction, including without limitation
8
+ the rights to use, copy, modify, merge, publish, distribute, sublicense,
9
+ and/or sell copies of the Software, and to permit persons to whom
10
+ the Software is furnished to do so, subject to the following conditions:
11
+  
12
+ The above copyright notice and this permission notice shall be included
13
+ in all copies or substantial portions of the Software.
14
+  
15
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
16
+ OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18
+ THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES
19
+ OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
20
+ ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE
21
+ OR OTHER DEALINGS IN THE SOFTWARE.
22
+  
testbed/openvinotoolkit__datumaro/README.md ADDED
@@ -0,0 +1,230 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Dataset Management Framework (Datumaro)
2
+
3
+ [![Build Status](https://travis-ci.org/openvinotoolkit/datumaro.svg?branch=develop)](https://travis-ci.org/openvinotoolkit/datumaro)
4
+ [![Codacy Badge](https://api.codacy.com/project/badge/Grade/759d2d873b59495aa3d3f8c51b786246)](https://app.codacy.com/gh/openvinotoolkit/datumaro?utm_source=github.com&utm_medium=referral&utm_content=openvinotoolkit/datumaro&utm_campaign=Badge_Grade_Dashboard)
5
+ [![Codacy Badge](https://app.codacy.com/project/badge/Coverage/9511b691ff134e739ea6fc524f7cc760)](https://www.codacy.com/gh/openvinotoolkit/datumaro?utm_source=github.com&utm_medium=referral&utm_content=openvinotoolkit/datumaro&utm_campaign=Badge_Coverage)
6
+
7
+ A framework and CLI tool to build, transform, and analyze datasets.
8
+
9
+ <!--lint disable fenced-code-flag-->
10
+ ```
11
+ VOC dataset ---> Annotation tool
12
+ + /
13
+ COCO dataset -----> Datumaro ---> dataset ------> Model training
14
+ + \
15
+ CVAT annotations ---> Publication, statistics etc.
16
+ ```
17
+ <!--lint enable fenced-code-flag-->
18
+
19
+ # Table of Contents
20
+
21
+ - [Examples](#examples)
22
+ - [Features](#features)
23
+ - [Installation](#installation)
24
+ - [Usage](#usage)
25
+ - [User manual](docs/user_manual.md)
26
+ - [Contributing](#contributing)
27
+
28
+ ## Examples
29
+
30
+ [(Back to top)](#table-of-contents)
31
+
32
+ <!--lint disable list-item-indent-->
33
+ <!--lint disable list-item-bullet-indent-->
34
+
35
+ - Convert PASCAL VOC dataset to COCO format, keep only images with `cat` class presented:
36
+ ```bash
37
+ # Download VOC dataset:
38
+ # http://host.robots.ox.ac.uk/pascal/VOC/voc2012/VOCtrainval_11-May-2012.tar
39
+ datum convert --input-format voc --input-path <path/to/voc> \
40
+ --output-format coco \
41
+ --filter '/item[annotation/label="cat"]'
42
+ ```
43
+
44
+ - Convert only non-`occluded` annotations from a [CVAT](https://github.com/opencv/cvat) project to TFrecord:
45
+ ```bash
46
+ # export Datumaro dataset in CVAT UI, extract somewhere, go to the project dir
47
+ datum project filter -e '/item/annotation[occluded="False"]' \
48
+ --mode items+anno --output-dir not_occluded
49
+ datum project export --project not_occluded \
50
+ --format tf_detection_api -- --save-images
51
+ ```
52
+
53
+ - Annotate MS COCO dataset, extract image subset, re-annotate it in [CVAT](https://github.com/opencv/cvat), update old dataset:
54
+ ```bash
55
+ # Download COCO dataset http://cocodataset.org/#download
56
+ # Put images to coco/images/ and annotations to coco/annotations/
57
+ datum project import --format coco --input-path <path/to/coco>
58
+ datum project export --filter '/image[images_I_dont_like]' --format cvat \
59
+ --output-dir reannotation
60
+ # import dataset and images to CVAT, re-annotate
61
+ # export Datumaro project, extract to 'reannotation-upd'
62
+ datum project merge reannotation-upd
63
+ datum project export --format coco
64
+ ```
65
+
66
+ - Annotate instance polygons in [CVAT](https://github.com/opencv/cvat), export as masks in COCO:
67
+ ```bash
68
+ datum convert --input-format cvat --input-path <path/to/cvat.xml> \
69
+ --output-format coco -- --segmentation-mode masks
70
+ ```
71
+
72
+ - Apply an OpenVINO detection model to some COCO-like dataset,
73
+ then compare annotations with ground truth and visualize in TensorBoard:
74
+ ```bash
75
+ datum project import --format coco --input-path <path/to/coco>
76
+ # create model results interpretation script
77
+ datum model add mymodel openvino \
78
+ --weights model.bin --description model.xml \
79
+ --interpretation-script parse_results.py
80
+ datum model run --model mymodel --output-dir mymodel_inference/
81
+ datum project diff mymodel_inference/ --format tensorboard --output-dir diff
82
+ ```
83
+
84
+ - Change colors in PASCAL VOC-like `.png` masks:
85
+ ```bash
86
+ datum project import --format voc --input-path <path/to/voc/dataset>
87
+
88
+ # Create a color map file with desired colors:
89
+ #
90
+ # label : color_rgb : parts : actions
91
+ # cat:0,0,255::
92
+ # dog:255,0,0::
93
+ #
94
+ # Save as mycolormap.txt
95
+
96
+ datum project export --format voc_segmentation -- --label-map mycolormap.txt
97
+ # add "--apply-colormap=0" to save grayscale (indexed) masks
98
+ # check "--help" option for more info
99
+ # use "datum --loglevel debug" for extra conversion info
100
+ ```
101
+
102
+ <!--lint enable list-item-bullet-indent-->
103
+ <!--lint enable list-item-indent-->
104
+
105
+ ## Features
106
+
107
+ [(Back to top)](#table-of-contents)
108
+
109
+ - Dataset reading, writing, conversion in any direction. Supported formats:
110
+ - [COCO](http://cocodataset.org/#format-data) (`image_info`, `instances`, `person_keypoints`, `captions`, `labels`*)
111
+ - [PASCAL VOC](http://host.robots.ox.ac.uk/pascal/VOC/voc2012/htmldoc/index.html) (`classification`, `detection`, `segmentation`, `action_classification`, `person_layout`)
112
+ - [YOLO](https://github.com/AlexeyAB/darknet#how-to-train-pascal-voc-data) (`bboxes`)
113
+ - [TF Detection API](https://github.com/tensorflow/models/blob/master/research/object_detection/g3doc/using_your_own_dataset.md) (`bboxes`, `masks`)
114
+ - [WIDER Face](http://shuoyang1213.me/WIDERFACE/) (`bboxes`)
115
+ - [VGGFace2](https://github.com/ox-vgg/vgg_face2) (`landmarks`, `bboxes`)
116
+ - [MOT sequences](https://arxiv.org/pdf/1906.04567.pdf)
117
+ - [MOTS PNG](https://www.vision.rwth-aachen.de/page/mots)
118
+ - [ImageNet](http://image-net.org/)
119
+ - [CamVid](http://mi.eng.cam.ac.uk/research/projects/VideoRec/CamVid/)
120
+ - [CVAT](https://github.com/opencv/cvat/blob/develop/cvat/apps/documentation/xml_format.md)
121
+ - [LabelMe](http://labelme.csail.mit.edu/Release3.0)
122
+ - Dataset building
123
+ - Merging multiple datasets into one
124
+ - Dataset filtering by a custom criteria:
125
+ - remove polygons of a certain class
126
+ - remove images without annotations of a specific class
127
+ - remove `occluded` annotations from images
128
+ - keep only vertically-oriented images
129
+ - remove small area bounding boxes from annotations
130
+ - Annotation conversions, for instance:
131
+ - polygons to instance masks and vice versa
132
+ - apply a custom colormap for mask annotations
133
+ - rename or remove dataset labels
134
+ - Dataset quality checking
135
+ - Simple checking for errors
136
+ - Comparison with model inference
137
+ - Merging and comparison of multiple datasets
138
+ - Dataset comparison
139
+ - Dataset statistics (image mean and std, annotation statistics)
140
+ - Model integration
141
+ - Inference (OpenVINO, Caffe, PyTorch, TensorFlow, MxNet, etc.)
142
+ - Explainable AI ([RISE algorithm](https://arxiv.org/abs/1806.07421))
143
+
144
+ > Check [the design document](docs/design.md) for a full list of features.
145
+ > Check [the user manual](docs/user_manual.md) for usage instructions.
146
+
147
+ ## Installation
148
+
149
+ [(Back to top)](#table-of-contents)
150
+
151
+ ### Dependencies
152
+
153
+ - Python (3.6+)
154
+ - Optional: OpenVINO, TensorFlow, PyTorch, MxNet, Caffe, Accuracy Checker
155
+
156
+ Optionally, create a virtual environment:
157
+
158
+ ``` bash
159
+ python -m pip install virtualenv
160
+ python -m virtualenv venv
161
+ . venv/bin/activate
162
+ ```
163
+
164
+ Install Datumaro package:
165
+
166
+ ``` bash
167
+ pip install 'git+https://github.com/openvinotoolkit/datumaro'
168
+ ```
169
+
170
+ ## Usage
171
+
172
+ [(Back to top)](#table-of-contents)
173
+
174
+ There are several options available:
175
+ - [A standalone command-line tool](#standalone-tool)
176
+ - [A python module](#python-module)
177
+
178
+ ### Standalone tool
179
+
180
+ Datumaro as a standalone tool allows performing various dataset operations from
181
+ the command line interface:
182
+
183
+ ``` bash
184
+ datum --help
185
+ python -m datumaro --help
186
+ ```
187
+
188
+ ### Python module
189
+
190
+ Datumaro can be used in custom scripts as a Python module. Used this way, it
191
+ allows using its features from an existing codebase, enabling dataset
192
+ reading, exporting and iteration capabilities, simplifying integration of custom
193
+ formats and providing high performance operations:
194
+
195
+ ``` python
196
+ from datumaro.components.project import Project # project-related things
197
+ import datumaro.components.extractor # annotations and high-level interfaces
198
+
199
+ # load a Datumaro project
200
+ project = Project.load('directory')
201
+
202
+ # create a dataset
203
+ dataset = project.make_dataset()
204
+
205
+ # keep only annotated images
206
+ dataset = dataset.select(lambda item: len(item.annotations) != 0)
207
+
208
+ # change dataset labels
209
+ dataset = dataset.transform(project.env.transforms.get('remap_labels'),
210
+ {'cat': 'dog', # rename cat to dog
211
+ 'truck': 'car', # rename truck to car
212
+ 'person': '', # remove this label
213
+ }, default='delete')
214
+
215
+ for item in dataset:
216
+ print(item.id, item.annotations)
217
+
218
+ # export the resulting dataset in COCO format
219
+ project.env.converters.get('coco').convert(dataset, save_dir='dst/dir')
220
+ ```
221
+
222
+ > Check our [developer guide](docs/developer_guide.md) for additional information.
223
+
224
+ ## Contributing
225
+
226
+ [(Back to top)](#table-of-contents)
227
+
228
+ Feel free to [open an Issue](https://github.com/openvinotoolkit/datumaro/issues/new), if you
229
+ think something needs to be changed. You are welcome to participate in development,
230
+ instructions are available in our [contribution guide](CONTRIBUTING.md).
testbed/openvinotoolkit__datumaro/datum.py ADDED
@@ -0,0 +1,8 @@
 
 
 
 
 
 
 
 
 
1
#!/usr/bin/env python
# Thin executable wrapper: delegates to the Datumaro CLI entry point so the
# tool can be launched as `python datum.py` from a source checkout.
import sys

from datumaro.cli.__main__ import main


if __name__ == '__main__':
    # Propagate the CLI's return code as the process exit status.
    sys.exit(main())
testbed/openvinotoolkit__datumaro/datumaro/__init__.py ADDED
@@ -0,0 +1,4 @@
 
 
 
 
 
1
+
2
+ # Copyright (C) 2019-2020 Intel Corporation
3
+ #
4
+ # SPDX-License-Identifier: MIT
testbed/openvinotoolkit__datumaro/datumaro/__main__.py ADDED
@@ -0,0 +1,12 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
1

# Copyright (C) 2019-2020 Intel Corporation
#
# SPDX-License-Identifier: MIT

# Package entry point: allows running the CLI as `python -m datumaro`.
import sys

from datumaro.cli.__main__ import main


if __name__ == '__main__':
    # Propagate the CLI's return code as the process exit status.
    sys.exit(main())
testbed/openvinotoolkit__datumaro/datumaro/cli/commands/merge.py ADDED
@@ -0,0 +1,124 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+
2
+ # Copyright (C) 2020 Intel Corporation
3
+ #
4
+ # SPDX-License-Identifier: MIT
5
+
6
+ import argparse
7
+ import json
8
+ import logging as log
9
+ import os.path as osp
10
+ from collections import OrderedDict
11
+
12
+ from datumaro.components.project import Project
13
+ from datumaro.components.operations import (IntersectMerge,
14
+ QualityError, MergeError)
15
+
16
+ from ..util import at_least, MultilineFormatter, CliException
17
+ from ..util.project import generate_next_file_name, load_project
18
+
19
+
20
+ def build_parser(parser_ctor=argparse.ArgumentParser):
21
+ parser = parser_ctor(help="Merge few projects",
22
+ description="""
23
+ Merges multiple datasets into one. This can be useful if you
24
+ have few annotations and wish to merge them,
25
+ taking into consideration potential overlaps and conflicts.
26
+ This command can try to find a common ground by voting or
27
+ return a list of conflicts.|n
28
+ |n
29
+ Examples:|n
30
+ - Merge annotations from 3 (or more) annotators:|n
31
+ |s|smerge project1/ project2/ project3/|n
32
+ - Check groups of the merged dataset for consistence:|n
33
+ |s|s|slook for groups consising of 'person', 'hand' 'head', 'foot'|n
34
+ |s|smerge project1/ project2/ -g 'person,hand?,head,foot?'
35
+ """,
36
+ formatter_class=MultilineFormatter)
37
+
38
+ def _group(s):
39
+ return s.split(',')
40
+
41
+ parser.add_argument('project', nargs='+', action=at_least(2),
42
+ help="Path to a project (repeatable)")
43
+ parser.add_argument('-iou', '--iou-thresh', default=0.25, type=float,
44
+ help="IoU match threshold for segments (default: %(default)s)")
45
+ parser.add_argument('-oconf', '--output-conf-thresh',
46
+ default=0.0, type=float,
47
+ help="Confidence threshold for output "
48
+ "annotations (default: %(default)s)")
49
+ parser.add_argument('--quorum', default=0, type=int,
50
+ help="Minimum count for a label and attribute voting "
51
+ "results to be counted (default: %(default)s)")
52
+ parser.add_argument('-g', '--groups', action='append', type=_group,
53
+ default=[],
54
+ help="A comma-separated list of labels in "
55
+ "annotation groups to check. '?' postfix can be added to a label to"
56
+ "make it optional in the group (repeatable)")
57
+ parser.add_argument('-o', '--output-dir', dest='dst_dir', default=None,
58
+ help="Output directory (default: current project's dir)")
59
+ parser.add_argument('--overwrite', action='store_true',
60
+ help="Overwrite existing files in the save directory")
61
+ parser.set_defaults(command=merge_command)
62
+
63
+ return parser
64
+
65
def merge_command(args):
    """Handler for the 'merge' CLI command.

    Loads every project listed in ``args.project``, merges their datasets
    with the IntersectMerge strategy (voting, configured by the IoU,
    output-confidence and quorum thresholds), saves the merged dataset and
    a JSON conflict report into the output directory.

    Returns 0 on success. Raises CliException when the output directory
    exists, is not empty, and --overwrite was not passed.
    """
    # Bug fix: the module imports only `os.path as osp`, which does not
    # bind the name `os`, so `os.listdir` below raised NameError.
    import os

    source_projects = [load_project(p) for p in args.project]

    dst_dir = args.dst_dir
    if dst_dir:
        # Refuse to write into an existing non-empty directory unless the
        # user explicitly allowed overwriting.
        if not args.overwrite and osp.isdir(dst_dir) and os.listdir(dst_dir):
            raise CliException("Directory '%s' already exists "
                "(pass --overwrite to overwrite)" % dst_dir)
    else:
        dst_dir = generate_next_file_name('merged')

    source_datasets = []
    for p in source_projects:
        log.debug("Loading project '%s' dataset", p.config.project_name)
        source_datasets.append(p.make_dataset())

    merger = IntersectMerge(conf=IntersectMerge.Conf(
        pairwise_dist=args.iou_thresh, groups=args.groups,
        output_conf_thresh=args.output_conf_thresh, quorum=args.quorum
    ))
    merged_dataset = merger(source_datasets)

    # Wrap the merged dataset into a fresh project so it can be saved in
    # the Datumaro format with the merged categories.
    merged_project = Project()
    output_dataset = merged_project.make_dataset()
    output_dataset.define_categories(merged_dataset.categories())
    merged_dataset = output_dataset.update(merged_dataset)
    merged_dataset.save(save_dir=dst_dir)

    report_path = osp.join(dst_dir, 'merge_report.json')
    save_merge_report(merger, report_path)

    dst_dir = osp.abspath(dst_dir)
    log.info("Merge results have been saved to '%s'" % dst_dir)
    log.info("Report has been saved to '%s'" % report_path)

    return 0
101
+
102
def save_merge_report(merger, path):
    """Dump the errors accumulated by `merger` as a JSON report at `path`.

    The report contains per-item error counts, per-source error counts
    (for merge conflicts), and the full list of error messages.
    """
    per_item = OrderedDict()
    per_source = OrderedDict()
    messages = []

    for error in merger.errors:
        if isinstance(error, QualityError):
            key = str(error.item_id)
            per_item[key] = per_item.get(key, 0) + 1
        elif isinstance(error, MergeError):
            # A merge conflict is counted both per conflicting source
            # and per dataset item.
            for source in error.sources:
                per_source[source] = per_source.get(source, 0) + 1
            key = str(error.item_id)
            per_item[key] = per_item.get(key, 0) + 1

        messages.append(str(error))

    report = OrderedDict([
        ('Item errors', per_item),
        ('Source errors', per_source),
        ('All errors', messages),
    ])

    with open(path, 'w') as f:
        json.dump(report, f, indent=4)
testbed/openvinotoolkit__datumaro/datumaro/plugins/__init__.py ADDED
File without changes
testbed/openvinotoolkit__datumaro/datumaro/plugins/accuracy_checker_plugin/launcher.py ADDED
@@ -0,0 +1,37 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+
2
+ # Copyright (C) 2020 Intel Corporation
3
+ #
4
+ # SPDX-License-Identifier: MIT
5
+
6
+ import os.path as osp
7
+ import yaml
8
+
9
+ from datumaro.components.cli_plugin import CliPlugin
10
+ from datumaro.components.launcher import Launcher
11
+
12
+ from .details.ac import GenericAcLauncher as _GenericAcLauncher
13
+
14
+
15
class AcLauncher(Launcher, CliPlugin):
    """
    Generic model launcher with Accuracy Checker backend.
    """

    @classmethod
    def build_cmdline_parser(cls, **kwargs):
        # Extend the base plugin parser with the AC configuration option.
        parser = super().build_cmdline_parser(**kwargs)
        parser.add_argument('-c', '--config', type=osp.abspath, required=True,
            help="Path to the launcher configuration file (.yml)")
        return parser

    def __init__(self, config, model_dir=None):
        # `config` is a YAML file path, resolved relative to `model_dir`
        # (or to the current directory when `model_dir` is not given).
        model_dir = model_dir or ''
        with open(osp.join(model_dir, config), 'r') as f:
            config = yaml.safe_load(f)
        self._launcher = _GenericAcLauncher.from_config(config)

    def launch(self, inputs):
        # Delegate inference to the wrapped Accuracy Checker launcher.
        return self._launcher.launch(inputs)

    def categories(self):
        # Label categories produced by the model, as reported by the backend.
        return self._launcher.categories()
testbed/openvinotoolkit__datumaro/datumaro/plugins/camvid_format.py ADDED
@@ -0,0 +1,344 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+
2
+ # Copyright (C) 2020 Intel Corporation
3
+ #
4
+ # SPDX-License-Identifier: MIT
5
+
6
+ import os
7
+ import os.path as osp
8
+ from collections import OrderedDict
9
+ from enum import Enum
10
+ from glob import glob
11
+
12
+ import numpy as np
13
+ from datumaro.components.converter import Converter
14
+ from datumaro.components.extractor import (AnnotationType, CompiledMask,
15
+ DatasetItem, Importer, LabelCategories, Mask,
16
+ MaskCategories, SourceExtractor)
17
+ from datumaro.util import find, str_to_bool
18
+ from datumaro.util.image import save_image
19
+ from datumaro.util.mask_tools import lazy_mask, paint_mask, generate_colormap
20
+
21
+
22
# Default CamVid label map: label name -> RGB color, following the original
# CamVid dataset definition. 'Void' must stay first: index 0 with color
# (0, 0, 0) is treated as the background class.
CamvidLabelMap = OrderedDict([
    ('Void', (0, 0, 0)),
    ('Animal', (64, 128, 64)),
    ('Archway', (192, 0, 128)),
    ('Bicyclist', (0, 128, 192)),
    ('Bridge', (0, 128, 64)),
    ('Building', (128, 0, 0)),
    ('Car', (64, 0, 128)),
    ('CartLuggagePram', (64, 0, 192)),
    ('Child', (192, 128, 64)),
    ('Column_Pole', (192, 192, 128)),
    ('Fence', (64, 64, 128)),
    ('LaneMkgsDriv', (128, 0, 192)),
    ('LaneMkgsNonDriv', (192, 0, 64)),
    ('Misc_Text', (128, 128, 64)),
    ('MotorcycycleScooter', (192, 0, 192)),
    ('OtherMoving', (128, 64, 64)),
    ('ParkingBlock', (64, 192, 128)),
    ('Pedestrian', (64, 64, 0)),
    ('Road', (128, 64, 128)),
    ('RoadShoulder', (128, 128, 192)),
    ('Sidewalk', (0, 0, 192)),
    ('SignSymbol', (192, 128, 128)),
    ('Sky', (128, 128, 128)),
    ('SUVPickupTruck', (64, 128, 192)),
    ('TrafficCone', (0, 0, 64)),
    ('TrafficLight', (0, 64, 64)),
    ('Train', (192, 64, 128)),
    ('Tree', (128, 128, 0)),
    ('Truck_Bus', (192, 128, 192)),
    ('Tunnel', (64, 0, 64)),
    ('VegetationMisc', (192, 192, 0)),
    ('Wall', (64, 192, 0))
    ])
56
+
57
class CamvidPath:
    """Well-known file names and extensions of the CamVid dataset layout."""

    # Name of the file with the label -> color mapping.
    LABELMAP_FILE = 'label_colors.txt'

    # Suffix of the per-subset directory with segmentation masks.
    SEGM_DIR = "annot"

    # Extension of image and mask files.
    IMAGE_EXT = '.png'
+
62
+
63
def parse_label_map(path):
    """Read a CamVid label map file.

    Each non-empty, non-comment line is either "R G B name" (4 fields)
    or just "name". Returns an OrderedDict mapping label name to an
    (R, G, B) tuple, or to None when no color is given. Returns None
    when `path` is falsy. Raises ValueError on duplicate label names.
    """
    if not path:
        return None

    label_map = OrderedDict()
    with open(path, 'r') as f:
        for raw_line in f:
            # Skip blank lines and '#' comments.
            stripped = raw_line.strip()
            if not stripped or stripped.startswith('#'):
                continue

            fields = stripped.split()
            if len(fields) > 2:
                # "R G B name" form (assumes exactly 4 fields:
                # name is at index 3, the preceding fields are the color).
                name = fields[3]
                color = tuple(int(c) for c in fields[:-1])
            else:
                # Name-only form: color is undefined.
                name = fields[0]
                color = None

            if name in label_map:
                raise ValueError("Label '%s' is already defined" % name)

            label_map[name] = color
    return label_map
+
91
def write_label_map(path, label_map):
    """Write `label_map` (name -> RGB tuple or None) in CamVid format.

    Each line is "R G B name"; a label without a color is written with
    an empty color prefix (a leading space before the name).
    """
    lines = []
    for name, color in label_map.items():
        rgb = ' '.join(str(c) for c in color) if color else ''
        lines.append('%s %s\n' % (rgb, name))
    with open(path, 'w') as f:
        f.writelines(lines)
+
100
def make_camvid_categories(label_map=None):
    """Build Datumaro categories from a CamVid label map.

    Ensures a background label with color (0, 0, 0) is at index 0, then
    creates LabelCategories for all names and MaskCategories with either
    the defined colors or a freshly generated colormap.
    """
    if label_map is None:
        label_map = CamvidLabelMap

    # There must always be a label with color (0, 0, 0) at index 0.
    background = find(label_map.items(), lambda e: e[1] == (0, 0, 0))
    if background is not None:
        background = background[0]
    else:
        background = 'background'
        if background not in label_map:
            # Give the synthetic background a color only when the other
            # labels define colors too.
            defines_colors = any(v is not None for v in label_map.values())
            label_map[background] = (0, 0, 0) if defines_colors else None
        label_map.move_to_end(background, last=False)

    categories = {}
    label_categories = LabelCategories()
    for name in label_map:
        label_categories.add(name)
    categories[AnnotationType.label] = label_categories

    defines_colors = any(v is not None for v in label_map.values())
    if defines_colors:
        # Copy only the explicitly defined colors.
        index_of = lambda name: label_categories.find(name)[0]
        colormap = { index_of(name): (color[0], color[1], color[2])
            for name, color in label_map.items() }
    else:
        # No colors defined - generate a fresh colormap.
        colormap = generate_colormap(len(label_map))
    mask_categories = MaskCategories(colormap)
    mask_categories.inverse_colormap # pylint: disable=pointless-statement
    categories[AnnotationType.mask] = mask_categories
    return categories
+
134
+
135
class CamvidExtractor(SourceExtractor):
    """Reads a CamVid subset from a .txt list file.

    Each line of the list file holds an image path and, optionally,
    the path of the corresponding segmentation mask.
    """
    def __init__(self, path):
        assert osp.isfile(path), path
        self._path = path
        self._dataset_dir = osp.dirname(path)
        # The subset name is the list file name without the extension.
        super().__init__(subset=osp.splitext(osp.basename(path))[0])

        self._categories = self._load_categories(self._dataset_dir)
        self._items = list(self._load_items(path).values())

    def _load_categories(self, path):
        # Prefer the label_colors.txt next to the list file; fall back to
        # the default CamVid label map when it is absent.
        label_map = None
        label_map_path = osp.join(path, CamvidPath.LABELMAP_FILE)
        if osp.isfile(label_map_path):
            label_map = parse_label_map(label_map_path)
        else:
            label_map = CamvidLabelMap
        self._labels = [label for label in label_map]
        return make_camvid_categories(label_map)

    def _load_items(self, path):
        items = {}
        with open(path, encoding='utf-8') as f:
            for line in f:
                objects = line.split()
                image = objects[0]
                # Item id: the image path without the two leading path
                # components and without the file extension.
                item_id = ('/'.join(image.split('/')[2:]))[:-len(CamvidPath.IMAGE_EXT)]
                # Strip a leading '/' (if any) before joining with the root.
                image_path = osp.join(self._dataset_dir,
                    (image, image[1:])[image[0] == '/'])
                item_annotations = []
                if 1 < len(objects):
                    # Second field is the ground-truth mask path.
                    gt = objects[1]
                    gt_path = osp.join(self._dataset_dir,
                        (gt, gt[1:]) [gt[0] == '/'])
                    inverse_cls_colormap = \
                        self._categories[AnnotationType.mask].inverse_colormap
                    mask = lazy_mask(gt_path, inverse_cls_colormap)
                    # loading mask through cache
                    mask = mask()
                    classes = np.unique(mask)
                    # Invert the name -> index mapping to index -> name.
                    labels = self._categories[AnnotationType.label]._indices
                    labels = { labels[label_name]: label_name
                        for label_name in labels }
                    # Emit one binary Mask per class present in the image.
                    for label_id in classes:
                        if labels[label_id] in self._labels:
                            image = self._lazy_extract_mask(mask, label_id)
                            item_annotations.append(Mask(image=image, label=label_id))
                items[item_id] = DatasetItem(id=item_id, subset=self._subset,
                    image=image_path, annotations=item_annotations)
        return items

    @staticmethod
    def _lazy_extract_mask(mask, c):
        # Deferred per-class binarization of the index mask.
        return lambda: mask == c
+
190
+
191
class CamvidImporter(Importer):
    """Detects CamVid sources: every top-level .txt file except the label map."""

    @classmethod
    def find_sources(cls, path):
        # Subset list files are the .txt files in the dataset root,
        # excluding label_colors.txt.
        candidates = [p for p in glob(osp.join(path, '**.txt'), recursive=True)
            if osp.basename(p) != CamvidPath.LABELMAP_FILE]
        sources = []
        for candidate in candidates:
            sources.extend(cls._find_sources_recursive(
                candidate, '.txt', 'camvid'))
        return sources
+
202
+
203
+ LabelmapType = Enum('LabelmapType', ['camvid', 'source'])
204
+
205
+ class CamvidConverter(Converter):
206
+ DEFAULT_IMAGE_EXT = '.png'
207
+
208
+ @classmethod
209
+ def build_cmdline_parser(cls, **kwargs):
210
+ parser = super().build_cmdline_parser(**kwargs)
211
+
212
+ parser.add_argument('--apply-colormap', type=str_to_bool, default=True,
213
+ help="Use colormap for class masks (default: %(default)s)")
214
+ parser.add_argument('--label-map', type=cls._get_labelmap, default=None,
215
+ help="Labelmap file path or one of %s" % \
216
+ ', '.join(t.name for t in LabelmapType))
217
+
218
+ def __init__(self, extractor, save_dir,
219
+ apply_colormap=True, label_map=None, **kwargs):
220
+ super().__init__(extractor, save_dir, **kwargs)
221
+
222
+ self._apply_colormap = apply_colormap
223
+
224
+ if label_map is None:
225
+ label_map = LabelmapType.source.name
226
+ self._load_categories(label_map)
227
+
228
+ def apply(self):
229
+ subset_dir = self._save_dir
230
+ os.makedirs(subset_dir, exist_ok=True)
231
+
232
+ for subset_name, subset in self._extractor.subsets().items():
233
+ segm_list = {}
234
+ for item in subset:
235
+ masks = [a for a in item.annotations
236
+ if a.type == AnnotationType.mask]
237
+
238
+ if masks:
239
+ compiled_mask = CompiledMask.from_instance_masks(masks,
240
+ instance_labels=[self._label_id_mapping(m.label)
241
+ for m in masks])
242
+
243
+ self.save_segm(osp.join(subset_dir,
244
+ subset_name + CamvidPath.SEGM_DIR,
245
+ item.id + CamvidPath.IMAGE_EXT),
246
+ compiled_mask.class_mask)
247
+ segm_list[item.id] = True
248
+ else:
249
+ segm_list[item.id] = False
250
+
251
+ if self._save_images:
252
+ self._save_image(item, osp.join(subset_dir, subset_name,
253
+ item.id + CamvidPath.IMAGE_EXT))
254
+
255
+ self.save_segm_lists(subset_name, segm_list)
256
+ self.save_label_map()
257
+
258
+ def save_segm(self, path, mask, colormap=None):
259
+ if self._apply_colormap:
260
+ if colormap is None:
261
+ colormap = self._categories[AnnotationType.mask].colormap
262
+ mask = paint_mask(mask, colormap)
263
+ save_image(path, mask, create_dir=True)
264
+
265
+ def save_segm_lists(self, subset_name, segm_list):
266
+ if not segm_list:
267
+ return
268
+
269
+ ann_file = osp.join(self._save_dir, subset_name + '.txt')
270
+ with open(ann_file, 'w') as f:
271
+ for item in segm_list:
272
+ if segm_list[item]:
273
+ path_mask = '/%s/%s' % (subset_name + CamvidPath.SEGM_DIR,
274
+ item + CamvidPath.IMAGE_EXT)
275
+ else:
276
+ path_mask = ''
277
+ f.write('/%s/%s %s\n' % (subset_name,
278
+ item + CamvidPath.IMAGE_EXT, path_mask))
279
+
280
+ def save_label_map(self):
281
+ path = osp.join(self._save_dir, CamvidPath.LABELMAP_FILE)
282
+ labels = self._extractor.categories()[AnnotationType.label]._indices
283
+ if len(self._label_map) > len(labels):
284
+ self._label_map.pop('background')
285
+ write_label_map(path, self._label_map)
286
+
287
+ def _load_categories(self, label_map_source):
288
+ if label_map_source == LabelmapType.camvid.name:
289
+ # use the default Camvid colormap
290
+ label_map = CamvidLabelMap
291
+
292
+ elif label_map_source == LabelmapType.source.name and \
293
+ AnnotationType.mask not in self._extractor.categories():
294
+ # generate colormap for input labels
295
+ labels = self._extractor.categories() \
296
+ .get(AnnotationType.label, LabelCategories())
297
+ label_map = OrderedDict((item.name, None)
298
+ for item in labels.items)
299
+
300
+ elif label_map_source == LabelmapType.source.name and \
301
+ AnnotationType.mask in self._extractor.categories():
302
+ # use source colormap
303
+ labels = self._extractor.categories()[AnnotationType.label]
304
+ colors = self._extractor.categories()[AnnotationType.mask]
305
+ label_map = OrderedDict()
306
+ for idx, item in enumerate(labels.items):
307
+ color = colors.colormap.get(idx)
308
+ if color is not None:
309
+ label_map[item.name] = color
310
+
311
+ elif isinstance(label_map_source, dict):
312
+ label_map = OrderedDict(
313
+ sorted(label_map_source.items(), key=lambda e: e[0]))
314
+
315
+ elif isinstance(label_map_source, str) and osp.isfile(label_map_source):
316
+ label_map = parse_label_map(label_map_source)
317
+
318
+ else:
319
+ raise Exception("Wrong labelmap specified, "
320
+ "expected one of %s or a file path" % \
321
+ ', '.join(t.name for t in LabelmapType))
322
+
323
+ self._categories = make_camvid_categories(label_map)
324
+ self._label_map = label_map
325
+ self._label_id_mapping = self._make_label_id_map()
326
+
327
+ def _make_label_id_map(self):
328
+ source_labels = {
329
+ id: label.name for id, label in
330
+ enumerate(self._extractor.categories().get(
331
+ AnnotationType.label, LabelCategories()).items)
332
+ }
333
+ target_labels = {
334
+ label.name: id for id, label in
335
+ enumerate(self._categories[AnnotationType.label].items)
336
+ }
337
+ id_mapping = {
338
+ src_id: target_labels.get(src_label, 0)
339
+ for src_id, src_label in source_labels.items()
340
+ }
341
+
342
+ def map_id(src_id):
343
+ return id_mapping.get(src_id, 0)
344
+ return map_id
testbed/openvinotoolkit__datumaro/datumaro/plugins/cvat_format/format.py ADDED
@@ -0,0 +1,9 @@
 
 
 
 
 
 
 
 
 
 
1
+
2
+ # Copyright (C) 2019-2020 Intel Corporation
3
+ #
4
+ # SPDX-License-Identifier: MIT
5
+
6
class CvatPath:
    """Well-known paths and extensions of the CVAT dataset layout."""

    # Directory with exported images, relative to the dataset root.
    IMAGES_DIR = 'images'

    # Extension used when saving images.
    IMAGE_EXT = '.jpg'
File without changes
testbed/openvinotoolkit__datumaro/datumaro/plugins/datumaro_format/converter.py ADDED
@@ -0,0 +1,258 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+
2
+ # Copyright (C) 2019-2020 Intel Corporation
3
+ #
4
+ # SPDX-License-Identifier: MIT
5
+
6
+ # pylint: disable=no-self-use
7
+
8
+ import json
9
+ import numpy as np
10
+ import os
11
+ import os.path as osp
12
+
13
+ from datumaro.components.converter import Converter
14
+ from datumaro.components.extractor import (
15
+ DEFAULT_SUBSET_NAME, Annotation, _Shape,
16
+ Label, Mask, RleMask, Points, Polygon, PolyLine, Bbox, Caption,
17
+ LabelCategories, MaskCategories, PointsCategories
18
+ )
19
+ from datumaro.util import cast
20
+ import pycocotools.mask as mask_utils
21
+
22
+ from .format import DatumaroPath
23
+
24
+
25
class _SubsetWriter:
    """Accumulates one subset's categories and items and dumps them as a
    single Datumaro-format JSON document."""

    def __init__(self, name, context):
        self._name = name        # subset name, used as the output file name
        self._context = context  # owning converter; provides image saving

        # top-level structure of the output JSON document
        self._data = {
            'info': {},
            'categories': {},
            'items': [],
        }

    @property
    def categories(self):
        return self._data['categories']

    @property
    def items(self):
        return self._data['items']

    def write_item(self, item):
        """Serializes a DatasetItem (and its annotations) into self.items."""
        annotations = []
        item_desc = {
            'id': item.id,
            'annotations': annotations,
        }
        if item.attributes:
            item_desc['attr'] = item.attributes
        if item.path:
            item_desc['path'] = item.path
        if item.has_image:
            path = item.image.path
            if self._context._save_images:
                # re-point the recorded path at the exported image copy
                path = self._context._make_image_filename(item)
                self._context._save_image(item, path)

            item_desc['image'] = {
                'size': item.image.size,
                'path': path,
            }
        self.items.append(item_desc)

        # NOTE: the isinstance order matters - more specific annotation
        # classes must be tested before their bases (e.g. Points before
        # Polygon); RleMask is handled by the Mask branch
        for ann in item.annotations:
            if isinstance(ann, Label):
                converted_ann = self._convert_label_object(ann)
            elif isinstance(ann, Mask):
                converted_ann = self._convert_mask_object(ann)
            elif isinstance(ann, Points):
                converted_ann = self._convert_points_object(ann)
            elif isinstance(ann, PolyLine):
                converted_ann = self._convert_polyline_object(ann)
            elif isinstance(ann, Polygon):
                converted_ann = self._convert_polygon_object(ann)
            elif isinstance(ann, Bbox):
                converted_ann = self._convert_bbox_object(ann)
            elif isinstance(ann, Caption):
                converted_ann = self._convert_caption_object(ann)
            else:
                raise NotImplementedError()
            annotations.append(converted_ann)

    def write_categories(self, categories):
        """Serializes the extractor's categories into self.categories,
        keyed by annotation type name."""
        for ann_type, desc in categories.items():
            if isinstance(desc, LabelCategories):
                converted_desc = self._convert_label_categories(desc)
            elif isinstance(desc, MaskCategories):
                converted_desc = self._convert_mask_categories(desc)
            elif isinstance(desc, PointsCategories):
                converted_desc = self._convert_points_categories(desc)
            else:
                raise NotImplementedError()
            self.categories[ann_type.name] = converted_desc

    def write(self, save_dir):
        """Dumps the accumulated data to '<save_dir>/<subset name>.json'."""
        with open(osp.join(save_dir, '%s.json' % (self._name)), 'w') as f:
            json.dump(self._data, f)

    def _convert_annotation(self, obj):
        """Common JSON fields shared by all annotation kinds."""
        assert isinstance(obj, Annotation)

        ann_json = {
            'id': cast(obj.id, int),
            'type': cast(obj.type.name, str),
            'attributes': obj.attributes,
            'group': cast(obj.group, int, 0),
        }
        return ann_json

    def _convert_label_object(self, obj):
        converted = self._convert_annotation(obj)

        converted.update({
            'label_id': cast(obj.label, int),
        })
        return converted

    def _convert_mask_object(self, obj):
        converted = self._convert_annotation(obj)

        if isinstance(obj, RleMask):
            rle = obj.rle
        else:
            # encode a dense mask as COCO RLE; pycocotools requires a
            # Fortran-ordered uint8 array
            rle = mask_utils.encode(
                np.require(obj.image, dtype=np.uint8, requirements='F'))

        converted.update({
            'label_id': cast(obj.label, int),
            'rle': {
                # serialize as compressed COCO mask
                'counts': rle['counts'].decode('ascii'),
                'size': list(int(c) for c in rle['size']),
            },
            'z_order': obj.z_order,
        })
        return converted

    def _convert_shape_object(self, obj):
        """Common serialization for point-list shapes (polygons etc.)."""
        assert isinstance(obj, _Shape)
        converted = self._convert_annotation(obj)

        converted.update({
            'label_id': cast(obj.label, int),
            'points': [float(p) for p in obj.points],
            'z_order': obj.z_order,
        })
        return converted

    def _convert_polyline_object(self, obj):
        return self._convert_shape_object(obj)

    def _convert_polygon_object(self, obj):
        return self._convert_shape_object(obj)

    def _convert_bbox_object(self, obj):
        # bboxes store [x, y, w, h] instead of the raw corner points
        converted = self._convert_shape_object(obj)
        converted.pop('points', None)
        converted['bbox'] = [float(p) for p in obj.get_bbox()]
        return converted

    def _convert_points_object(self, obj):
        converted = self._convert_shape_object(obj)

        converted.update({
            'visibility': [int(v.value) for v in obj.visibility],
        })
        return converted

    def _convert_caption_object(self, obj):
        converted = self._convert_annotation(obj)

        converted.update({
            'caption': cast(obj.caption, str),
        })
        return converted

    def _convert_label_categories(self, obj):
        converted = {
            'labels': [],
        }
        for label in obj.items:
            converted['labels'].append({
                'name': cast(label.name, str),
                'parent': cast(label.parent, str),
            })
        return converted

    def _convert_mask_categories(self, obj):
        converted = {
            'colormap': [],
        }
        for label_id, color in obj.colormap.items():
            converted['colormap'].append({
                'label_id': int(label_id),
                'r': int(color[0]),
                'g': int(color[1]),
                'b': int(color[2]),
            })
        return converted

    def _convert_points_categories(self, obj):
        converted = {
            'items': [],
        }
        for label_id, item in obj.items.items():
            converted['items'].append({
                'label_id': int(label_id),
                'labels': [cast(label, str) for label in item.labels],
                'joints': [list(map(int, j)) for j in item.joints],
            })
        return converted
214
+
215
class DatumaroConverter(Converter):
    """Exports a dataset in the native Datumaro layout: one JSON file per
    subset under 'annotations/', images under 'images/'."""

    DEFAULT_IMAGE_EXT = DatumaroPath.IMAGE_EXT

    def apply(self):
        os.makedirs(self._save_dir, exist_ok=True)

        images_dir = osp.join(self._save_dir, DatumaroPath.IMAGES_DIR)
        os.makedirs(images_dir, exist_ok=True)
        self._images_dir = images_dir

        annotations_dir = osp.join(self._save_dir, DatumaroPath.ANNOTATIONS_DIR)
        os.makedirs(annotations_dir, exist_ok=True)
        self._annotations_dir = annotations_dir

        # one writer per subset; every writer gets the full categories
        subsets = {s: _SubsetWriter(s, self) for s in self._extractor.subsets()}
        for subset, writer in subsets.items():
            writer.write_categories(self._extractor.categories())

        for item in self._extractor:
            subset = item.subset or DEFAULT_SUBSET_NAME
            writer = subsets[subset]

            writer.write_item(item)

        for subset, writer in subsets.items():
            writer.write(annotations_dir)

    def _save_image(self, item, path=None):
        # NOTE(review): the 'path' argument is ignored here - images are
        # always written under self._images_dir with the generated name
        super()._save_image(item,
            osp.join(self._images_dir, self._make_image_filename(item)))
245
+
246
class DatumaroProjectConverter(Converter):
    """Generates a full Datumaro project (config plus dataset directory)
    at save_dir and exports the dataset into it via DatumaroConverter."""

    @classmethod
    def convert(cls, extractor, save_dir, **kwargs):
        os.makedirs(save_dir, exist_ok=True)

        # imported locally, presumably to avoid a circular import with
        # datumaro.components.project - confirm before moving to file level
        from datumaro.components.project import Project
        project = Project.generate(save_dir,
            config=kwargs.pop('project_config', None))

        # the dataset goes into the project's configured dataset directory
        DatumaroConverter.convert(extractor,
            save_dir=osp.join(
                project.config.project_dir, project.config.dataset_dir),
            **kwargs)
testbed/openvinotoolkit__datumaro/datumaro/plugins/datumaro_format/format.py ADDED
@@ -0,0 +1,12 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+
2
+ # Copyright (C) 2019-2020 Intel Corporation
3
+ #
4
+ # SPDX-License-Identifier: MIT
5
+
6
class DatumaroPath:
    """Directory names and file extensions of the native Datumaro layout."""

    # Sub-directories of an exported dataset
    IMAGES_DIR = 'images'
    ANNOTATIONS_DIR = 'annotations'
    MASKS_DIR = 'masks'

    # Default file extensions
    IMAGE_EXT = '.jpg'
    MASK_EXT = '.png'
testbed/openvinotoolkit__datumaro/datumaro/plugins/image_dir.py ADDED
@@ -0,0 +1,52 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+
2
+ # Copyright (C) 2019-2020 Intel Corporation
3
+ #
4
+ # SPDX-License-Identifier: MIT
5
+
6
+ import logging as log
7
+ import os
8
+ import os.path as osp
9
+
10
+ from datumaro.components.extractor import DatasetItem, SourceExtractor, Importer
11
+ from datumaro.components.converter import Converter
12
+ from datumaro.util.image import Image
13
+
14
+
15
class ImageDirImporter(Importer):
    """Treats a plain directory of images as a single 'image_dir' source."""

    @classmethod
    def find_sources(cls, path):
        if osp.isdir(path):
            return [{ 'url': path, 'format': 'image_dir' }]
        return []
21
+
22
class ImageDirExtractor(SourceExtractor):
    """Loads every loadable image found (recursively) under `url` as a
    dataset item; item ids are file paths relative to `url` with the
    extension stripped."""

    def __init__(self, url):
        super().__init__()

        assert osp.isdir(url), url

        for dirpath, _, filenames in os.walk(url):
            for name in filenames:
                path = osp.join(dirpath, name)
                image = Image(path=path)
                try:
                    # force loading
                    image.data # pylint: disable=pointless-statement
                except Exception:
                    # not a loadable image - skip the file silently
                    continue

                item_id = osp.relpath(osp.splitext(path)[0], url)
                self._items.append(DatasetItem(id=item_id, image=image))
40
+
41
class ImageDirConverter(Converter):
    """Exports only the images of a dataset, one file per item."""

    DEFAULT_IMAGE_EXT = '.jpg'

    def apply(self):
        os.makedirs(self._save_dir, exist_ok=True)

        for item in self._extractor:
            if not item.has_image:
                log.debug("Item '%s' has no image info", item.id)
                continue
            dst_path = osp.join(self._save_dir,
                self._make_image_filename(item))
            self._save_image(item, dst_path)
testbed/openvinotoolkit__datumaro/datumaro/plugins/imagenet_format.py ADDED
@@ -0,0 +1,90 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+
2
+ # Copyright (C) 2020 Intel Corporation
3
+ #
4
+ # SPDX-License-Identifier: MIT
5
+
6
+ from glob import glob
7
+ import logging as log
8
+ import os
9
+ import os.path as osp
10
+
11
+ from datumaro.components.extractor import (DatasetItem, Label,
12
+ LabelCategories, AnnotationType, SourceExtractor, Importer
13
+ )
14
+ from datumaro.components.converter import Converter
15
+
16
+
17
class ImagenetPath:
    """Naming conventions of the ImageNet directory layout."""

    # Extension of image files
    IMAGES_EXT = '.jpg'
    # Directory for images that carry no label
    IMAGES_DIR_NO_LABEL = 'no_label'
20
+
21
+
22
class ImagenetExtractor(SourceExtractor):
    """Reads an ImageNet-style tree: one sub-directory per label with
    images named '<label>_<image name>.jpg'. Images in the special
    'no_label' directory produce items without Label annotations."""

    def __init__(self, path, subset=None):
        assert osp.isdir(path), path
        super().__init__(subset=subset)

        self._categories = self._load_categories(path)
        self._items = list(self._load_items(path).values())

    def _load_categories(self, path):
        # every sub-directory name is a label, except the no-label one
        label_cat = LabelCategories()
        for images_dir in sorted(os.listdir(path)):
            if images_dir != ImagenetPath.IMAGES_DIR_NO_LABEL:
                label_cat.add(images_dir)
        return { AnnotationType.label: label_cat }

    def _load_items(self, path):
        items = {}
        for image_path in glob(osp.join(path, '*', '*')):
            if osp.splitext(image_path)[1] != ImagenetPath.IMAGES_EXT:
                continue
            label = osp.basename(osp.dirname(image_path))
            # file names are '<label>_<image name><ext>' - drop the label prefix
            image_name = osp.splitext(osp.basename(image_path))[0][len(label) + 1:]
            # the same image may appear under several labels -
            # merge their annotations into one item
            item = items.get(image_name)
            if item is None:
                item = DatasetItem(id=image_name, subset=self._subset,
                    image=image_path)
            annotations = item.annotations
            if label != ImagenetPath.IMAGES_DIR_NO_LABEL:
                label = self._categories[AnnotationType.label].find(label)[0]
                annotations.append(Label(label=label))
            items[image_name] = item
        return items
54
+
55
+
56
class ImagenetImporter(Importer):
    """Detects an ImageNet-layout directory as a single source."""

    @classmethod
    def find_sources(cls, path):
        if osp.isdir(path):
            return [{ 'url': path, 'format': 'imagenet' }]
        return []
62
+
63
+
64
class ImagenetConverter(Converter):
    """Writes a dataset in ImageNet layout: one directory per label with
    images saved as '<label>_<item id>.jpg'; unlabeled items go to the
    'no_label' directory. Subset information is discarded."""

    DEFAULT_IMAGE_EXT = ImagenetPath.IMAGES_EXT

    def apply(self):
        # subsets cannot be represented by this layout
        if 1 < len(self._extractor.subsets()):
            log.warning("ImageNet format supports exporting only a single "
                "subset, subset information will not be used.")

        subset_dir = self._save_dir
        extractor = self._extractor
        labels = {}
        for item in self._extractor:
            image_name = item.id
            labels[image_name] = [p.label for p in item.annotations
                if p.type == AnnotationType.label]
            # an item with several labels gets one image copy per label
            for label in labels[image_name]:
                label_name = extractor.categories()[AnnotationType.label][label].name
                self._save_image(item, osp.join(subset_dir, label_name,
                    '%s_%s%s' % \
                    (label_name, image_name, ImagenetPath.IMAGES_EXT)
                ))

            if not labels[image_name]:
                self._save_image(item, osp.join(subset_dir,
                    ImagenetPath.IMAGES_DIR_NO_LABEL,
                    ImagenetPath.IMAGES_DIR_NO_LABEL + '_' +
                    image_name + ImagenetPath.IMAGES_EXT))
testbed/openvinotoolkit__datumaro/datumaro/plugins/imagenet_txt_format.py ADDED
@@ -0,0 +1,105 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+
2
+ # Copyright (C) 2020 Intel Corporation
3
+ #
4
+ # SPDX-License-Identifier: MIT
5
+
6
+ from glob import glob
7
+ import os
8
+ import os.path as osp
9
+
10
+ from datumaro.components.extractor import (DatasetItem, Label,
11
+ LabelCategories, AnnotationType, SourceExtractor, Importer
12
+ )
13
+ from datumaro.components.converter import Converter
14
+
15
+
16
class ImagenetTxtPath:
    """File-layout constants of the ImageNet txt-annotation format."""

    # File listing label names, one per line
    LABELS_FILE = 'synsets.txt'
    # Default directory holding the images
    IMAGE_DIR = 'images'
19
+
20
class ImagenetTxtExtractor(SourceExtractor):
    """Reads a '<subset>.txt' annotation file where each line is
    '<item id> <label id> [<label id> ...]'. Label names come from the
    `labels` argument or from the synsets file next to `path`."""

    def __init__(self, path, labels=None, image_dir=None):
        assert osp.isfile(path), path
        # the subset name is the annotation file name without extension
        super().__init__(subset=osp.splitext(osp.basename(path))[0])

        if not image_dir:
            image_dir = ImagenetTxtPath.IMAGE_DIR
        self.image_dir = osp.join(osp.dirname(path), image_dir)

        if labels is None:
            labels = osp.join(osp.dirname(path), ImagenetTxtPath.LABELS_FILE)
            labels = self._parse_labels(labels)
        else:
            assert all(isinstance(e, str) for e in labels)

        self._categories = self._load_categories(labels)
        self._items = list(self._load_items(path).values())

    @staticmethod
    def _parse_labels(path):
        with open(path, encoding='utf-8') as labels_file:
            return [s.strip() for s in labels_file]

    def _load_categories(self, labels):
        return { AnnotationType.label: LabelCategories().from_iterable(labels) }

    def _load_items(self, path):
        items = {}
        with open(path, encoding='utf-8') as f:
            for line in f:
                item = line.split()
                item_id = item[0]
                # NOTE(review): 'id' shadows the builtin here
                label_ids = [int(id) for id in item[1:]]
                anno = []
                for label in label_ids:
                    # NOTE(review): data validation via assert is stripped
                    # under 'python -O'; kept as-is for compatibility
                    assert 0 <= label and \
                        label < len(self._categories[AnnotationType.label]), \
                        "Image '%s': unknown label id '%s'" % (item_id, label)
                    anno.append(Label(label))
                items[item_id] = DatasetItem(id=item_id, subset=self._subset,
                    image=osp.join(self.image_dir, item_id + '.jpg'),
                    annotations=anno)
        return items
63
+
64
+
65
class ImagenetTxtImporter(Importer):
    """Finds '*.txt' subset files (excluding the labels file) and reports
    each as an 'imagenet_txt' source."""

    @classmethod
    def find_sources(cls, path):
        sources = []
        for subset_path in glob(osp.join(path, '*.txt')):
            if osp.basename(subset_path) == ImagenetTxtPath.LABELS_FILE:
                continue
            sources.extend(cls._find_sources_recursive(
                subset_path, '.txt', 'imagenet_txt'))
        return sources
75
+
76
+
77
class ImagenetTxtConverter(Converter):
    """Writes one '<subset>.txt' annotation file per subset, optionally
    saving images under 'images/', plus a single synsets.txt label list."""

    DEFAULT_IMAGE_EXT = '.jpg'

    def apply(self):
        subset_dir = self._save_dir
        os.makedirs(subset_dir, exist_ok=True)

        extractor = self._extractor
        for subset_name, subset in self._extractor.subsets().items():
            annotation_file = osp.join(subset_dir, '%s.txt' % subset_name)
            labels = {}
            for item in subset:
                # label ids are written as space-separated integers
                labels[item.id] = [str(p.label) for p in item.annotations
                    if p.type == AnnotationType.label]

                if self._save_images and item.has_image:
                    self._save_image(item,
                        osp.join(self._save_dir, ImagenetTxtPath.IMAGE_DIR,
                            self._make_image_filename(item)))

            with open(annotation_file, 'w', encoding='utf-8') as f:
                f.writelines(['%s %s\n' % (item_id, ' '.join(labels[item_id]))
                    for item_id in labels])

        # label names, one per line, shared by all subsets
        labels_file = osp.join(subset_dir, ImagenetTxtPath.LABELS_FILE)
        with open(labels_file, 'w', encoding='utf-8') as f:
            f.write('\n'.join(l.name
                for l in extractor.categories()[AnnotationType.label])
            )
testbed/openvinotoolkit__datumaro/datumaro/plugins/labelme_format.py ADDED
@@ -0,0 +1,393 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (C) 2020 Intel Corporation
2
+ #
3
+ # SPDX-License-Identifier: MIT
4
+
5
+ from collections import defaultdict
6
+ from defusedxml import ElementTree
7
+ import logging as log
8
+ import numpy as np
9
+ import os
10
+ import os.path as osp
11
+
12
+ from datumaro.components.extractor import (SourceExtractor, Importer,
13
+ DatasetItem, AnnotationType, Mask, Bbox, Polygon, LabelCategories
14
+ )
15
+ from datumaro.components.converter import Converter
16
+ from datumaro.util.image import Image, save_image
17
+ from datumaro.util.mask_tools import load_mask, find_mask_bbox
18
+
19
+
20
class LabelMePath:
    """Filesystem constants of the LabelMe dataset layout."""

    # Sub-directory with instance mask images
    MASKS_DIR = 'Masks'
    # Default image extension
    IMAGE_EXT = '.jpg'
23
+
24
class LabelMeExtractor(SourceExtractor):
    """Parses a directory of LabelMe XML annotation files into dataset
    items with bbox, polygon and mask annotations; labels are discovered
    on the fly while parsing."""

    def __init__(self, path, subset_name=None):
        assert osp.isdir(path), path
        super().__init__(subset=subset_name)

        items, categories = self._parse(path)
        self._categories = categories
        self._items = items

    def _parse(self, path):
        """Parses every '*.xml' file in `path`; returns (items, categories)."""
        categories = {
            AnnotationType.label: LabelCategories(attributes={
                'occluded', 'username'
            })
        }

        items = []
        for p in os.listdir(path):
            if not p.endswith('.xml'):
                continue
            root = ElementTree.parse(osp.join(path, p))

            # item id is '<folder>/<filename>' with the extension stripped
            item_id = osp.join(root.find('folder').text or '',
                root.find('filename').text)
            image_path = osp.join(path, item_id)
            image_size = None
            imagesize_elem = root.find('imagesize')
            if imagesize_elem is not None:
                width_elem = imagesize_elem.find('ncols')
                height_elem = imagesize_elem.find('nrows')
                # stored as (height, width)
                image_size = (int(height_elem.text), int(width_elem.text))
            image = Image(path=image_path, size=image_size)

            annotations = self._parse_annotations(root, path, categories)

            items.append(DatasetItem(id=osp.splitext(item_id)[0],
                subset=self._subset, image=image, annotations=annotations))
        return items, categories

    @classmethod
    def _parse_annotations(cls, xml_root, dataset_root, categories):
        def parse_attributes(attr_str):
            # parses 'k=v, flag, ...' into (name, value) pairs; bare names
            # become True, 'true'/'false' become bools, numbers - floats
            parsed = []
            if not attr_str:
                return parsed

            for attr in [a.strip() for a in attr_str.split(',') if a.strip()]:
                if '=' in attr:
                    name, value = attr.split('=', maxsplit=1)
                    if value.lower() in {'true', 'false'}:
                        value = value.lower() == 'true'
                    else:
                        try:
                            value = float(value)
                        except ValueError:
                            pass
                    parsed.append((name, value))
                else:
                    parsed.append((attr, True))

            return parsed

        label_cat = categories[AnnotationType.label]
        def get_label_id(label):
            # registers previously unseen labels in the shared categories
            if not label:
                return None
            idx, _ = label_cat.find(label)
            if idx is None:
                idx = label_cat.add(label)
            return idx

        image_annotations = []

        parsed_annotations = dict()
        group_assignments = dict()
        root_annotations = set()
        for obj_elem in xml_root.iter('object'):
            obj_id = int(obj_elem.find('id').text)

            ann_items = []

            label = get_label_id(obj_elem.find('name').text)

            attributes = []
            attributes_elem = obj_elem.find('attributes')
            if attributes_elem is not None and attributes_elem.text:
                attributes = parse_attributes(attributes_elem.text)

            occluded = False
            occluded_elem = obj_elem.find('occluded')
            if occluded_elem is not None and occluded_elem.text:
                occluded = (occluded_elem.text == 'yes')
            attributes.append(('occluded', occluded))

            deleted = False
            deleted_elem = obj_elem.find('deleted')
            if deleted_elem is not None and deleted_elem.text:
                deleted = bool(int(deleted_elem.text))

            user = ''

            poly_elem = obj_elem.find('polygon')
            segm_elem = obj_elem.find('segm')
            type_elem = obj_elem.find('type') # the only value is 'bounding_box'
            if poly_elem is not None:
                user_elem = poly_elem.find('username')
                if user_elem is not None and user_elem.text:
                    user = user_elem.text
                attributes.append(('username', user))

                # flatten pt elements into [x0, y0, x1, y1, ...]
                points = []
                for point_elem in poly_elem.iter('pt'):
                    x = float(point_elem.find('x').text)
                    y = float(point_elem.find('y').text)
                    points.append(x)
                    points.append(y)

                if type_elem is not None and type_elem.text == 'bounding_box':
                    # a polygon marked 'bounding_box' is converted to a Bbox
                    xmin = min(points[::2])
                    xmax = max(points[::2])
                    ymin = min(points[1::2])
                    ymax = max(points[1::2])
                    ann_items.append(Bbox(xmin, ymin, xmax - xmin, ymax - ymin,
                        label=label, attributes=attributes, id=obj_id,
                    ))
                else:
                    ann_items.append(Polygon(points,
                        label=label, attributes=attributes, id=obj_id,
                    ))
            elif segm_elem is not None:
                user_elem = segm_elem.find('username')
                if user_elem is not None and user_elem.text:
                    user = user_elem.text
                attributes.append(('username', user))

                mask_path = osp.join(dataset_root, LabelMePath.MASKS_DIR,
                    segm_elem.find('mask').text)
                if not osp.isfile(mask_path):
                    raise Exception("Can't find mask at '%s'" % mask_path)
                mask = load_mask(mask_path)
                # any non-zero channel value marks a foreground pixel
                mask = np.any(mask, axis=2)
                ann_items.append(Mask(image=mask, label=label, id=obj_id,
                    attributes=attributes))

            if not deleted:
                parsed_annotations[obj_id] = ann_items

            # Find parents and children
            parts_elem = obj_elem.find('parts')
            if parts_elem is not None:
                children_ids = []
                hasparts_elem = parts_elem.find('hasparts')
                if hasparts_elem is not None and hasparts_elem.text:
                    children_ids = [int(c) for c in hasparts_elem.text.split(',')]

                parent_ids = []
                ispartof_elem = parts_elem.find('ispartof')
                if ispartof_elem is not None and ispartof_elem.text:
                    parent_ids = [int(c) for c in ispartof_elem.text.split(',')]

                # objects with children but no parents start a new group
                if children_ids and not parent_ids and hasparts_elem.text:
                    root_annotations.add(obj_id)
                group_assignments[obj_id] = [None, children_ids]

        # assign single group to all grouped annotations
        current_group_id = 0
        annotations_to_visit = list(root_annotations)
        while annotations_to_visit:
            ann_id = annotations_to_visit.pop()
            ann_assignment = group_assignments[ann_id]
            group_id, children_ids = ann_assignment
            if group_id:
                # already visited via another path
                continue

            if ann_id in root_annotations:
                current_group_id += 1 # start a new group

            group_id = current_group_id
            ann_assignment[0] = group_id

            # continue with children
            annotations_to_visit.extend(children_ids)

        assert current_group_id == len(root_annotations)

        for ann_id, ann_items in parsed_annotations.items():
            group_id = 0
            if ann_id in group_assignments:
                ann_assignment = group_assignments[ann_id]
                group_id = ann_assignment[0]

            for ann_item in ann_items:
                if group_id:
                    ann_item.group = group_id

                image_annotations.append(ann_item)

        return image_annotations
222
+
223
+
224
class LabelMeImporter(Importer):
    """Finds LabelMe sources: either `path` itself contains '*.xml'
    annotations, or each annotated sub-directory becomes a subset."""

    EXTRACTOR = 'label_me'

    @classmethod
    def find_sources(cls, path):
        if not osp.isdir(path):
            return []

        path = osp.normpath(path)

        def has_annotations(d):
            return any(p.endswith('.xml') for p in os.listdir(d))

        if has_annotations(path):
            return [{'url': path, 'format': cls.EXTRACTOR}]

        found = []
        for entry in os.listdir(path):
            subset_dir = osp.join(path, entry)
            if osp.isdir(subset_dir) and has_annotations(subset_dir):
                found.append({
                    'url': subset_dir,
                    'format': cls.EXTRACTOR,
                    'options': {'subset_name': entry},
                })
        return found
249
+
250
+
251
class LabelMeConverter(Converter):
    """Writes a dataset in LabelMe layout: one XML file per item, one
    directory per subset, mask images under 'Masks/'."""

    DEFAULT_IMAGE_EXT = LabelMePath.IMAGE_EXT

    def apply(self):
        for subset_name, subset in self._extractor.subsets().items():
            subset_dir = osp.join(self._save_dir, subset_name)
            os.makedirs(subset_dir, exist_ok=True)
            os.makedirs(osp.join(subset_dir, LabelMePath.MASKS_DIR),
                exist_ok=True)

            for index, item in enumerate(subset):
                self._save_item(item, subset_dir, index)

    def _get_label(self, label_id):
        """Returns the label name for an id, or '' for unlabeled shapes."""
        if label_id is None:
            return ''
        return self._extractor.categories()[AnnotationType.label][label_id].name

    def _save_item(self, item, subset_dir, index):
        """Writes one item as 'item_<index>.xml' (plus its image/masks)."""
        from lxml import etree as ET

        log.debug("Converting item '%s'", item.id)

        image_filename = self._make_image_filename(item)
        if self._save_images:
            if item.has_image and item.image.has_data:
                self._save_image(item, osp.join(subset_dir, image_filename))
            else:
                log.debug("Item '%s' has no image", item.id)

        root_elem = ET.Element('annotation')
        ET.SubElement(root_elem, 'filename').text = osp.basename(image_filename)
        ET.SubElement(root_elem, 'folder').text = osp.dirname(image_filename)

        source_elem = ET.SubElement(root_elem, 'source')
        ET.SubElement(source_elem, 'sourceImage').text = ''
        ET.SubElement(source_elem, 'sourceAnnotation').text = 'Datumaro'

        if item.has_image:
            image_elem = ET.SubElement(root_elem, 'imagesize')
            image_size = item.image.size
            ET.SubElement(image_elem, 'nrows').text = str(image_size[0])
            ET.SubElement(image_elem, 'ncols').text = str(image_size[1])

        # group id -> [(obj_id, parts_elem), ...]; first entry is the leader
        groups = defaultdict(list)

        obj_id = 0
        for ann in item.annotations:
            # only shape-like annotations can be represented
            if not ann.type in { AnnotationType.polygon,
                    AnnotationType.bbox, AnnotationType.mask }:
                continue

            obj_elem = ET.SubElement(root_elem, 'object')
            ET.SubElement(obj_elem, 'name').text = self._get_label(ann.label)
            ET.SubElement(obj_elem, 'deleted').text = '0'
            ET.SubElement(obj_elem, 'verified').text = '0'
            # NOTE(review): pop() mutates the source annotation's attributes
            ET.SubElement(obj_elem, 'occluded').text = \
                'yes' if ann.attributes.pop('occluded', '') == True else 'no'
            ET.SubElement(obj_elem, 'date').text = ''
            ET.SubElement(obj_elem, 'id').text = str(obj_id)

            # parts links are filled in after all objects are written
            parts_elem = ET.SubElement(obj_elem, 'parts')
            if ann.group:
                groups[ann.group].append((obj_id, parts_elem))
            else:
                ET.SubElement(parts_elem, 'hasparts').text = ''
                ET.SubElement(parts_elem, 'ispartof').text = ''

            if ann.type == AnnotationType.bbox:
                # bboxes are stored as 4-point polygons tagged 'bounding_box'
                ET.SubElement(obj_elem, 'type').text = 'bounding_box'

                poly_elem = ET.SubElement(obj_elem, 'polygon')
                x0, y0, x1, y1 = ann.points
                points = [ (x0, y0), (x1, y0), (x1, y1), (x0, y1) ]
                for x, y in points:
                    point_elem = ET.SubElement(poly_elem, 'pt')
                    ET.SubElement(point_elem, 'x').text = '%.2f' % x
                    ET.SubElement(point_elem, 'y').text = '%.2f' % y

                ET.SubElement(poly_elem, 'username').text = \
                    str(ann.attributes.pop('username', ''))
            elif ann.type == AnnotationType.polygon:
                poly_elem = ET.SubElement(obj_elem, 'polygon')
                for x, y in zip(ann.points[::2], ann.points[1::2]):
                    point_elem = ET.SubElement(poly_elem, 'pt')
                    ET.SubElement(point_elem, 'x').text = '%.2f' % x
                    ET.SubElement(point_elem, 'y').text = '%.2f' % y

                ET.SubElement(poly_elem, 'username').text = \
                    str(ann.attributes.pop('username', ''))
            elif ann.type == AnnotationType.mask:
                # masks are saved as separate PNG files under Masks/
                mask_filename = '%s_mask_%s.png' % \
                    (item.id.replace('/', '_'), obj_id)
                save_image(osp.join(subset_dir, LabelMePath.MASKS_DIR,
                        mask_filename),
                    self._paint_mask(ann.image))

                segm_elem = ET.SubElement(obj_elem, 'segm')
                ET.SubElement(segm_elem, 'mask').text = mask_filename

                bbox = find_mask_bbox(ann.image)
                box_elem = ET.SubElement(segm_elem, 'box')
                ET.SubElement(box_elem, 'xmin').text = '%.2f' % bbox[0]
                ET.SubElement(box_elem, 'ymin').text = '%.2f' % bbox[1]
                ET.SubElement(box_elem, 'xmax').text = \
                    '%.2f' % (bbox[0] + bbox[2])
                ET.SubElement(box_elem, 'ymax').text = \
                    '%.2f' % (bbox[1] + bbox[3])

                ET.SubElement(segm_elem, 'username').text = \
                    str(ann.attributes.pop('username', ''))
            else:
                raise NotImplementedError("Unknown shape type '%s'" % ann.type)

            # remaining attributes are written as 'k=v, ...'
            attrs = []
            for k, v in ann.attributes.items():
                attrs.append('%s=%s' % (k, v))
            ET.SubElement(obj_elem, 'attributes').text = ', '.join(attrs)

            obj_id += 1

        # link group members: the first object is the leader, the rest
        # are its parts
        for _, group in groups.items():
            leader_id, leader_parts_elem = group[0]
            leader_parts = [str(o_id) for o_id, _ in group[1:]]
            ET.SubElement(leader_parts_elem, 'hasparts').text = \
                ','.join(leader_parts)
            ET.SubElement(leader_parts_elem, 'ispartof').text = ''

            for obj_id, parts_elem in group[1:]:
                ET.SubElement(parts_elem, 'hasparts').text = ''
                ET.SubElement(parts_elem, 'ispartof').text = str(leader_id)

        xml_path = osp.join(subset_dir, 'item_%09d.xml' % index)
        with open(xml_path, 'w', encoding='utf-8') as f:
            xml_data = ET.tostring(root_elem, encoding='unicode',
                pretty_print=True)
            f.write(xml_data)

    @staticmethod
    def _paint_mask(mask):
        """Turns a binary mask into an RGBA image (background transparent,
        foreground a fixed yellow)."""
        # TODO: check if mask colors are random
        return np.array([[0, 0, 0, 0], [255, 203, 0, 153]],
            dtype=np.uint8)[mask.astype(np.uint8)]
testbed/openvinotoolkit__datumaro/datumaro/plugins/mot_format.py ADDED
@@ -0,0 +1,271 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (C) 2020 Intel Corporation
2
+ #
3
+ # SPDX-License-Identifier: MIT
4
+
5
+ # The Multiple Object Tracking Benchmark challenge format support
6
+ # Format description: https://arxiv.org/pdf/1906.04567.pdf
7
+ # Another description: https://motchallenge.net/instructions
8
+
9
+ from collections import OrderedDict
10
+ import csv
11
+ from enum import Enum
12
+ import logging as log
13
+ import os
14
+ import os.path as osp
15
+
16
+ from datumaro.components.extractor import (SourceExtractor, Importer,
17
+ DatasetItem, AnnotationType, Bbox, LabelCategories
18
+ )
19
+ from datumaro.components.converter import Converter
20
+ from datumaro.util import cast
21
+ from datumaro.util.image import Image
22
+
23
+
24
# Default label set of the MOT Challenge ground truth.
# Values are the 1-based class ids used in the annotation files.
MotLabel = Enum('MotLabel', [
    ('pedestrian', 1),
    ('person on vehicle', 2),
    ('car', 3),
    ('bicycle', 4),
    ('motorbike', 5),
    ('non motorized vehicle', 6),
    ('static person', 7),
    ('distractor', 8),
    ('occluder', 9),
    ('occluder on the ground', 10),
    ('occluder full', 11),
    ('reflection', 12),
])
38
+
39
class MotPath:
    """File and directory naming conventions of a MOT sequence."""
    IMAGE_DIR = 'img1'            # default image subdirectory
    SEQINFO_FILE = 'seqinfo.ini'  # optional sequence metadata file
    LABELS_FILE = 'labels.txt'    # optional custom label list
    GT_FILENAME = 'gt.txt'        # ground-truth annotations
    DET_FILENAME = 'det.txt'      # detector output annotations

    IMAGE_EXT = '.jpg'

    # Column order of the CSV annotation files. Files may have fewer
    # columns (7, 9 or 10 variants exist); missing trailing fields are
    # read as None by csv.DictReader.
    FIELDS = [
        'frame_id',
        'track_id',
        'x',
        'y',
        'w',
        'h',
        'confidence', # or 'not ignored' flag for GT anns
        'class_id',
        'visibility'
    ]
59
+
60
+
61
class MotSeqExtractor(SourceExtractor):
    """Reads a single MOT sequence from its annotation file (gt.txt/det.txt).

    The sequence root is assumed to be two directory levels above the
    annotation file (e.g. <seq>/gt/gt.txt). Images are looked up in
    <seq>/img1 or in the directory named by seqinfo.ini, if present.
    """

    def __init__(self, path, labels=None, occlusion_threshold=0, is_gt=None):
        """
        Parameters:
            path: path to the annotation .txt file
            labels: a labels.txt path or a list of label names; defaults to
                a labels.txt next to the annotations, or the standard
                MotLabel set if no such file exists
            occlusion_threshold: GT boxes with visibility <= this value
                are marked 'occluded'
            is_gt: force GT/detection interpretation; when None it is
                inferred from the file name (det.txt -> detections)
        """
        super().__init__()

        assert osp.isfile(path)
        seq_root = osp.dirname(osp.dirname(path))
        self._image_dir = ''
        if osp.isdir(osp.join(seq_root, MotPath.IMAGE_DIR)):
            self._image_dir = osp.join(seq_root, MotPath.IMAGE_DIR)

        # seqinfo.ini, when present, overrides the image directory and
        # provides image size and sequence length
        seq_info = osp.join(seq_root, MotPath.SEQINFO_FILE)
        if osp.isfile(seq_info):
            seq_info = self._parse_seq_info(seq_info)
            self._image_dir = osp.join(seq_root, seq_info['imdir'])
        else:
            seq_info = None
        self._seq_info = seq_info

        self._occlusion_threshold = float(occlusion_threshold)

        assert is_gt in {None, True, False}
        if is_gt is None:
            if osp.basename(path) == MotPath.DET_FILENAME:
                is_gt = False
            else:
                is_gt = True
        self._is_gt = is_gt

        if labels is None:
            labels = osp.join(osp.dirname(path), MotPath.LABELS_FILE)
            if not osp.isfile(labels):
                labels = [lbl.name for lbl in MotLabel]
        if isinstance(labels, str):
            labels = self._parse_labels(labels)
        elif isinstance(labels, list):
            assert all(isinstance(lbl, str) for lbl in labels), labels
        else:
            raise TypeError("Unexpected type of 'labels' argument: %s" % labels)
        self._categories = self._load_categories(labels)
        self._items = list(self._load_items(path).values())

    @staticmethod
    def _parse_labels(path):
        # One label name per line
        with open(path, encoding='utf-8') as labels_file:
            return [s.strip() for s in labels_file]

    def _load_categories(self, labels):
        # GT and detection annotations expose different attribute sets
        attributes = ['track_id']
        if self._is_gt:
            attributes += ['occluded', 'visibility', 'ignored']
        else:
            attributes += ['score']
        label_cat = LabelCategories(attributes=attributes)
        for label in labels:
            label_cat.add(label)

        return { AnnotationType.label: label_cat }

    def _load_items(self, path):
        """Build a frame_id -> DatasetItem map from images and annotations."""
        labels_count = len(self._categories[AnnotationType.label].items)
        items = OrderedDict()

        # Pre-create items for all frames: from seqinfo metadata when
        # available, otherwise from the files found in the image directory
        if self._seq_info:
            for frame_id in range(self._seq_info['seqlength']):
                items[frame_id] = DatasetItem(
                    id=frame_id,
                    subset=self._subset,
                    image=Image(
                        path=osp.join(self._image_dir,
                            '%06d%s' % (frame_id, self._seq_info['imext'])),
                        size=(self._seq_info['imheight'], self._seq_info['imwidth'])
                    )
                )
        elif osp.isdir(self._image_dir):
            for p in os.listdir(self._image_dir):
                if p.endswith(MotPath.IMAGE_EXT):
                    frame_id = int(osp.splitext(p)[0])
                    items[frame_id] = DatasetItem(
                        id=frame_id,
                        subset=self._subset,
                        image=osp.join(self._image_dir, p),
                    )

        with open(path, newline='', encoding='utf-8') as csv_file:
            # NOTE: Different MOT files have different count of fields
            # (7, 9 or 10). This is handled by reader:
            # - all extra fields go to a separate field
            # - all unmet fields have None values
            for row in csv.DictReader(csv_file, fieldnames=MotPath.FIELDS):
                frame_id = int(row['frame_id'])
                item = items.get(frame_id)
                if item is None:
                    # Annotation for a frame with no known image
                    item = DatasetItem(id=frame_id, subset=self._subset)
                annotations = item.annotations

                x, y = float(row['x']), float(row['y'])
                w, h = float(row['w']), float(row['h'])
                # class_id is 1-based in the file; -1 / empty means unknown
                label_id = row.get('class_id')
                if label_id and label_id != '-1':
                    label_id = int(label_id) - 1
                    assert label_id < labels_count, label_id
                else:
                    label_id = None

                attributes = {}

                # Annotations for detection task are not related to any track
                track_id = int(row['track_id'])
                if 0 < track_id:
                    attributes['track_id'] = track_id

                # Missing fields default to 1 (fully confident / visible)
                confidence = cast(row.get('confidence'), float, 1)
                visibility = cast(row.get('visibility'), float, 1)
                if self._is_gt:
                    attributes['visibility'] = visibility
                    attributes['occluded'] = \
                        visibility <= self._occlusion_threshold
                    # In GT files the 'confidence' column is a
                    # "considered" flag; 0 means ignored
                    attributes['ignored'] = confidence == 0
                else:
                    attributes['score'] = float(confidence)

                annotations.append(Bbox(x, y, w, h, label=label_id,
                    attributes=attributes))

                items[frame_id] = item
        return items

    @classmethod
    def _parse_seq_info(cls, path):
        """Parse seqinfo.ini into a dict of lowercase key/value pairs.

        NOTE(review): line.lower() lowercases the values too, so a
        mixed-case image directory or extension would be mangled -
        confirm against real seqinfo.ini files.
        """
        fields = {}
        with open(path, encoding='utf-8') as f:
            for line in f:
                entry = line.lower().strip().split('=', maxsplit=1)
                if len(entry) == 2:
                    fields[entry[0]] = entry[1]
        cls._check_seq_info(fields)
        for k in { 'framerate', 'seqlength', 'imwidth', 'imheight' }:
            fields[k] = int(fields[k])
        return fields

    @staticmethod
    def _check_seq_info(seq_info):
        # All keys must be present, no extras
        assert set(seq_info) == {'name', 'imdir', 'framerate', 'seqlength', 'imwidth', 'imheight', 'imext'}, seq_info
204
+
205
class MotSeqImporter(Importer):
    """Discovers MOT sequences by searching for 'gt/gt.txt' files."""
    @classmethod
    def find_sources(cls, path):
        # Match only files named gt.txt located in a 'gt' subdirectory
        return cls._find_sources_recursive(path, '.txt', 'mot_seq',
            filename=osp.join('gt', osp.splitext(MotPath.GT_FILENAME)[0]))
210
+
211
class MotSeqGtConverter(Converter):
    """Writes a dataset as a single MOT sequence with GT annotations.

    Produces <save_dir>/gt/gt.txt, <save_dir>/gt/labels.txt and,
    optionally, images in <save_dir>/img1.
    """

    DEFAULT_IMAGE_EXT = MotPath.IMAGE_EXT

    def apply(self):
        extractor = self._extractor

        images_dir = osp.join(self._save_dir, MotPath.IMAGE_DIR)
        os.makedirs(images_dir, exist_ok=True)
        self._images_dir = images_dir

        anno_dir = osp.join(self._save_dir, 'gt')
        os.makedirs(anno_dir, exist_ok=True)
        anno_file = osp.join(anno_dir, MotPath.GT_FILENAME)
        # fix: the csv module requires newline='' on the output file,
        # otherwise '\r\r\n' line endings are produced on Windows
        with open(anno_file, 'w', newline='', encoding="utf-8") as csv_file:
            writer = csv.DictWriter(csv_file, fieldnames=MotPath.FIELDS)

            # Remap arbitrary track ids to a dense 1..N range;
            # -1 (no track) is kept as is
            track_id_mapping = {-1: -1}
            for idx, item in enumerate(extractor):
                log.debug("Converting item '%s'", item.id)

                # Frame ids must be integers; fall back to the item index
                frame_id = cast(item.id, int, 1 + idx)

                for anno in item.annotations:
                    if anno.type != AnnotationType.bbox:
                        continue

                    track_id = int(anno.attributes.get('track_id', -1))
                    if track_id not in track_id_mapping:
                        track_id_mapping[track_id] = len(track_id_mapping)
                    track_id = track_id_mapping[track_id]

                    writer.writerow({
                        'frame_id': frame_id,
                        'track_id': track_id,
                        'x': anno.x,
                        'y': anno.y,
                        'w': anno.w,
                        'h': anno.h,
                        # In GT files 'confidence' is a "considered"
                        # flag: 0 means the box is ignored
                        'confidence': int(not anno.attributes.get(
                            'ignored', False)),
                        # class_id is 1-based in the file; unlabeled
                        # boxes become -1
                        'class_id': 1 + cast(anno.label, int, -2),
                        # Derive visibility from 'occluded' when it is
                        # not given explicitly
                        'visibility': float(
                            anno.attributes.get('visibility',
                                1 - float(
                                    anno.attributes.get('occluded', False)
                                )
                            )
                        )
                    })

                if self._save_images:
                    if item.has_image and item.image.has_data:
                        self._save_image(item, osp.join(self._images_dir,
                            '%06d%s' % (frame_id, self._find_image_ext(item))))
                    else:
                        log.debug("Item '%s' has no image", item.id)

        labels_file = osp.join(anno_dir, MotPath.LABELS_FILE)
        with open(labels_file, 'w', encoding='utf-8') as f:
            f.write('\n'.join(l.name
                for l in extractor.categories()[AnnotationType.label])
            )
testbed/openvinotoolkit__datumaro/datumaro/plugins/mots_format.py ADDED
@@ -0,0 +1,145 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (C) 2020 Intel Corporation
2
+ #
3
+ # SPDX-License-Identifier: MIT
4
+
5
+ # Implements MOTS format https://www.vision.rwth-aachen.de/page/mots
6
+
7
+ from enum import Enum
8
+ from glob import glob
9
+ import logging as log
10
+ import numpy as np
11
+ import os
12
+ import os.path as osp
13
+
14
+ from datumaro.components.extractor import (SourceExtractor, Importer,
15
+ DatasetItem, AnnotationType, Mask, LabelCategories
16
+ )
17
+ from datumaro.components.converter import Converter
18
+ from datumaro.util.image import load_image, save_image
19
+ from datumaro.util.mask_tools import merge_masks
20
+
21
+
22
class MotsPath:
    """Directory layout and encoding constants of the MOTS PNG format."""
    MASKS_DIR = 'instances'    # per-item combined instance masks
    IMAGE_DIR = 'images'       # source images
    IMAGE_EXT = '.jpg'
    LABELS_FILE = 'labels.txt' # optional custom label list
    # Mask pixels encode class_id * MAX_INSTANCES + instance_id
    MAX_INSTANCES = 1000
28
+
29
# Standard MOTS label set; values are the class ids used in the masks.
# Id 10 marks "ignored" regions.
MotsLabels = Enum('MotsLabels', [
    ('background', 0),
    ('car', 1),
    ('pedestrian', 2),
    ('ignored', 10),
])
35
+
36
class MotsPngExtractor(SourceExtractor):
    """Reads a MOTS PNG dataset directory.

    Expected layout:
        <path>/images/<item_id>.jpg     - source images
        <path>/instances/<item_id>.png  - combined instance masks
        <path>/instances/labels.txt     - one label per line (optional)

    Each mask pixel encodes `class_id * MAX_INSTANCES + instance_id`.
    """

    @staticmethod
    def detect_dataset(path):
        # A directory is a MOTS dataset iff it has the masks subdirectory
        if osp.isdir(osp.join(path, MotsPath.MASKS_DIR)):
            return [{'url': path, 'format': 'mots_png'}]
        return []

    def __init__(self, path, subset_name=None):
        assert osp.isdir(path), path
        super().__init__(subset=subset_name)
        self._images_dir = osp.join(path, 'images')
        self._anno_dir = osp.join(path, MotsPath.MASKS_DIR)
        self._categories = self._parse_categories(
            osp.join(self._anno_dir, MotsPath.LABELS_FILE))
        self._items = self._parse_items()

    def _parse_categories(self, path):
        # Use the labels file when present, else the standard MOTS labels
        if osp.isfile(path):
            # fix: specify the encoding explicitly instead of relying on
            # the platform default (other readers in this plugin family
            # use utf-8 consistently)
            with open(path, encoding='utf-8') as f:
                labels = [l.strip() for l in f]
        else:
            labels = [l.name for l in MotsLabels]
        return { AnnotationType.label: LabelCategories.from_iterable(labels) }

    def _parse_items(self):
        """Create one DatasetItem per mask file found under the masks dir."""
        items = []
        for p in sorted(p for p in
                glob(self._anno_dir + '/**/*.png', recursive=True)):
            item_id = osp.splitext(osp.relpath(p, self._anno_dir))[0]
            items.append(DatasetItem(id=item_id, subset=self._subset,
                image=osp.join(self._images_dir, item_id + MotsPath.IMAGE_EXT),
                annotations=self._parse_annotations(p)))
        return items

    @staticmethod
    def _lazy_extract_mask(mask, v):
        # Bind v as an argument-free closure so masks are decoded lazily
        return lambda: mask == v

    def _parse_annotations(self, path):
        """Split the combined uint16 mask into per-instance Mask annotations."""
        combined_mask = load_image(path, dtype=np.uint16)
        masks = []
        for obj_id in np.unique(combined_mask):
            class_id, instance_id = divmod(obj_id, MotsPath.MAX_INSTANCES)
            z_order = 0
            if class_id == 0:
                continue # background
            if class_id == 10 and \
                    len(self._categories[AnnotationType.label]) < 10:
                # Map the special "ignored" id onto the label table and
                # draw it above regular instances
                z_order = 1
                class_id = self._categories[AnnotationType.label].find(
                    MotsLabels.ignored.name)[0]
            else:
                class_id -= 1 # file ids are 1-based
            masks.append(Mask(self._lazy_extract_mask(combined_mask, obj_id),
                label=class_id, z_order=z_order,
                attributes={'track_id': instance_id}))
        return masks
93
+
94
+
95
class MotsImporter(Importer):
    """Discovers MOTS datasets either at 'path' itself or one level below.

    When subdirectories are used, each one becomes a subset named after
    the directory.
    """
    @classmethod
    def find_sources(cls, path):
        sources = MotsPngExtractor.detect_dataset(path)
        if sources:
            return sources
        # Not a dataset root - look one level deeper for per-subset dirs
        for entry in os.listdir(path):
            found = MotsPngExtractor.detect_dataset(osp.join(path, entry))
            for source in found:
                source.setdefault('options', {})['subset_name'] = entry
            sources.extend(found)
        return sources
106
+
107
+
108
class MotsPngConverter(Converter):
    """Writes a dataset in the MOTS PNG format.

    For each subset produces <subset>/instances/<item>.png instance maps,
    <subset>/instances/labels.txt, and optionally <subset>/images/.
    """

    DEFAULT_IMAGE_EXT = MotsPath.IMAGE_EXT

    def apply(self):
        for subset_name, subset in self._extractor.subsets().items():
            subset_dir = osp.join(self._save_dir, subset_name)
            images_dir = osp.join(subset_dir, MotsPath.IMAGE_DIR)
            anno_dir = osp.join(subset_dir, MotsPath.MASKS_DIR)
            os.makedirs(anno_dir, exist_ok=True)

            for item in subset:
                log.debug("Converting item '%s'", item.id)

                if self._save_images:
                    if item.has_image and item.image.has_data:
                        self._save_image(item,
                            osp.join(images_dir, self._make_image_filename(item)))
                    else:
                        log.debug("Item '%s' has no image", item.id)

                self._save_annotations(item, anno_dir)

            # fix: write with an explicit encoding instead of the
            # platform default
            with open(osp.join(anno_dir, MotsPath.LABELS_FILE), 'w',
                    encoding='utf-8') as f:
                f.write('\n'.join(l.name
                    for l in subset.categories()[AnnotationType.label].items))

    def _save_annotations(self, item, anno_dir):
        """Merge the item's masks into one uint16 instance map and save it."""
        masks = [a for a in item.annotations if a.type == AnnotationType.mask]
        if not masks:
            return

        instance_ids = [int(a.attributes['track_id']) for a in masks]
        # Paint in z-order so overlapping instances are layered correctly
        masks = sorted(zip(masks, instance_ids), key=lambda e: e[0].z_order)
        # Each pixel becomes class_id * MAX_INSTANCES + instance_id
        # (renamed loop var from 'id', which shadowed the builtin)
        mask = merge_masks([
            m.image * (MotsPath.MAX_INSTANCES * (1 + m.label) + inst_id)
            for m, inst_id in masks])
        save_image(osp.join(anno_dir, item.id + '.png'), mask,
            create_dir=True, dtype=np.uint16)
testbed/openvinotoolkit__datumaro/datumaro/plugins/openvino_launcher.py ADDED
@@ -0,0 +1,188 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+
2
+ # Copyright (C) 2019-2020 Intel Corporation
3
+ #
4
+ # SPDX-License-Identifier: MIT
5
+
6
+ # pylint: disable=exec-used
7
+
8
+ import cv2
9
+ import logging as log
10
+ import numpy as np
11
+ import os.path as osp
12
+ import shutil
13
+
14
+ from openvino.inference_engine import IECore
15
+
16
+ from datumaro.components.cli_plugin import CliPlugin
17
+ from datumaro.components.launcher import Launcher
18
+
19
+
20
class OpenVinoImporter(CliPlugin):
    """CLI plugin declaring the parameters of an OpenVINO model."""

    @classmethod
    def build_cmdline_parser(cls, **kwargs):
        parser = super().build_cmdline_parser(**kwargs)
        parser.add_argument('-d', '--description', required=True,
            help="Path to the model description file (.xml)")
        parser.add_argument('-w', '--weights', required=True,
            help="Path to the model weights file (.bin)")
        parser.add_argument('-i', '--interpreter', required=True,
            # fix: typo in help text ("interprter")
            help="Path to the network output interpreter script (.py)")
        parser.add_argument('--device', default='CPU',
            help="Target device (default: %(default)s)")
        return parser

    @staticmethod
    def copy_model(model_dir, model):
        """Copy the model files into model_dir and replace the stored
        paths with the file basenames.

        The three entries are handled identically, so loop over them
        instead of repeating the copy logic three times.
        """
        for part in ('description', 'weights', 'interpreter'):
            src_path = model[part]
            shutil.copy(src_path, osp.join(model_dir, osp.basename(src_path)))
            model[part] = osp.basename(src_path)
47
+
48
+
49
class InterpreterScript:
    """Loads a user-provided interpreter script for a model.

    The script must define a 'process_outputs(inputs, outputs)' function
    and may define a 'get_categories()' function. Either one, when found,
    overrides the class-level fallback on this instance.

    NOTE: the script is executed with exec(), so it must be trusted.
    """

    def __init__(self, path):
        with open(path, 'r') as f:
            source = f.read()

        namespace = {}
        exec(source, namespace, namespace)

        outputs_fn = namespace.get('process_outputs')
        if not callable(outputs_fn):
            raise Exception("Can't find 'process_outputs' function in "
                "the interpreter script")
        # Store on the instance dict so it shadows the staticmethod below
        self.__dict__['process_outputs'] = outputs_fn

        categories_fn = namespace.get('get_categories')
        assert categories_fn is None or callable(categories_fn)
        if categories_fn:
            self.__dict__['get_categories'] = categories_fn

    @staticmethod
    def get_categories():
        # Fallback when the script does not provide label information
        return None

    @staticmethod
    def process_outputs(inputs, outputs):
        # Fallback that must never be reached - __init__ requires
        # the script to provide an implementation
        raise NotImplementedError(
            "Function should be implemented in the interpreter script")
76
+
77
+
78
class OpenVinoLauncher(Launcher):
    """Runs model inference through the OpenVINO Inference Engine."""

    # CLI plugin used to declare/parse model parameters
    cli_plugin = OpenVinoImporter

    def __init__(self, description, weights, interpreter,
            plugins_path=None, device=None, model_dir=None):
        """
        Parameters:
            description: path to the model .xml file
            weights: path to the model .bin file
            interpreter: path to a script defining process_outputs()
            plugins_path: NOTE(review): accepted but never used here -
                presumably kept for config compatibility; confirm
            device: target device name, defaults to 'CPU'
            model_dir: directory used to resolve relative file paths
        """
        model_dir = model_dir or ''
        # Each path is tried as given first, then relative to model_dir
        if not osp.isfile(description):
            description = osp.join(model_dir, description)
        if not osp.isfile(description):
            raise Exception('Failed to open model description file "%s"' % \
                (description))

        if not osp.isfile(weights):
            weights = osp.join(model_dir, weights)
        if not osp.isfile(weights):
            raise Exception('Failed to open model weights file "%s"' % \
                (weights))

        if not osp.isfile(interpreter):
            interpreter = osp.join(model_dir, interpreter)
        if not osp.isfile(interpreter):
            raise Exception('Failed to open model interpreter script file "%s"' % \
                (interpreter))

        self._interpreter = InterpreterScript(interpreter)

        self._device = device or 'CPU'

        self._ie = IECore()
        if hasattr(self._ie, 'read_network'):
            self._network = self._ie.read_network(description, weights)
        else: # backward compatibility
            from openvino.inference_engine import IENetwork
            self._network = IENetwork.from_ir(description, weights)
        self._check_model_support(self._network, self._device)
        self._load_executable_net()

    def _check_model_support(self, net, device):
        """Fail early if the device plugin cannot run some network layers.

        NOTE(review): relies on net.layers, which newer OpenVINO releases
        removed - confirm against the targeted IE version.
        """
        supported_layers = set(self._ie.query_network(net, device))
        not_supported_layers = set(net.layers) - supported_layers
        if len(not_supported_layers) != 0:
            log.error("The following layers are not supported " \
                "by the plugin for device '%s': %s." % \
                (device, ', '.join(not_supported_layers)))
            raise NotImplementedError(
                "Some layers are not supported on the device")

    def _load_executable_net(self, batch_size=1):
        """Reshape the network to the given batch size and load it
        onto the device."""
        network = self._network

        iter_inputs = iter(network.inputs)
        self._input_blob_name = next(iter_inputs)
        self._output_blob_name = next(iter(network.outputs))

        # NOTE: handling for the inclusion of `image_info` in OpenVino2019
        self._require_image_info = 'image_info' in network.inputs
        if self._input_blob_name == 'image_info':
            self._input_blob_name = next(iter_inputs)

        input_type = network.inputs[self._input_blob_name]
        # input_type may already be a shape list (older IE versions)
        self._input_layout = input_type if isinstance(input_type, list) else input_type.shape

        self._input_layout[0] = batch_size
        network.reshape({self._input_blob_name: self._input_layout})
        self._batch_size = batch_size

        self._net = self._ie.load_network(network=network, num_requests=1,
            device_name=self._device)

    def infer(self, inputs):
        """Run a forward pass on a batch of NHWC BGR images and return
        the raw network output(s)."""
        assert len(inputs.shape) == 4, \
            "Expected an input image in (N, H, W, C) format, got %s" % \
            (inputs.shape)
        assert inputs.shape[3] == 3, "Expected BGR input, got %s" % inputs.shape

        n, c, h, w = self._input_layout
        if inputs.shape[1:3] != (h, w):
            # Resize each image in place into a preallocated batch buffer
            resized_inputs = np.empty((n, h, w, c), dtype=inputs.dtype)
            for inp, resized_input in zip(inputs, resized_inputs):
                cv2.resize(inp, (w, h), resized_input)
            inputs = resized_inputs
        inputs = inputs.transpose((0, 3, 1, 2)) # NHWC to NCHW
        inputs = {self._input_blob_name: inputs}
        if self._require_image_info:
            # Some detection models expect an extra (h, w, scale) input
            info = np.zeros([1, 3])
            info[0, 0] = h
            info[0, 1] = w
            info[0, 2] = 1.0 # scale
            inputs['image_info'] = info

        results = self._net.infer(inputs)
        if len(results) == 1:
            # Single-output models return the tensor directly
            return results[self._output_blob_name]
        else:
            return results

    def launch(self, inputs):
        """Infer a batch and convert the raw outputs via the interpreter."""
        batch_size = len(inputs)
        # Grow the executable network if the batch is larger than before
        if self._batch_size < batch_size:
            self._load_executable_net(batch_size)

        outputs = self.infer(inputs)
        results = self.process_outputs(inputs, outputs)
        return results

    def categories(self):
        # Label info, if the interpreter script provides it
        return self._interpreter.get_categories()

    def process_outputs(self, inputs, outputs):
        # Delegate raw-output decoding to the interpreter script
        return self._interpreter.process_outputs(inputs, outputs)
188
+
testbed/openvinotoolkit__datumaro/datumaro/plugins/splitter.py ADDED
@@ -0,0 +1,522 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (C) 2020 Intel Corporation
2
+ #
3
+ # SPDX-License-Identifier: MIT
4
+
5
+ import logging as log
6
+ import numpy as np
7
+
8
+ from datumaro.components.extractor import (Transform, AnnotationType,
9
+ DEFAULT_SUBSET_NAME)
10
+
11
+ NEAR_ZERO = 1e-7
12
+
13
+
14
class _TaskSpecificSplit(Transform):
    """Base class for task-specific train/val/test splitters.

    Subclasses implement _split_dataset(), which must register
    (index-set, subset-name) pairs via _set_parts(). Splitting is lazy:
    it happens on the first iteration.
    """

    def __init__(self, dataset, splits, seed):
        """
        Parameters:
            dataset: the dataset to split
            splits: list of (subset_name, ratio); names must be among
                "train"/"val"/"test" and ratios must sum to 1
            seed: RNG seed for the shuffles, or None
        """
        super().__init__(dataset)

        snames, sratio = self._validate_splits(splits)

        self._snames = snames
        self._sratio = sratio

        self._seed = seed

        self._subsets = {"train", "val", "test"}  # output subset names
        self._parts = []
        self._length = "parent"

        self._initialized = False

    def _set_parts(self, by_splits):
        # Store (indices, subset) pairs; sets give O(1) lookups in
        # _find_split()
        self._parts = []
        for subset in self._subsets:
            self._parts.append((set(by_splits[subset]), subset))

    @staticmethod
    def _get_uniq_annotations(dataset):
        """Return the single Label annotation of every item.

        Raises when an item has zero or multiple labels - these
        splitters support single-label items only.
        """
        annotations = []
        for item in dataset:
            labels = [a for a in item.annotations
                if a.type == AnnotationType.label]
            if len(labels) != 1:
                raise Exception("Item '%s' contains %s labels, "
                    "but exactly one is expected" % (item.id, len(labels)))
            annotations.append(labels[0])
        return annotations

    @staticmethod
    def _validate_splits(splits, valid=None):
        """Check subset names and ratio bounds; ratios must sum to 1."""
        snames = []
        ratios = []
        if valid is None:
            valid = ["train", "val", "test"]
        for subset, ratio in splits:
            assert subset in valid, \
                "Subset name must be one of %s, but got %s" % (valid, subset)
            # chained comparison instead of 'a <= b and b <= c'
            assert 0.0 <= ratio <= 1.0, \
                "Ratio is expected to be in the range " \
                "[0, 1], but got %s for %s" % (ratio, subset)
            snames.append(subset)
            ratios.append(float(ratio))
        ratios = np.array(ratios)

        total_ratio = np.sum(ratios)
        if not abs(total_ratio - 1.0) <= NEAR_ZERO:
            raise Exception(
                "Sum of ratios is expected to be 1, got %s, which is %s"
                % (splits, total_ratio)
            )
        return snames, ratios

    @staticmethod
    def _get_required(ratio):
        """Minimal group size needed to honor the smallest nonzero ratio.

        E.g. for ratios (0.7, 0.15, 0.15) at least round(1 / 0.15) = 7
        samples are required to give every split a share.
        """
        min_value = np.max(ratio)
        for i in ratio:
            if NEAR_ZERO < i and i < min_value:
                min_value = i
        # fix: round the quotient - the original computed
        # int(np.around(1.0) / min_value), i.e. int(1.0 / min_value),
        # which truncates instead of rounding (e.g. 1/0.15 -> 6, not 7)
        required = int(np.around(1.0 / min_value))
        return required

    @staticmethod
    def _get_sections(dataset_size, ratio):
        """Compute np.array_split section boundaries for the ratios."""
        n_splits = [int(np.around(dataset_size * r)) for r in ratio[:-1]]
        n_splits.append(dataset_size - np.sum(n_splits))

        # if there are splits with zero samples even if ratio is not 0,
        # borrow one from the split who has one or more.
        for ii, num_split in enumerate(n_splits):
            if num_split == 0 and NEAR_ZERO < ratio[ii]:
                midx = np.argmax(n_splits)
                if n_splits[midx] > 0:
                    n_splits[ii] += 1
                    n_splits[midx] -= 1
        sections = np.add.accumulate(n_splits[:-1])
        return sections

    @staticmethod
    def _group_by_attr(items):
        """
        Args:
            items: list of (idx, ann). ann is the annotation from Label object.
        Returns:
            by_attributes: dict of { combination-of-attrs : list of index }
        """
        # group by attributes; sorting makes the key order-independent
        by_attributes = dict()
        for idx, ann in items:
            attributes = tuple(sorted(ann.attributes.items()))
            if attributes not in by_attributes:
                by_attributes[attributes] = []
            by_attributes[attributes].append(idx)
        return by_attributes

    def _split_by_attr(self, datasets, snames, ratio, out_splits,
            dataset_key="label"):
        """Shuffle and split every (key, attribute-combination) group
        proportionally, accumulating indices into out_splits."""
        required = self._get_required(ratio)
        for key, items in datasets.items():
            np.random.shuffle(items)
            by_attributes = self._group_by_attr(items)
            for attributes, indice in by_attributes.items():
                gname = "%s: %s, attrs: %s" % (dataset_key, key, attributes)
                splits = self._split_indice(indice, gname, ratio, required)
                for subset, split in zip(snames, splits):
                    if 0 < len(split):
                        out_splits[subset].extend(split)

    def _split_indice(self, indice, group_name, ratio, required):
        # Warn (but proceed) when the group is too small to honor the ratio
        filtered_size = len(indice)
        if filtered_size < required:
            log.warning("Not enough samples for a group, '%s'" % group_name)
        sections = self._get_sections(filtered_size, ratio)
        splits = np.array_split(indice, sections)
        return splits

    def _find_split(self, index):
        for subset_indices, subset in self._parts:
            if index in subset_indices:
                return subset
        return DEFAULT_SUBSET_NAME  # all the possible remainder --> default

    def _split_dataset(self):
        # To be provided by task-specific subclasses
        raise NotImplementedError()

    def __iter__(self):
        # lazy splitting
        if self._initialized is False:
            self._split_dataset()
            self._initialized = True
        for i, item in enumerate(self._extractor):
            yield self.wrap_item(item, subset=self._find_split(i))
151
+
152
+
153
class ClassificationSplit(_TaskSpecificSplit):
    """
    Splits dataset into train/val/test set in class-wise manner. |n
    |n
    Notes:|n
    - Single label is expected for each DatasetItem.|n
    - If there are not enough images in some class or attributes group,
    the split ratio can't be guaranteed.|n
    """

    def __init__(self, dataset, splits, seed=None):
        """
        Parameters
        ----------
        dataset : Dataset
        splits : list
            A list of (subset(str), ratio(float))
            Subset is expected to be one of ["train", "val", "test"].
            The sum of ratios is expected to be 1.
        seed : int, optional
        """
        super().__init__(dataset, splits, seed)

    def _split_dataset(self):
        # Seed the RNG once per split so shuffles are reproducible
        np.random.seed(self._seed)

        # support only single label for a DatasetItem
        # 1. group by label
        by_labels = dict()
        annotations = self._get_uniq_annotations(self._extractor)
        for idx, ann in enumerate(annotations):
            # Label-less annotations fall into a shared 'None' group
            label = getattr(ann, 'label', None)
            if label not in by_labels:
                by_labels[label] = []
            by_labels[label].append((idx, ann))

        by_splits = dict()
        for subset in self._subsets:
            by_splits[subset] = []

        # 2. group by attributes
        self._split_by_attr(by_labels, self._snames, self._sratio, by_splits)
        self._set_parts(by_splits)
196
+
197
+
198
+ class MatchingReIDSplit(_TaskSpecificSplit):
199
+ """
200
+ Splits dataset for matching, especially re-id task.|n
201
+ First, splits dataset into 'train+val' and 'test' sets by person id.|n
202
+ Note that this splitting is not by DatasetItem. |n
203
+ Then, tags 'test' into 'gallery'/'query' in class-wise random manner.|n
204
+ Then, splits 'train+val' into 'train'/'val' sets in the same way.|n
205
+ Therefore, the final subsets would be 'train', 'val', 'test'. |n
206
+ And 'gallery', 'query' are tagged using anntoation group.|n
207
+ You can get the 'gallery' and 'query' sets using 'get_subset_by_group'.|n
208
+ Notes:|n
209
+ - Single label is expected for each DatasetItem.|n
210
+ - Each label is expected to have attribute representing the person id. |n
211
+ """
212
+
213
+ _group_map = dict()
214
+
215
+ def __init__(self, dataset, splits, test_splits, pid_name="PID", seed=None):
216
+ """
217
+ Parameters
218
+ ----------
219
+ dataset : Dataset
220
+ splits : list
221
+ A list of (subset(str), ratio(float))
222
+ Subset is expected to be one of ["train", "val", "test"].
223
+ The sum of ratios is expected to be 1.
224
+ test_splits : list
225
+ A list of (subset(str), ratio(float))
226
+ Subset is expected to be one of ["gallery", "query"].
227
+ The sum of ratios is expected to be 1.
228
+ pid_name: str
229
+ attribute name representing the person id. (default: PID)
230
+ seed : int, optional
231
+ """
232
+ super().__init__(dataset, splits, seed)
233
+
234
+ self._test_splits = test_splits
235
+ self._pid_name = pid_name
236
+
237
    def _split_dataset(self):
        """Splits the dataset into train/val/test (and gallery/query groups).

        Person IDs assigned to 'test' never appear in 'train'/'val'.
        The 'test' part is additionally partitioned into 'gallery' and
        'query' by tagging annotation groups (see self._group_map).
        """
        np.random.seed(self._seed)

        id_snames, id_ratio = self._snames, self._sratio

        pid_name = self._pid_name
        dataset = self._extractor

        groups = set()

        # group by PID(pid_name)
        by_pid = dict()
        annotations = self._get_uniq_annotations(dataset)
        for idx, ann in enumerate(annotations):
            attributes = dict(ann.attributes.items())
            assert pid_name in attributes, \
                "'%s' is expected as an attribute name" % pid_name
            person_id = attributes[pid_name]
            if person_id not in by_pid:
                by_pid[person_id] = []
            by_pid[person_id].append((idx, ann))
            groups.add(ann.group)

        # reserve two fresh group ids to tag gallery/query membership
        max_group_id = max(groups)
        self._group_map["gallery"] = max_group_id + 1
        self._group_map["query"] = max_group_id + 2

        required = self._get_required(id_ratio)
        if len(by_pid) < required:
            log.warning("There's not enough IDs, which is %s, "
                "so train/val/test ratio can't be guaranteed."
                % len(by_pid)
            )

        # 1. split dataset into trval and test
        # IDs in test set should not exist in train/val set.
        test = id_ratio[id_snames.index("test")] if "test" in id_snames else 0
        if NEAR_ZERO < test:  # has testset
            split_ratio = np.array([test, 1.0 - test])
            person_ids = list(by_pid.keys())
            np.random.shuffle(person_ids)
            sections = self._get_sections(len(person_ids), split_ratio)
            splits = np.array_split(person_ids, sections)
            testset = {pid: by_pid[pid] for pid in splits[0]}
            trval = {pid: by_pid[pid] for pid in splits[1]}

            # follow the ratio of datasetitems as possible.
            # naive heuristic: exchange the best item one by one.
            expected_count = int(len(self._extractor) * split_ratio[0])
            testset_total = int(np.sum([len(v) for v in testset.values()]))
            self._rebalancing(testset, trval, expected_count, testset_total)
        else:
            testset = dict()
            trval = by_pid

        by_splits = dict()
        for subset in self._subsets:
            by_splits[subset] = []

        # 2. split 'test' into 'gallery' and 'query'
        if 0 < len(testset):
            for person_id, items in testset.items():
                indice = [idx for idx, _ in items]
                by_splits["test"].extend(indice)

            valid = ["gallery", "query"]
            test_splits = self._test_splits
            test_snames, test_ratio = self._validate_splits(test_splits, valid)
            by_groups = {s: [] for s in test_snames}
            self._split_by_attr(testset, test_snames, test_ratio, by_groups,
                dataset_key=pid_name)

            # tag using group
            # NOTE(review): only the first annotation of each item is tagged
            # with the gallery/query group id — presumably items hold a
            # single relevant annotation here; confirm against callers.
            for idx, item in enumerate(self._extractor):
                for subset, split in by_groups.items():
                    if idx in split:
                        group_id = self._group_map[subset]
                        item.annotations[0].group = group_id
                        break

        # 3. split 'trval' into 'train' and 'val'
        trval_snames = ["train", "val"]
        trval_ratio = []
        for subset in trval_snames:
            if subset in id_snames:
                val = id_ratio[id_snames.index(subset)]
            else:
                val = 0.0
            trval_ratio.append(val)
        trval_ratio = np.array(trval_ratio)
        total_ratio = np.sum(trval_ratio)
        if total_ratio < NEAR_ZERO:
            trval_splits = list(zip(["train", "val"], trval_ratio))
            log.warning("Sum of ratios is expected to be positive, "
                "got %s, which is %s"
                % (trval_splits, total_ratio)
            )
        else:
            trval_ratio /= total_ratio  # normalize
            self._split_by_attr(trval, trval_snames, trval_ratio, by_splits,
                dataset_key=pid_name)

        self._set_parts(by_splits)
340
+
341
+ @staticmethod
342
+ def _rebalancing(test, trval, expected_count, testset_total):
343
+ diffs = dict()
344
+ for id_test, items_test in test.items():
345
+ count_test = len(items_test)
346
+ for id_trval, items_trval in trval.items():
347
+ count_trval = len(items_trval)
348
+ diff = count_trval - count_test
349
+ if diff == 0:
350
+ continue # exchange has no effect
351
+ if diff not in diffs:
352
+ diffs[diff] = [(id_test, id_trval)]
353
+ else:
354
+ diffs[diff].append((id_test, id_trval))
355
+ exchanges = []
356
+ while True:
357
+ target_diff = expected_count - testset_total
358
+ # find nearest diff.
359
+ keys = np.array(list(diffs.keys()))
360
+ idx = (np.abs(keys - target_diff)).argmin()
361
+ nearest = keys[idx]
362
+ if abs(target_diff) <= abs(target_diff - nearest):
363
+ break
364
+ choice = np.random.choice(range(len(diffs[nearest])))
365
+ pid_test, pid_trval = diffs[nearest][choice]
366
+ testset_total += nearest
367
+ new_diffs = dict()
368
+ for diff, person_ids in diffs.items():
369
+ new_list = []
370
+ for id1, id2 in person_ids:
371
+ if id1 == pid_test or id2 == pid_trval:
372
+ continue
373
+ new_list.append((id1, id2))
374
+ if 0 < len(new_list):
375
+ new_diffs[diff] = new_list
376
+ diffs = new_diffs
377
+ exchanges.append((pid_test, pid_trval))
378
+ # exchange
379
+ for pid_test, pid_trval in exchanges:
380
+ test[pid_trval] = trval.pop(pid_trval)
381
+ trval[pid_test] = test.pop(pid_test)
382
+
383
+ def get_subset_by_group(self, group: str):
384
+ available = list(self._group_map.keys())
385
+ assert group in self._group_map, \
386
+ "Unknown group '%s', available groups: %s" \
387
+ % (group, available)
388
+ group_id = self._group_map[group]
389
+ return self.select(lambda item: item.annotations[0].group == group_id)
390
+
391
+
392
class DetectionSplit(_TaskSpecificSplit):
    """
    Splits dataset into train/val/test set for detection task.|n
    For detection dataset, each image can have multiple bbox annotations.|n
    Since one DataItem can't be included in multiple subsets at the same time,
    the dataset can't be divided according to the bbox annotations.|n
    Thus, we split dataset based on DatasetItem
    while preserving label distribution as possible.|n
    |n
    Notes:|n
    - Each DatsetItem is expected to have one or more Bbox annotations.|n
    - Label annotations are ignored. We only focus on the Bbox annotations.|n
    """

    def __init__(self, dataset, splits, seed=None):
        """
        Parameters
        ----------
        dataset : Dataset
        splits : list
            A list of (subset(str), ratio(float))
            Subset is expected to be one of ["train", "val", "test"].
            The sum of ratios is expected to be 1.
        seed : int, optional
        """
        super().__init__(dataset, splits, seed)

    @staticmethod
    def _group_by_bbox_labels(dataset):
        # Maps bbox label -> [(item index, bbox annotation), ...].
        # NOTE(review): the assert message says "more than one" but the
        # condition only requires at least one bbox per item.
        by_labels = dict()
        for idx, item in enumerate(dataset):
            bbox_anns = [a for a in item.annotations
                if a.type == AnnotationType.bbox]
            assert 0 < len(bbox_anns), \
                "Expected more than one bbox annotation in the dataset"
            for ann in bbox_anns:
                label = getattr(ann, 'label', None)
                if label not in by_labels:
                    by_labels[label] = [(idx, ann)]
                else:
                    by_labels[label].append((idx, ann))
        return by_labels

    def _split_dataset(self):
        """Greedily assigns whole items to subsets, minimizing a penalty
        that measures how much each (label, attributes) combination would
        exceed its expected per-split quota."""
        np.random.seed(self._seed)

        subsets, sratio = self._snames, self._sratio

        # 1. group by bbox label
        by_labels = self._group_by_bbox_labels(self._extractor)

        # 2. group by attributes
        by_combinations = dict()
        for label, items in by_labels.items():
            by_attributes = self._group_by_attr(items)
            for attributes, indice in by_attributes.items():
                gname = "label: %s, attributes: %s" % (label, attributes)
                by_combinations[gname] = indice

        # total number of GT samples per label-attr combinations
        n_combs = {k: len(v) for k, v in by_combinations.items()}

        # 3-1. initially count per-image GT samples
        scores_all = {}
        init_scores = {}
        for idx, _ in enumerate(self._extractor):
            counts = {k: v.count(idx) for k, v in by_combinations.items()}
            scores_all[idx] = counts
            # higher score = item carries a larger share of rare combinations
            init_scores[idx] = np.sum(
                [v / n_combs[k] for k, v in counts.items()]
            )

        by_splits = dict()
        for sname in self._subsets:
            by_splits[sname] = []

        total = len(self._extractor)
        target_size = dict()
        expected = []  # expected numbers of per split GT samples
        for sname, ratio in zip(subsets, sratio):
            target_size[sname] = total * ratio
            expected.append(
                (sname, {k: v * ratio for k, v in n_combs.items()})
            )

        ##
        # functions for keep the # of annotations not exceed the expected num
        def compute_penalty(counts, n_combs):
            # penalty grows when adding `counts` would exceed the remaining
            # quota (note: a -1 quota sentinel makes the term negative,
            # which keeps exhausted combinations from dominating)
            p = 0
            for k, v in counts.items():
                p += max(0, (v / n_combs[k]) - 1.0)
            return p

        def update_nc(counts, n_combs):
            # decrease remaining quotas; -1 marks an exhausted combination
            for k, v in counts.items():
                n_combs[k] = max(0, n_combs[k] - v)
                if n_combs[k] == 0:
                    n_combs[k] = -1
            return n_combs
        ##

        # 3-2. assign each DatasetItem to a split, one by one
        # (items with the highest rarity score are placed first)
        for idx, _ in sorted(
            init_scores.items(), key=lambda item: item[1], reverse=True
        ):
            counts = scores_all[idx]

            # shuffling split order to add randomness
            # when two or more splits have the same penalty value
            np.random.shuffle(expected)

            pp = []
            for sname, nc in expected:
                if target_size[sname] <= len(by_splits[sname]):
                    # the split has enough images,
                    # stop adding more images to this split
                    pp.append(1e08)
                else:
                    # compute penalty based on the number of GT samples
                    # added in the split
                    pp.append(compute_penalty(counts, nc))

            # we push an image to a split with the minimum penalty
            midx = np.argmin(pp)

            sname, nc = expected[midx]
            by_splits[sname].append(idx)
            update_nc(counts, nc)

        self._set_parts(by_splits)
testbed/openvinotoolkit__datumaro/datumaro/plugins/tf_detection_api_format/converter.py ADDED
@@ -0,0 +1,212 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+
2
+ # Copyright (C) 2019-2020 Intel Corporation
3
+ #
4
+ # SPDX-License-Identifier: MIT
5
+
6
+ import codecs
7
+ from collections import OrderedDict
8
+ import hashlib
9
+ import logging as log
10
+ import os
11
+ import os.path as osp
12
+ import string
13
+
14
+ from datumaro.components.extractor import (AnnotationType, DEFAULT_SUBSET_NAME,
15
+ LabelCategories
16
+ )
17
+ from datumaro.components.converter import Converter
18
+ from datumaro.util.image import encode_image, ByteImage
19
+ from datumaro.util.annotation_util import (max_bbox,
20
+ find_group_leader, find_instances)
21
+ from datumaro.util.mask_tools import merge_masks
22
+ from datumaro.util.tf_util import import_tf as _import_tf
23
+
24
+ from .format import DetectionApiPath
25
+ tf = _import_tf()
26
+
27
+
28
+ # filter out non-ASCII characters, otherwise training will crash
29
+ _printable = set(string.printable)
30
+ def _make_printable(s):
31
+ return ''.join(filter(lambda x: x in _printable, s))
32
+
33
def int64_feature(value):
    """Wraps a single integer in a tf.train.Feature."""
    int_list = tf.train.Int64List(value=[value])
    return tf.train.Feature(int64_list=int_list)

def int64_list_feature(value):
    """Wraps a list of integers in a tf.train.Feature."""
    int_list = tf.train.Int64List(value=value)
    return tf.train.Feature(int64_list=int_list)

def bytes_feature(value):
    """Wraps a single bytes value in a tf.train.Feature."""
    byte_list = tf.train.BytesList(value=[value])
    return tf.train.Feature(bytes_list=byte_list)

def bytes_list_feature(value):
    """Wraps a list of bytes values in a tf.train.Feature."""
    byte_list = tf.train.BytesList(value=value)
    return tf.train.Feature(bytes_list=byte_list)

def float_list_feature(value):
    """Wraps a list of floats in a tf.train.Feature."""
    float_list = tf.train.FloatList(value=value)
    return tf.train.Feature(float_list=float_list)
47
+
48
class TfDetectionApiConverter(Converter):
    """Exports a dataset in the TF Object Detection API format:
    one .tfrecord file per subset plus a label map file.
    """

    DEFAULT_IMAGE_EXT = DetectionApiPath.DEFAULT_IMAGE_EXT

    @classmethod
    def build_cmdline_parser(cls, **kwargs):
        parser = super().build_cmdline_parser(**kwargs)
        parser.add_argument('--save-masks', action='store_true',
            help="Include instance masks (default: %(default)s)")
        return parser

    def __init__(self, extractor, save_dir, save_masks=False, **kwargs):
        # save_masks: also write PNG-encoded per-instance masks
        super().__init__(extractor, save_dir, **kwargs)

        self._save_masks = save_masks

    def apply(self):
        """Writes the label map and a TFRecord file for every subset."""
        os.makedirs(self._save_dir, exist_ok=True)

        label_categories = self._extractor.categories().get(AnnotationType.label,
            LabelCategories())
        # '' stands for "no label"; TF label ids start at 1 (0 is reserved)
        get_label = lambda label_id: label_categories.items[label_id].name \
            if label_id is not None else ''
        label_ids = OrderedDict((label.name, 1 + idx)
            for idx, label in enumerate(label_categories.items))
        map_label_id = lambda label_id: label_ids.get(get_label(label_id), 0)
        self._get_label = get_label
        self._get_label_id = map_label_id

        for subset_name, subset in self._extractor.subsets().items():
            # NOTE(review): the (identical) label map is rewritten once per
            # subset — harmless, but redundant work.
            labelmap_path = osp.join(self._save_dir, DetectionApiPath.LABELMAP_FILE)
            with codecs.open(labelmap_path, 'w', encoding='utf8') as f:
                for label, idx in label_ids.items():
                    f.write(
                        'item {\n' +
                        ('\tid: %s\n' % (idx)) +
                        ("\tname: '%s'\n" % (label)) +
                        '}\n\n'
                    )

            anno_path = osp.join(self._save_dir, '%s.tfrecord' % (subset_name))
            with tf.io.TFRecordWriter(anno_path) as writer:
                for item in subset:
                    tf_example = self._make_tf_example(item)
                    writer.write(tf_example.SerializeToString())

    @staticmethod
    def _find_instances(annotations):
        # Groups bbox/mask annotations into per-object instances.
        return find_instances(a for a in annotations
            if a.type in { AnnotationType.bbox, AnnotationType.mask })

    def _find_instance_parts(self, group, img_width, img_height):
        # Returns [leader annotation, merged mask (or None), enclosing bbox]
        # for one instance group. img_width/img_height are currently unused.
        boxes = [a for a in group if a.type == AnnotationType.bbox]
        masks = [a for a in group if a.type == AnnotationType.mask]

        anns = boxes + masks
        leader = find_group_leader(anns)
        bbox = max_bbox(anns)

        mask = None
        if self._save_masks:
            mask = merge_masks([m.image for m in masks])

        return [leader, mask, bbox]

    def _export_instances(self, instances, width, height):
        """Converts instance triples into TF Example feature entries.
        Box coordinates are normalized to [0, 1] by the image size."""
        xmins = [] # List of normalized left x coordinates of bounding boxes (1 per box)
        xmaxs = [] # List of normalized right x coordinates of bounding boxes (1 per box)
        ymins = [] # List of normalized top y coordinates of bounding boxes (1 per box)
        ymaxs = [] # List of normalized bottom y coordinates of bounding boxes (1 per box)
        classes_text = [] # List of class names of bounding boxes (1 per box)
        classes = [] # List of class ids of bounding boxes (1 per box)
        masks = [] # List of PNG-encoded instance masks (1 per box)

        for leader, mask, box in instances:
            label = _make_printable(self._get_label(leader.label))
            classes_text.append(label.encode('utf-8'))
            classes.append(self._get_label_id(leader.label))

            xmins.append(box[0] / width)
            xmaxs.append((box[0] + box[2]) / width)
            ymins.append(box[1] / height)
            ymaxs.append((box[1] + box[3]) / height)

            if self._save_masks:
                if mask is not None:
                    mask = encode_image(mask, '.png')
                else:
                    mask = b''
                masks.append(mask)

        result = {}
        if classes:
            result = {
                'image/object/bbox/xmin': float_list_feature(xmins),
                'image/object/bbox/xmax': float_list_feature(xmaxs),
                'image/object/bbox/ymin': float_list_feature(ymins),
                'image/object/bbox/ymax': float_list_feature(ymaxs),
                'image/object/class/text': bytes_list_feature(classes_text),
                'image/object/class/label': int64_list_feature(classes),
            }
            if masks:
                result['image/object/mask'] = bytes_list_feature(masks)
        return result

    def _make_tf_example(self, item):
        """Builds a tf.train.Example for one dataset item.

        Raises an Exception when the item has no image size info, since
        the format requires width/height for normalization.
        """
        features = {
            'image/source_id': bytes_feature(
                str(item.attributes.get('source_id') or '').encode('utf-8')
            ),
        }

        filename = self._make_image_filename(item)
        features['image/filename'] = bytes_feature(filename.encode('utf-8'))

        if not item.has_image:
            raise Exception("Failed to export dataset item '%s': "
                "item has no image info" % item.id)
        height, width = item.image.size

        features.update({
            'image/height': int64_feature(height),
            'image/width': int64_feature(width),
        })

        # defaults keep the record schema stable even without image data
        features.update({
            'image/encoded': bytes_feature(b''),
            'image/format': bytes_feature(b''),
            'image/key/sha256': bytes_feature(b''),
        })
        if self._save_images:
            if item.has_image and item.image.has_data:
                buffer, fmt = self._save_image(item, filename)
                key = hashlib.sha256(buffer).hexdigest()

                features.update({
                    'image/encoded': bytes_feature(buffer),
                    'image/format': bytes_feature(fmt.encode('utf-8')),
                    'image/key/sha256': bytes_feature(key.encode('utf8')),
                })
            else:
                log.warning("Item '%s' has no image" % item.id)

        instances = self._find_instances(item.annotations)
        instances = [self._find_instance_parts(i, width, height) for i in instances]
        features.update(self._export_instances(instances, width, height))

        tf_example = tf.train.Example(
            features=tf.train.Features(feature=features))

        return tf_example

    def _save_image(self, item, path=None):
        """Returns (encoded image bytes, TF format string) for the item.

        Re-encodes the image only when the destination extension differs
        from the source or the image is not already a byte buffer.
        """
        src_ext = item.image.ext.lower()
        dst_ext = osp.splitext(osp.basename(path))[1].lower()
        fmt = DetectionApiPath.IMAGE_EXT_FORMAT.get(dst_ext, '')
        if not fmt:
            log.warning("Item '%s': can't find format string for the '%s' "
                "image extension, the corresponding field will be empty." % \
                (item.id, dst_ext))

        if src_ext == dst_ext and isinstance(item.image, ByteImage):
            buffer = item.image.get_bytes()
        else:
            buffer = encode_image(item.image.data, dst_ext)
        return buffer, fmt
testbed/openvinotoolkit__datumaro/datumaro/plugins/tf_detection_api_format/extractor.py ADDED
@@ -0,0 +1,187 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+
2
+ # Copyright (C) 2019-2020 Intel Corporation
3
+ #
4
+ # SPDX-License-Identifier: MIT
5
+
6
+ from collections import OrderedDict
7
+ import numpy as np
8
+ import os.path as osp
9
+ import re
10
+
11
+ from datumaro.components.extractor import (SourceExtractor, DatasetItem,
12
+ AnnotationType, Bbox, Mask, LabelCategories, Importer
13
+ )
14
+ from datumaro.util.image import ByteImage, decode_image, lazy_image
15
+ from datumaro.util.tf_util import import_tf as _import_tf
16
+
17
+ from .format import DetectionApiPath
18
+ tf = _import_tf()
19
+
20
+
21
def clamp(value, _min, _max):
    """Clamps `value` into the [_min, _max] range (upper bound first)."""
    capped = _max if value > _max else value
    return _min if capped < _min else capped
23
+
24
class TfDetectionApiExtractor(SourceExtractor):
    """Reads a TF Object Detection API .tfrecord file and its optional
    label map into dataset items with Bbox/Mask annotations.
    """

    def __init__(self, path):
        assert osp.isfile(path), path
        images_dir = ''
        root_dir = osp.dirname(osp.abspath(path))
        # when the tfrecord lives in <root>/<annotations dir>/, look for
        # a sibling images directory
        if osp.basename(root_dir) == DetectionApiPath.ANNOTATIONS_DIR:
            root_dir = osp.dirname(root_dir)
            images_dir = osp.join(root_dir, DetectionApiPath.IMAGES_DIR)
            if not osp.isdir(images_dir):
                images_dir = ''

        # the subset name is derived from the file name
        super().__init__(subset=osp.splitext(osp.basename(path))[0])

        items, labels = self._parse_tfrecord_file(path, self._subset, images_dir)
        self._items = items
        self._categories = self._load_categories(labels)

    @staticmethod
    def _load_categories(labels):
        # labels: name -> 0-based index; order categories by index
        label_categories = LabelCategories().from_iterable(
            e[0] for e in sorted(labels.items(), key=lambda item: item[1])
        )
        return { AnnotationType.label: label_categories }

    @classmethod
    def _parse_labelmap(cls, text):
        """Parses a TF label map file ("item { id: N name: '...' }" blocks)
        into a {name: id} dict. Order of id/name inside a block is free.
        """
        id_pattern = r'(?:id\s*:\s*(?P<id>\d+))'
        name_pattern = r'(?:name\s*:\s*[\'\"](?P<name>.*?)[\'\"])'
        entry_pattern = r'(\{(?:[\s\n]*(?:%(id)s|%(name)s)[\s\n]*){2}\})+' % \
            {'id': id_pattern, 'name': name_pattern}
        matches = re.finditer(entry_pattern, text)

        labelmap = {}
        for match in matches:
            label_id = match.group('id')
            label_name = match.group('name')
            if label_id is not None and label_name is not None:
                labelmap[label_name] = int(label_id)

        return labelmap

    @classmethod
    def _parse_tfrecord_file(cls, filepath, subset, images_dir):
        """Reads all records from `filepath` and returns
        (list of DatasetItem, {label name: 0-based label index}).
        """
        dataset = tf.data.TFRecordDataset(filepath)
        features = {
            'image/filename': tf.io.FixedLenFeature([], tf.string),
            'image/source_id': tf.io.FixedLenFeature([], tf.string),
            'image/height': tf.io.FixedLenFeature([], tf.int64),
            'image/width': tf.io.FixedLenFeature([], tf.int64),
            'image/encoded': tf.io.FixedLenFeature([], tf.string),
            'image/format': tf.io.FixedLenFeature([], tf.string),

            # use varlen to avoid errors when this field is missing
            'image/key/sha256': tf.io.VarLenFeature(tf.string),

            # Object boxes and classes.
            'image/object/bbox/xmin': tf.io.VarLenFeature(tf.float32),
            'image/object/bbox/xmax': tf.io.VarLenFeature(tf.float32),
            'image/object/bbox/ymin': tf.io.VarLenFeature(tf.float32),
            'image/object/bbox/ymax': tf.io.VarLenFeature(tf.float32),
            'image/object/class/label': tf.io.VarLenFeature(tf.int64),
            'image/object/class/text': tf.io.VarLenFeature(tf.string),
            'image/object/mask': tf.io.VarLenFeature(tf.string),
        }

        dataset_labels = OrderedDict()
        labelmap_path = osp.join(osp.dirname(filepath),
            DetectionApiPath.LABELMAP_FILE)
        if osp.exists(labelmap_path):
            with open(labelmap_path, 'r', encoding='utf-8') as f:
                labelmap_text = f.read()
            # label map ids are 1-based; internal label indices are 0-based
            dataset_labels.update({ label: id - 1
                for label, id in cls._parse_labelmap(labelmap_text).items()
            })

        dataset_items = []

        for record in dataset:
            parsed_record = tf.io.parse_single_example(record, features)
            frame_id = parsed_record['image/source_id'].numpy().decode('utf-8')
            frame_filename = \
                parsed_record['image/filename'].numpy().decode('utf-8')
            frame_height = tf.cast(
                parsed_record['image/height'], tf.int64).numpy().item()
            frame_width = tf.cast(
                parsed_record['image/width'], tf.int64).numpy().item()
            frame_image = parsed_record['image/encoded'].numpy()
            xmins = tf.sparse.to_dense(
                parsed_record['image/object/bbox/xmin']).numpy()
            ymins = tf.sparse.to_dense(
                parsed_record['image/object/bbox/ymin']).numpy()
            xmaxs = tf.sparse.to_dense(
                parsed_record['image/object/bbox/xmax']).numpy()
            ymaxs = tf.sparse.to_dense(
                parsed_record['image/object/bbox/ymax']).numpy()
            label_ids = tf.sparse.to_dense(
                parsed_record['image/object/class/label']).numpy()
            labels = tf.sparse.to_dense(
                parsed_record['image/object/class/text'],
                default_value=b'').numpy()
            masks = tf.sparse.to_dense(
                parsed_record['image/object/mask'],
                default_value=b'').numpy()

            # register labels met in the records but absent from the label map
            for label, label_id in zip(labels, label_ids):
                label = label.decode('utf-8')
                if not label:
                    continue
                if label_id <= 0:
                    continue
                if label in dataset_labels:
                    continue
                dataset_labels[label] = label_id - 1

            item_id = osp.splitext(frame_filename)[0]

            annotations = []
            for shape_id, shape in enumerate(
                    np.dstack((labels, xmins, ymins, xmaxs, ymaxs))[0]):
                label = shape[0].decode('utf-8')

                mask = None
                if len(masks) != 0:
                    mask = masks[shape_id]

                if mask is not None:
                    if isinstance(mask, bytes):
                        # decode the PNG mask lazily, on first access
                        mask = lazy_image(mask, decode_image)
                    annotations.append(Mask(image=mask,
                        label=dataset_labels.get(label)
                    ))
                else:
                    # denormalize box coordinates to pixels and clamp
                    # them into the image bounds
                    x = clamp(shape[1] * frame_width, 0, frame_width)
                    y = clamp(shape[2] * frame_height, 0, frame_height)
                    w = clamp(shape[3] * frame_width, 0, frame_width) - x
                    h = clamp(shape[4] * frame_height, 0, frame_height) - y
                    annotations.append(Bbox(x, y, w, h,
                        label=dataset_labels.get(label)
                    ))

            image_size = None
            if frame_height and frame_width:
                image_size = (frame_height, frame_width)

            image_params = {}
            if frame_image:
                image_params['data'] = frame_image
            if frame_filename:
                image_params['path'] = osp.join(images_dir, frame_filename)

            image = None
            if image_params:
                image = ByteImage(**image_params, size=image_size)

            dataset_items.append(DatasetItem(id=item_id, subset=subset,
                image=image, annotations=annotations,
                attributes={'source_id': frame_id}))

        return dataset_items, dataset_labels
183
+
184
class TfDetectionApiImporter(Importer):
    """Discovers TF Detection API sources (*.tfrecord files) in a directory."""

    @classmethod
    def find_sources(cls, path):
        extractor_name = 'tf_detection_api'
        return cls._find_sources_recursive(path, '.tfrecord', extractor_name)
testbed/openvinotoolkit__datumaro/datumaro/plugins/transforms.py ADDED
@@ -0,0 +1,559 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (C) 2020 Intel Corporation
2
+ #
3
+ # SPDX-License-Identifier: MIT
4
+
5
+ from collections import Counter
6
+ from enum import Enum
7
+ import logging as log
8
+ import os.path as osp
9
+ import random
10
+ import re
11
+
12
+ import pycocotools.mask as mask_utils
13
+
14
+ from datumaro.components.extractor import (Transform, AnnotationType,
15
+ RleMask, Polygon, Bbox, Label, DEFAULT_SUBSET_NAME,
16
+ LabelCategories, MaskCategories, PointsCategories
17
+ )
18
+ from datumaro.components.cli_plugin import CliPlugin
19
+ import datumaro.util.mask_tools as mask_tools
20
+ from datumaro.util.annotation_util import find_group_leader, find_instances
21
+
22
+
23
class CropCoveredSegments(Transform, CliPlugin):
    """Removes from each polygon/mask segment the parts covered by
    segments with a higher z_order."""

    def transform_item(self, item):
        """Returns the item with its segments cropped; non-segment
        annotations are kept unchanged. Requires image size info."""
        annotations = []
        segments = []
        for ann in item.annotations:
            if ann.type in {AnnotationType.polygon, AnnotationType.mask}:
                segments.append(ann)
            else:
                annotations.append(ann)
        if not segments:
            return item

        if not item.has_image:
            raise Exception("Image info is required for this transform")
        h, w = item.image.size
        segments = self.crop_segments(segments, w, h)

        annotations += segments
        return self.wrap_item(item, annotations=annotations)

    @classmethod
    def crop_segments(cls, segment_anns, img_width, img_height):
        """Crops the given polygon/mask annotations against each other
        and returns the resulting annotations (a polygon may be split
        into several)."""
        # process in z_order, so covering segments come later
        segment_anns = sorted(segment_anns, key=lambda x: x.z_order)

        segments = []
        for s in segment_anns:
            if s.type == AnnotationType.polygon:
                segments.append(s.points)
            elif s.type == AnnotationType.mask:
                if isinstance(s, RleMask):
                    rle = s.rle
                else:
                    rle = mask_tools.mask_to_rle(s.image)
                segments.append(rle)

        segments = mask_tools.crop_covered_segments(
            segments, img_width, img_height)

        new_anns = []
        for ann, new_segment in zip(segment_anns, segments):
            fields = {'z_order': ann.z_order, 'label': ann.label,
                'id': ann.id, 'group': ann.group, 'attributes': ann.attributes
            }
            if ann.type == AnnotationType.polygon:
                # a cropped polygon can produce several parts; tie them
                # together with a common group id
                if fields['group'] is None:
                    fields['group'] = cls._make_group_id(
                        segment_anns + new_anns, fields['id'])
                for polygon in new_segment:
                    new_anns.append(Polygon(points=polygon, **fields))
            else:
                rle = mask_tools.mask_to_rle(new_segment)
                rle = mask_utils.frPyObjects(rle, *rle['size'])
                new_anns.append(RleMask(rle=rle, **fields))

        return new_anns

    @staticmethod
    def _make_group_id(anns, ann_id):
        """Returns a group id for split-up polygon parts: the annotation id
        when it is truthy, otherwise a fresh id above all existing groups.
        """
        if ann_id:
            return ann_id
        # BUG FIX: the original computed max(anns, key=lambda x: x.group)
        # and then added 1 to the resulting *annotation object*, raising
        # TypeError whenever `anns` is non-empty. Use the max group id.
        max_gid = max((a.group for a in anns), default=0)
        return max_gid + 1
85
+
86
class MergeInstanceSegments(Transform, CliPlugin):
    """
    Replaces instance masks and, optionally, polygons with a single mask.
    """

    @classmethod
    def build_cmdline_parser(cls, **kwargs):
        parser = super().build_cmdline_parser(**kwargs)
        parser.add_argument('--include-polygons', action='store_true',
            help="Include polygons")
        return parser

    def __init__(self, extractor, include_polygons=False):
        # include_polygons: rasterize polygons into the merged mask too
        super().__init__(extractor)

        self._include_polygons = include_polygons

    def transform_item(self, item):
        """Merges each instance's segments into one RLE mask.
        Requires image size info to rasterize polygons."""
        annotations = []
        segments = []
        for ann in item.annotations:
            if ann.type in {AnnotationType.polygon, AnnotationType.mask}:
                segments.append(ann)
            else:
                annotations.append(ann)
        if not segments:
            return item

        if not item.has_image:
            raise Exception("Image info is required for this transform")
        h, w = item.image.size
        instances = self.find_instances(segments)
        segments = [self.merge_segments(i, w, h, self._include_polygons)
            for i in instances]
        segments = sum(segments, [])  # flatten per-instance lists

        annotations += segments
        return self.wrap_item(item, annotations=annotations)

    @classmethod
    def merge_segments(cls, instance, img_width, img_height,
            include_polygons=False):
        """Returns the annotations for one instance after merging:
        a single RleMask (attributes taken from the group leader), plus
        the original polygons when include_polygons is False."""
        polygons = [a for a in instance if a.type == AnnotationType.polygon]
        masks = [a for a in instance if a.type == AnnotationType.mask]
        if not polygons and not masks:
            return []

        leader = find_group_leader(polygons + masks)
        instance = []

        # Build the resulting mask
        mask = None

        if include_polygons and polygons:
            polygons = [p.points for p in polygons]
            mask = mask_tools.rles_to_mask(polygons, img_width, img_height)
        else:
            instance += polygons # keep unused polygons

        if masks:
            masks = [m.image for m in masks]
            if mask is not None:
                masks += [mask]
            mask = mask_tools.merge_masks(masks)

        if mask is None:
            return instance

        # store the merged mask in COCO RLE form
        mask = mask_tools.mask_to_rle(mask)
        mask = mask_utils.frPyObjects(mask, *mask['size'])
        instance.append(
            RleMask(rle=mask, label=leader.label, z_order=leader.z_order,
                id=leader.id, attributes=leader.attributes, group=leader.group
            )
        )
        return instance

    @staticmethod
    def find_instances(annotations):
        # Groups polygon/mask annotations into per-object instances.
        return find_instances(a for a in annotations
            if a.type in {AnnotationType.polygon, AnnotationType.mask})
167
+
168
class PolygonsToMasks(Transform, CliPlugin):
    """Converts polygon annotations to RLE masks; other annotations pass
    through unchanged. Requires image size info."""

    def transform_item(self, item):
        converted = []
        for ann in item.annotations:
            if ann.type != AnnotationType.polygon:
                converted.append(ann)
                continue
            if not item.has_image:
                raise Exception("Image info is required for this transform")
            h, w = item.image.size
            converted.append(self.convert_polygon(ann, h, w))

        return self.wrap_item(item, annotations=converted)

    @staticmethod
    def convert_polygon(polygon, img_h, img_w):
        """Returns an RleMask equivalent of the polygon, keeping its
        label, z_order, id, attributes and group."""
        rle = mask_utils.frPyObjects([polygon.points], img_h, img_w)[0]

        return RleMask(rle=rle, label=polygon.label, z_order=polygon.z_order,
            id=polygon.id, attributes=polygon.attributes, group=polygon.group)
188
+
189
class BoxesToMasks(Transform, CliPlugin):
    """Converts bbox annotations to RLE masks; other annotations pass
    through unchanged. Requires image size info."""

    def transform_item(self, item):
        converted = []
        for ann in item.annotations:
            if ann.type != AnnotationType.bbox:
                converted.append(ann)
                continue
            if not item.has_image:
                raise Exception("Image info is required for this transform")
            h, w = item.image.size
            converted.append(self.convert_bbox(ann, h, w))

        return self.wrap_item(item, annotations=converted)

    @staticmethod
    def convert_bbox(bbox, img_h, img_w):
        """Returns an RleMask covering the bbox area, keeping its
        label, z_order, id, attributes and group."""
        rle = mask_utils.frPyObjects([bbox.as_polygon()], img_h, img_w)[0]

        return RleMask(rle=rle, label=bbox.label, z_order=bbox.z_order,
            id=bbox.id, attributes=bbox.attributes, group=bbox.group)
209
+
210
class MasksToPolygons(Transform, CliPlugin):
    """Converts mask annotations to polygons; fragments too small to form
    a polygon are discarded (with a debug log message)."""

    def transform_item(self, item):
        new_anns = []
        for ann in item.annotations:
            if ann.type != AnnotationType.mask:
                new_anns.append(ann)
                continue
            polygons = self.convert_mask(ann)
            if not polygons:
                log.debug("[%s]: item %s: "
                    "Mask conversion to polygons resulted in too "
                    "small polygons, which were discarded" % \
                    (self._get_name(__class__), item.id))
            new_anns.extend(polygons)

        return self.wrap_item(item, annotations=new_anns)

    @staticmethod
    def convert_mask(mask):
        """Returns the list of Polygons extracted from the mask image,
        each inheriting the mask's label, z_order, id, attributes, group."""
        return [
            Polygon(points=p, label=mask.label, z_order=mask.z_order,
                id=mask.id, attributes=mask.attributes, group=mask.group)
            for p in mask_tools.mask_to_polygons(mask.image)
        ]
236
+
237
class ShapesToBoxes(Transform, CliPlugin):
    """Replaces masks, polygons, polylines and points with their
    bounding boxes."""

    # Annotation types that get converted; everything else passes through.
    _SHAPE_TYPES = { AnnotationType.mask, AnnotationType.polygon,
        AnnotationType.polyline, AnnotationType.points,
    }

    def transform_item(self, item):
        converted = [
            self.convert_shape(ann) if ann.type in self._SHAPE_TYPES else ann
            for ann in item.annotations
        ]
        return self.wrap_item(item, annotations=converted)

    @staticmethod
    def convert_shape(shape):
        # get_bbox() returns (x, y, w, h), which is what Bbox expects.
        return Bbox(*shape.get_bbox(), label=shape.label,
            z_order=shape.z_order, id=shape.id,
            attributes=shape.attributes, group=shape.group)
255
+
256
class Reindex(Transform, CliPlugin):
    """Replaces item ids with sequential numbers, starting from --start."""

    @classmethod
    def build_cmdline_parser(cls, **kwargs):
        parser = super().build_cmdline_parser(**kwargs)
        parser.add_argument('-s', '--start', type=int, default=1,
            help="Start value for item ids")
        return parser

    def __init__(self, extractor, start=1):
        super().__init__(extractor)
        # Item count is unchanged, so the parent's length can be reused.
        self._length = 'parent'
        self._start = start

    def __iter__(self):
        for new_id, item in enumerate(self._extractor, start=self._start):
            yield self.wrap_item(item, id=new_id)
272
+
273
class MapSubsets(Transform, CliPlugin):
    """Renames subsets in the dataset according to 'src:dst' pairs.

    Subsets not mentioned in the mapping pass through unchanged.
    """

    @staticmethod
    def _mapping_arg(s):
        # argparse type: parses a single 'src:dst' pair.
        parts = s.split(':')
        if len(parts) != 2:
            import argparse
            # Give argparse a message instead of an empty error.
            raise argparse.ArgumentTypeError(
                "Expected a 'src:dst' pair, got '%s'" % s)
        return parts

    @classmethod
    def build_cmdline_parser(cls, **kwargs):
        parser = super().build_cmdline_parser(**kwargs)
        parser.add_argument('-s', '--subset', action='append',
            type=cls._mapping_arg, dest='mapping',
            help="Subset mapping of the form: 'src:dst' (repeatable)")
        return parser

    def __init__(self, extractor, mapping=None):
        super().__init__(extractor)

        if mapping is None:
            mapping = {}
        elif not isinstance(mapping, dict):
            mapping = dict(tuple(m) for m in mapping)
        self._mapping = mapping

        if extractor._subsets:
            # If no two source subsets collapse into the same target,
            # the item count per subset (and in total) is preserved.
            counts = Counter(mapping.get(s, s) or DEFAULT_SUBSET_NAME
                for s in extractor._subsets)
            if all(c == 1 for c in counts.values()):
                self._length = 'parent'
            self._subsets = set(counts)

    def transform_item(self, item):
        return self.wrap_item(item,
            subset=self._mapping.get(item.subset, item.subset))
309
+
310
class RandomSplit(Transform, CliPlugin):
    """
    Joins all subsets into one and splits the result into few parts.
    It is expected that item ids are unique and subset ratios sum up to 1.|n
    |n
    Example:|n
    |s|s%(prog)s --subset train:.67 --subset test:.33
    """

    # avoid https://bugs.python.org/issue16399
    _default_split = [('train', 0.67), ('test', 0.33)]

    @staticmethod
    def _split_arg(s):
        # argparse type: parses a single '<subset>:<ratio>' pair.
        parts = s.split(':')
        if len(parts) != 2:
            import argparse
            raise argparse.ArgumentTypeError(
                "Expected a '<subset>:<ratio>' pair, got '%s'" % s)
        return (parts[0], float(parts[1]))

    @classmethod
    def build_cmdline_parser(cls, **kwargs):
        parser = super().build_cmdline_parser(**kwargs)
        parser.add_argument('-s', '--subset', action='append',
            type=cls._split_arg, dest='splits',
            help="Subsets in the form: '<subset>:<ratio>' "
                "(repeatable, default: %s)" % dict(cls._default_split))
        parser.add_argument('--seed', type=int, help="Random seed")
        return parser

    def __init__(self, extractor, splits, seed=None):
        super().__init__(extractor)

        if splits is None:
            splits = self._default_split

        assert 0 < len(splits), "Expected at least one split"
        assert all(0.0 <= r and r <= 1.0 for _, r in splits), \
            "Ratios are expected to be in the range [0; 1], but got %s" % splits

        total_ratio = sum(s[1] for s in splits)
        if not abs(total_ratio - 1.0) <= 1e-7:
            raise Exception(
                "Sum of ratios is expected to be 1, got %s, which is %s" %
                (splits, total_ratio))

        # Shuffle all item indices once, then carve the shuffled sequence
        # into consecutive chunks proportional to the requested ratios.
        dataset_size = len(extractor)
        indices = list(range(dataset_size))
        random.seed(seed)
        random.shuffle(indices)
        parts = []
        s = 0
        lower_boundary = 0
        for split_idx, (subset, ratio) in enumerate(splits):
            s += ratio
            upper_boundary = int(s * dataset_size)
            if split_idx == len(splits) - 1:
                # The last split absorbs any rounding remainder.
                upper_boundary = dataset_size
            subset_indices = set(indices[lower_boundary : upper_boundary])
            parts.append((subset_indices, subset))
            lower_boundary = upper_boundary
        self._parts = parts

        self._subsets = set(s[0] for s in splits)
        self._length = 'parent'

    def _find_split(self, index):
        """Returns the subset name assigned to the item at this index."""
        for subset_indices, subset in self._parts:
            if index in subset_indices:
                return subset
        # Unassigned indices (boundary rounding) go to the last split.
        # Previously this relied on the leaked loop variable, which
        # raises NameError when _parts is empty; be explicit instead.
        return self._parts[-1][1]

    def __iter__(self):
        for i, item in enumerate(self._extractor):
            yield self.wrap_item(item, subset=self._find_split(i))
385
+
386
class IdFromImageName(Transform, CliPlugin):
    """Sets each item's id to its image file name (without extension)."""

    def transform_item(self, item):
        if not (item.has_image and item.image.path):
            # Nothing to derive the id from; keep the item untouched.
            log.debug("Can't change item id for item '%s': "
                "item has no image info" % item.id)
            return item
        new_id = osp.splitext(osp.basename(item.image.path))[0]
        return self.wrap_item(item, id=new_id)
395
+
396
class Rename(Transform, CliPlugin):
    r"""
    Renames items in the dataset. Supports regular expressions.
    The first character in the expression is a delimiter for
    the pattern and replacement parts. Replacement part can also
    contain string.format tokens with 'item' object available.|n
    |n
    Examples:|n
    - Replace 'pattern' with 'replacement':|n
    |s|srename -e '|pattern|replacement|'|n
    - Remove 'frame_' from item ids:|n
    |s|srename -e '|frame_(\d+)|\1|'
    """
    # NOTE: raw docstring avoids the invalid '\d' escape warning.

    @classmethod
    def build_cmdline_parser(cls, **kwargs):
        parser = super().build_cmdline_parser(**kwargs)
        parser.add_argument('-e', '--regex',
            help="Regex for renaming.")
        return parser

    def __init__(self, extractor, regex):
        super().__init__(extractor)

        assert regex and isinstance(regex, str)
        # The first character delimits '<delim><pattern><delim><sub><delim>'.
        parts = regex.split(regex[0], maxsplit=3)
        assert 3 <= len(parts), \
            "Expected an expression of the form '<d>pattern<d>sub<d>', " \
            "got '%s'" % regex
        regex, sub = parts[1:3]
        self._re = re.compile(regex)
        self._sub = sub

    def transform_item(self, item):
        # Apply the regex substitution, then expand '{item...}' tokens.
        return self.wrap_item(item, id=self._re.sub(self._sub, item.id) \
            .format(item=item))
429
+
430
class RemapLabels(Transform, CliPlugin):
    """
    Changes labels in the dataset.|n
    Examples:|n
    - Rename 'person' to 'car' and 'cat' to 'dog', keep 'bus', remove others:|n
    |s|sremap_labels -l person:car -l bus:bus -l cat:dog --default delete
    """

    # What happens to labels not mentioned in the mapping.
    DefaultAction = Enum('DefaultAction', ['keep', 'delete'])

    @staticmethod
    def _split_arg(s):
        # argparse type: parses a single '<src>:<dst>' pair.
        parts = s.split(':')
        if len(parts) != 2:
            import argparse
            raise argparse.ArgumentTypeError()
        return (parts[0], parts[1])

    @classmethod
    def build_cmdline_parser(cls, **kwargs):
        parser = super().build_cmdline_parser(**kwargs)
        parser.add_argument('-l', '--label', action='append',
            type=cls._split_arg, dest='mapping',
            help="Label in the form of: '<src>:<dst>' (repeatable)")
        parser.add_argument('--default',
            choices=[a.name for a in cls.DefaultAction],
            default=cls.DefaultAction.keep.name,
            help="Action for unspecified labels (default: %(default)s)")
        return parser

    def __init__(self, extractor, mapping, default=None):
        super().__init__(extractor)

        # NOTE(review): the signature default (None) fails this isinstance
        # check, so callers apparently always pass a value — confirm the
        # intended default behavior.
        assert isinstance(default, (str, self.DefaultAction))
        if isinstance(default, str):
            default = self.DefaultAction[default]

        assert isinstance(mapping, (dict, list))
        if isinstance(mapping, list):
            mapping = dict(mapping)

        self._categories = {}

        # Build the src->dst label id translation and the new categories.
        src_label_cat = self._extractor.categories().get(AnnotationType.label)
        if src_label_cat is not None:
            self._make_label_id_map(src_label_cat, mapping, default)

        # Keep colormap entries only for labels that survive the remap;
        # id 0 is always kept (presumably the background — confirm).
        src_mask_cat = self._extractor.categories().get(AnnotationType.mask)
        if src_mask_cat is not None:
            assert src_label_cat is not None
            dst_mask_cat = MaskCategories(attributes=src_mask_cat.attributes)
            dst_mask_cat.colormap = {
                id: src_mask_cat.colormap[id]
                for id, _ in enumerate(src_label_cat.items)
                if self._map_id(id) or id == 0
            }
            self._categories[AnnotationType.mask] = dst_mask_cat

        # Same filtering for point (e.g. skeleton) categories.
        src_points_cat = self._extractor.categories().get(AnnotationType.points)
        if src_points_cat is not None:
            assert src_label_cat is not None
            dst_points_cat = PointsCategories(attributes=src_points_cat.attributes)
            dst_points_cat.items = {
                id: src_points_cat.items[id]
                for id, item in enumerate(src_label_cat.items)
                if self._map_id(id) or id == 0
            }
            self._categories[AnnotationType.points] = dst_points_cat

    def _make_label_id_map(self, src_label_cat, label_mapping, default_action):
        # Builds dst label categories and a src id -> dst id mapping;
        # unmapped labels are either kept as-is or dropped, per default_action.
        dst_label_cat = LabelCategories(attributes=src_label_cat.attributes)
        id_mapping = {}
        for src_index, src_label in enumerate(src_label_cat.items):
            dst_label = label_mapping.get(src_label.name)
            if not dst_label and default_action == self.DefaultAction.keep:
                dst_label = src_label.name # keep unspecified as is
            if not dst_label:
                continue

            dst_index = dst_label_cat.find(dst_label)[0]
            if dst_index is None:
                dst_index = dst_label_cat.add(dst_label,
                    src_label.parent, src_label.attributes)
            id_mapping[src_index] = dst_index

        if log.getLogger().isEnabledFor(log.DEBUG):
            log.debug("Label mapping:")
            for src_id, src_label in enumerate(src_label_cat.items):
                if id_mapping.get(src_id):
                    log.debug("#%s '%s' -> #%s '%s'",
                        src_id, src_label.name, id_mapping[src_id],
                        dst_label_cat.items[id_mapping[src_id]].name
                    )
                else:
                    log.debug("#%s '%s' -> <deleted>", src_id, src_label.name)

        self._map_id = lambda src_id: id_mapping.get(src_id, None)
        self._categories[AnnotationType.label] = dst_label_cat

    def categories(self):
        return self._categories

    def transform_item(self, item):
        annotations = []
        for ann in item.annotations:
            if ann.type in { AnnotationType.label, AnnotationType.mask,
                AnnotationType.points, AnnotationType.polygon,
                AnnotationType.polyline, AnnotationType.bbox
            } and ann.label is not None:
                # Annotations whose label was deleted by the map are dropped.
                conv_label = self._map_id(ann.label)
                if conv_label is not None:
                    annotations.append(ann.wrap(label=conv_label))
            else:
                annotations.append(ann.wrap())
        return item.wrap(annotations=annotations)
545
+
546
class AnnsToLabels(Transform, CliPlugin):
    """
    Collects all labels from annotations (of all types) and
    transforms them into a set of annotations of type Label
    """

    def transform_item(self, item):
        # Use a getattr default so annotation types without a 'label'
        # attribute are skipped instead of raising AttributeError,
        # and compare with 'is not None' rather than '!= None'.
        labels = set(p.label for p in item.annotations
            if getattr(p, 'label', None) is not None)
        annotations = [Label(label=label) for label in labels]

        return item.wrap(annotations=annotations)
testbed/openvinotoolkit__datumaro/datumaro/plugins/vgg_face2_format.py ADDED
@@ -0,0 +1,139 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (C) 2020 Intel Corporation
2
+ #
3
+ # SPDX-License-Identifier: MIT
4
+
5
+ import csv
6
+ import os
7
+ import os.path as osp
8
+ from glob import glob
9
+
10
+ from datumaro.components.converter import Converter
11
+ from datumaro.components.extractor import (AnnotationType, Bbox, DatasetItem,
12
+ Importer, Points, LabelCategories, SourceExtractor)
13
+
14
+
15
class VggFace2Path:
    # Layout constants of the VGGFace2 dataset format.
    ANNOTATION_DIR = "bb_landmark"  # subdir holding the .csv annotation files
    IMAGE_EXT = '.jpg'
    BBOXES_FILE = 'loose_bb_'  # bbox csv file name prefix ('loose_bb_<subset>.csv')
    LANDMARKS_FILE = 'loose_landmark_'  # landmarks csv file name prefix
20
+
21
class VggFace2Extractor(SourceExtractor):
    """Parses a VGGFace2 landmarks .csv file into dataset items, also
    attaching boxes from the matching bbox .csv file when present."""

    def __init__(self, path):
        if not osp.isfile(path):
            raise Exception("Can't read .csv annotation file '%s'" % path)
        self._path = path
        # Dataset root is two levels up: <root>/bb_landmark/<file>.csv
        self._dataset_dir = osp.dirname(osp.dirname(path))

        # 'loose_landmark_<subset>' -> '<subset>'
        subset = osp.splitext(osp.basename(path))[0]
        if subset.startswith(VggFace2Path.LANDMARKS_FILE):
            subset = subset.split('_')[2]
        super().__init__(subset=subset)

        self._load_categories()
        self._items = list(self._load_items(path).values())

    def _load_categories(self):
        # The format carries no label names; start with empty categories.
        self._categories[AnnotationType.label] = LabelCategories()

    def _load_items(self, path):
        """Reads landmark rows (and the sibling bbox file) into DatasetItems."""
        items = {}
        with open(path) as content:
            landmarks_table = list(csv.DictReader(content))

        for row in landmarks_table:
            item_id = row['NAME_ID']
            image_path = osp.join(self._dataset_dir, self._subset,
                item_id + VggFace2Path.IMAGE_EXT)
            annotations = []
            # A complete landmark row has 11 columns: NAME_ID + 5 (x, y)
            # points; rows with any empty cell produce no annotation.
            if len([p for p in row if row[p] == '']) == 0 and len(row) == 11:
                annotations.append(Points(
                    [float(row[p]) for p in row if p != 'NAME_ID']))
            if item_id in items and 0 < len(annotations):
                # Repeated NAME_ID: accumulate landmarks on the same item.
                annotation = items[item_id].annotations
                annotation.append(annotations[0])
            else:
                items[item_id] = DatasetItem(id=item_id, subset=self._subset,
                    image=image_path, annotations=annotations)

        # Attach boxes from 'loose_bb_<subset>.csv' next to the landmarks.
        bboxes_path = osp.join(self._dataset_dir, VggFace2Path.ANNOTATION_DIR,
            VggFace2Path.BBOXES_FILE + self._subset + '.csv')
        if osp.isfile(bboxes_path):
            with open(bboxes_path) as content:
                bboxes_table = list(csv.DictReader(content))
            for row in bboxes_table:
                # Complete bbox row: NAME_ID, X, Y, W, H.
                # NOTE(review): assumes every bbox NAME_ID already exists
                # in the landmarks table, otherwise this raises KeyError —
                # confirm against real datasets.
                if len([p for p in row if row[p] == '']) == 0 and len(row) == 5:
                    item_id = row['NAME_ID']
                    annotations = items[item_id].annotations
                    annotations.append(Bbox(int(row['X']), int(row['Y']),
                        int(row['W']), int(row['H'])))
        return items
71
+
72
class VggFace2Importer(Importer):
    @classmethod
    def find_sources(cls, path):
        # Collect landmark .csv files under <path>/bb_landmark, skipping
        # the bbox files (the extractor picks those up itself).
        # NOTE(review): '**.csv' with recursive=True looks like it was
        # meant to be '**/*.csv' — confirm whether nested directories
        # should be searched.
        subset_paths = [p for p in glob(osp.join(path,
            VggFace2Path.ANNOTATION_DIR, '**.csv'), recursive=True)
            if not osp.basename(p).startswith(VggFace2Path.BBOXES_FILE)]
        sources = []
        for subset_path in subset_paths:
            sources += cls._find_sources_recursive(
                subset_path, '.csv', 'vgg_face2')
        return sources
83
+
84
class VggFace2Converter(Converter):
    """Writes a dataset in VGGFace2 format: per-subset image directories
    plus 'bb_landmark' csv files with 5-point landmarks and boxes."""

    DEFAULT_IMAGE_EXT = '.jpg'

    def apply(self):
        save_dir = self._save_dir

        os.makedirs(save_dir, exist_ok=True)
        for subset_name, subset in self._extractor.subsets().items():
            subset_dir = osp.join(save_dir, subset_name)
            bboxes_table = []
            landmarks_table = []
            for item in subset:
                if item.has_image and self._save_images:
                    # subset_dir already contains save_dir; the previous
                    # osp.join(save_dir, subset_dir, ...) duplicated the
                    # root component for relative save paths.
                    self._save_image(item, osp.join(subset_dir,
                        item.id + VggFace2Path.IMAGE_EXT))

                landmarks = [a for a in item.annotations
                    if a.type == AnnotationType.points]
                if landmarks:
                    for landmark in landmarks:
                        points = landmark.points
                        landmarks_table.append({'NAME_ID': item.id,
                            'P1X': points[0], 'P1Y': points[1],
                            'P2X': points[2], 'P2Y': points[3],
                            'P3X': points[4], 'P3Y': points[5],
                            'P4X': points[6], 'P4Y': points[7],
                            'P5X': points[8], 'P5Y': points[9]})
                else:
                    # Items without landmarks still get a NAME_ID row.
                    landmarks_table.append({'NAME_ID': item.id})

                bboxes = [a for a in item.annotations
                    if a.type == AnnotationType.bbox]
                if bboxes:
                    for bbox in bboxes:
                        bboxes_table.append({'NAME_ID': item.id, 'X': int(bbox.x),
                            'Y': int(bbox.y), 'W': int(bbox.w), 'H': int(bbox.h)})

            # The landmarks file is always written (it defines the subset);
            # the bboxes file only when there are boxes.
            landmarks_path = osp.join(save_dir, VggFace2Path.ANNOTATION_DIR,
                VggFace2Path.LANDMARKS_FILE + subset_name + '.csv')
            os.makedirs(osp.dirname(landmarks_path), exist_ok=True)
            with open(landmarks_path, 'w', newline='') as file:
                columns = ['NAME_ID', 'P1X', 'P1Y', 'P2X', 'P2Y',
                    'P3X', 'P3Y', 'P4X', 'P4Y', 'P5X', 'P5Y']
                writer = csv.DictWriter(file, fieldnames=columns)
                writer.writeheader()
                writer.writerows(landmarks_table)

            if bboxes_table:
                bboxes_path = osp.join(save_dir, VggFace2Path.ANNOTATION_DIR,
                    VggFace2Path.BBOXES_FILE + subset_name + '.csv')
                os.makedirs(osp.dirname(bboxes_path), exist_ok=True)
                with open(bboxes_path, 'w', newline='') as file:
                    columns = ['NAME_ID', 'X', 'Y', 'W', 'H']
                    writer = csv.DictWriter(file, fieldnames=columns)
                    writer.writeheader()
                    writer.writerows(bboxes_table)
testbed/openvinotoolkit__datumaro/datumaro/plugins/voc_format/__init__.py ADDED
File without changes
testbed/openvinotoolkit__datumaro/datumaro/plugins/voc_format/converter.py ADDED
@@ -0,0 +1,579 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+
2
+ # Copyright (C) 2020 Intel Corporation
3
+ #
4
+ # SPDX-License-Identifier: MIT
5
+
6
+ import logging as log
7
+ import os
8
+ import os.path as osp
9
+ from collections import OrderedDict, defaultdict
10
+ from enum import Enum
11
+ from itertools import chain
12
+
13
+ from lxml import etree as ET
14
+
15
+ from datumaro.components.converter import Converter
16
+ from datumaro.components.extractor import (DEFAULT_SUBSET_NAME, AnnotationType,
17
+ CompiledMask, LabelCategories)
18
+ from datumaro.util import find, str_to_bool
19
+ from datumaro.util.image import save_image
20
+ from datumaro.util.mask_tools import paint_mask, remap_mask
21
+
22
+ from .format import (VocTask, VocPath, VocInstColormap,
23
+ parse_label_map, make_voc_label_map, make_voc_categories, write_label_map
24
+ )
25
+
26
+
27
+ def _convert_attr(name, attributes, type_conv, default=None, warn=True):
28
+ d = object()
29
+ value = attributes.get(name, d)
30
+ if value is d:
31
+ return default
32
+
33
+ try:
34
+ return type_conv(value)
35
+ except Exception as e:
36
+ log.warning("Failed to convert attribute '%s'='%s': %s" % \
37
+ (name, value, e))
38
+ return default
39
+
40
+ def _write_xml_bbox(bbox, parent_elem):
41
+ x, y, w, h = bbox
42
+ bbox_elem = ET.SubElement(parent_elem, 'bndbox')
43
+ ET.SubElement(bbox_elem, 'xmin').text = str(x)
44
+ ET.SubElement(bbox_elem, 'ymin').text = str(y)
45
+ ET.SubElement(bbox_elem, 'xmax').text = str(x + w)
46
+ ET.SubElement(bbox_elem, 'ymax').text = str(y + h)
47
+ return bbox_elem
48
+
49
+
50
class LabelmapType(Enum):
    """Built-in label map choices for the VOC converter."""
    voc = 1
    source = 2
51
+
52
+ class VocConverter(Converter):
53
+ DEFAULT_IMAGE_EXT = VocPath.IMAGE_EXT
54
+
55
+ @staticmethod
56
+ def _split_tasks_string(s):
57
+ return [VocTask[i.strip()] for i in s.split(',')]
58
+
59
+ @staticmethod
60
+ def _get_labelmap(s):
61
+ if osp.isfile(s):
62
+ return s
63
+ try:
64
+ return LabelmapType[s].name
65
+ except KeyError:
66
+ import argparse
67
+ raise argparse.ArgumentTypeError()
68
+
69
    @classmethod
    def build_cmdline_parser(cls, **kwargs):
        """Extends the base converter CLI with VOC-specific options."""
        parser = super().build_cmdline_parser(**kwargs)

        parser.add_argument('--apply-colormap', type=str_to_bool, default=True,
            help="Use colormap for class and instance masks "
                "(default: %(default)s)")
        # Either a path to a label map file or a LabelmapType name.
        parser.add_argument('--label-map', type=cls._get_labelmap, default=None,
            help="Labelmap file path or one of %s" % \
                ', '.join(t.name for t in LabelmapType))
        parser.add_argument('--allow-attributes',
            type=str_to_bool, default=True,
            help="Allow export of attributes (default: %(default)s)")
        parser.add_argument('--tasks', type=cls._split_tasks_string,
            help="VOC task filter, comma-separated list of {%s} "
                "(default: all)" % ', '.join(t.name for t in VocTask))

        return parser
87
+
88
    def __init__(self, extractor, save_dir,
            tasks=None, apply_colormap=True, label_map=None,
            allow_attributes=True, **kwargs):
        """Creates a VOC converter.

        tasks: VocTask, collection of VocTask/names, or None for all tasks.
        apply_colormap: paint index masks with the palette when saving.
        label_map: labelmap file path, a LabelmapType name, or None
            (defaults to 'source', i.e. reuse the extractor's labels).
        allow_attributes: export non-native attributes into the XML.
        """
        super().__init__(extractor, save_dir, **kwargs)

        assert tasks is None or isinstance(tasks, (VocTask, list, set))
        # Normalize to a set of VocTask values; names are looked up.
        if tasks is None:
            tasks = set(VocTask)
        elif isinstance(tasks, VocTask):
            tasks = {tasks}
        else:
            tasks = set(t if t in VocTask else VocTask[t] for t in tasks)
        self._tasks = tasks

        self._apply_colormap = apply_colormap
        self._allow_attributes = allow_attributes

        if label_map is None:
            label_map = LabelmapType.source.name
        self._load_categories(label_map)
108
+
109
    def apply(self):
        # Conversion entry point: prepare the VOC directory layout,
        # write per-subset annotations/lists, then the label map file.
        self.make_dirs()
        self.save_subsets()
        self.save_label_map()
113
+
114
    def make_dirs(self):
        """Creates the standard VOC directory tree under save_dir and
        remembers each directory path on the instance."""
        save_dir = self._save_dir
        subsets_dir = osp.join(save_dir, VocPath.SUBSETS_DIR)
        # One image-set subdirectory per task group.
        cls_subsets_dir = osp.join(subsets_dir,
            VocPath.TASK_DIR[VocTask.classification])
        action_subsets_dir = osp.join(subsets_dir,
            VocPath.TASK_DIR[VocTask.action_classification])
        layout_subsets_dir = osp.join(subsets_dir,
            VocPath.TASK_DIR[VocTask.person_layout])
        segm_subsets_dir = osp.join(subsets_dir,
            VocPath.TASK_DIR[VocTask.segmentation])
        ann_dir = osp.join(save_dir, VocPath.ANNOTATIONS_DIR)
        img_dir = osp.join(save_dir, VocPath.IMAGES_DIR)
        segm_dir = osp.join(save_dir, VocPath.SEGMENTATION_DIR)
        inst_dir = osp.join(save_dir, VocPath.INSTANCES_DIR)
        # NOTE(review): images_dir duplicates img_dir (same path);
        # one of the two could be removed.
        images_dir = osp.join(save_dir, VocPath.IMAGES_DIR)

        os.makedirs(subsets_dir, exist_ok=True)
        os.makedirs(ann_dir, exist_ok=True)
        os.makedirs(img_dir, exist_ok=True)
        os.makedirs(segm_dir, exist_ok=True)
        os.makedirs(inst_dir, exist_ok=True)
        os.makedirs(images_dir, exist_ok=True)

        self._subsets_dir = subsets_dir
        self._cls_subsets_dir = cls_subsets_dir
        self._action_subsets_dir = action_subsets_dir
        self._layout_subsets_dir = layout_subsets_dir
        self._segm_subsets_dir = segm_subsets_dir
        self._ann_dir = ann_dir
        self._img_dir = img_dir
        self._segm_dir = segm_dir
        self._inst_dir = inst_dir
        self._images_dir = images_dir
148
+
149
+ def get_label(self, label_id):
150
+ return self._extractor. \
151
+ categories()[AnnotationType.label].items[label_id].name
152
+
153
    def save_subsets(self):
        """Writes per-item XML annotations, masks and per-task image lists
        for every subset of the source dataset."""
        for subset_name, subset in self._extractor.subsets().items():
            # item id -> per-task bookkeeping for the image-set files
            class_lists = OrderedDict()
            clsdet_list = OrderedDict()
            action_list = OrderedDict()
            layout_list = OrderedDict()
            segm_list = OrderedDict()

            for item in subset:
                log.debug("Converting item '%s'", item.id)

                image_filename = self._make_image_filename(item)
                if self._save_images:
                    if item.has_image and item.image.has_data:
                        self._save_image(item,
                            osp.join(self._images_dir, image_filename))
                    else:
                        log.debug("Item '%s' has no image", item.id)

                # Partition the annotations by type once, up front.
                labels = []
                bboxes = []
                masks = []
                for a in item.annotations:
                    if a.type == AnnotationType.label:
                        labels.append(a)
                    elif a.type == AnnotationType.bbox:
                        bboxes.append(a)
                    elif a.type == AnnotationType.mask:
                        masks.append(a)

                # NOTE(review): __init__ always normalizes _tasks to a set,
                # so 'self._tasks is None' can never hold here; the
                # condition effectively reduces to the set intersection.
                if self._tasks is None and bboxes or \
                        self._tasks & {VocTask.detection, VocTask.person_layout,
                            VocTask.action_classification}:
                    root_elem = ET.Element('annotation')
                    # VOC convention: the part of the id before the first
                    # '_' is treated as the source folder name.
                    if '_' in item.id:
                        folder = item.id[ : item.id.find('_')]
                    else:
                        folder = ''
                    ET.SubElement(root_elem, 'folder').text = folder
                    ET.SubElement(root_elem, 'filename').text = image_filename

                    source_elem = ET.SubElement(root_elem, 'source')
                    ET.SubElement(source_elem, 'database').text = 'Unknown'
                    ET.SubElement(source_elem, 'annotation').text = 'Unknown'
                    ET.SubElement(source_elem, 'image').text = 'Unknown'

                    if item.has_image:
                        h, w = item.image.size
                        size_elem = ET.SubElement(root_elem, 'size')
                        ET.SubElement(size_elem, 'width').text = str(w)
                        ET.SubElement(size_elem, 'height').text = str(h)
                        ET.SubElement(size_elem, 'depth').text = ''

                    item_segmented = 0 < len(masks)
                    ET.SubElement(root_elem, 'segmented').text = \
                        str(int(item_segmented))

                    objects_with_parts = []
                    objects_with_actions = defaultdict(dict)

                    # Split boxes into main objects and their body parts
                    # (parts are linked to objects through the group id).
                    main_bboxes = []
                    layout_bboxes = []
                    for bbox in bboxes:
                        label = self.get_label(bbox.label)
                        if self._is_part(label):
                            layout_bboxes.append(bbox)
                        elif self._is_label(label):
                            main_bboxes.append(bbox)

                    for new_obj_id, obj in enumerate(main_bboxes):
                        attr = obj.attributes

                        obj_elem = ET.SubElement(root_elem, 'object')

                        obj_label = self.get_label(obj.label)
                        ET.SubElement(obj_elem, 'name').text = obj_label

                        # Native VOC attributes become dedicated elements.
                        if 'pose' in attr:
                            ET.SubElement(obj_elem, 'pose').text = \
                                str(attr['pose'])

                        if 'truncated' in attr:
                            truncated = _convert_attr('truncated', attr, int, 0)
                            ET.SubElement(obj_elem, 'truncated').text = \
                                '%d' % truncated

                        if 'difficult' in attr:
                            difficult = _convert_attr('difficult', attr, int, 0)
                            ET.SubElement(obj_elem, 'difficult').text = \
                                '%d' % difficult

                        if 'occluded' in attr:
                            occluded = _convert_attr('occluded', attr, int, 0)
                            ET.SubElement(obj_elem, 'occluded').text = \
                                '%d' % occluded

                        bbox = obj.get_bbox()
                        if bbox is not None:
                            _write_xml_bbox(bbox, obj_elem)

                        for part_bbox in filter(
                                lambda x: obj.group and obj.group == x.group,
                                layout_bboxes):
                            part_elem = ET.SubElement(obj_elem, 'part')
                            ET.SubElement(part_elem, 'name').text = \
                                self.get_label(part_bbox.label)
                            _write_xml_bbox(part_bbox.get_bbox(), part_elem)

                            objects_with_parts.append(new_obj_id)

                        # Every known action for this label is emitted with
                        # a 0/1 presence flag.
                        label_actions = self._get_actions(obj_label)
                        actions_elem = ET.Element('actions')
                        for action in label_actions:
                            present = 0
                            if action in attr:
                                present = _convert_attr(action, attr,
                                    lambda v: int(v == True), 0)
                            ET.SubElement(actions_elem, action).text = \
                                '%d' % present

                            objects_with_actions[new_obj_id][action] = present
                        if len(actions_elem) != 0:
                            obj_elem.append(actions_elem)

                        if self._allow_attributes:
                            native_attrs = {'difficult', 'pose',
                                'truncated', 'occluded' }
                            native_attrs.update(label_actions)

                            # Remaining attributes go into a generic
                            # <attributes> element.
                            attrs_elem = ET.Element('attributes')
                            for k, v in attr.items():
                                if k in native_attrs:
                                    continue
                                attr_elem = ET.SubElement(attrs_elem, 'attribute')
                                ET.SubElement(attr_elem, 'name').text = str(k)
                                ET.SubElement(attr_elem, 'value').text = str(v)
                            if len(attrs_elem):
                                obj_elem.append(attrs_elem)

                    if self._tasks & {VocTask.detection, VocTask.person_layout,
                            VocTask.action_classification}:
                        ann_path = osp.join(self._ann_dir, item.id + '.xml')
                        os.makedirs(osp.dirname(ann_path), exist_ok=True)
                        with open(ann_path, 'w') as f:
                            f.write(ET.tostring(root_elem,
                                encoding='unicode', pretty_print=True))

                    clsdet_list[item.id] = True
                    layout_list[item.id] = objects_with_parts
                    action_list[item.id] = objects_with_actions

                # Label annotations feed the classification lists.
                for label_ann in labels:
                    label = self.get_label(label_ann.label)
                    if not self._is_label(label):
                        continue
                    class_list = class_lists.get(item.id, set())
                    class_list.add(label_ann.label)
                    class_lists[item.id] = class_list

                    clsdet_list[item.id] = True

                if masks:
                    # Merge instance masks into one class mask and one
                    # instance-index mask, then save both.
                    compiled_mask = CompiledMask.from_instance_masks(masks,
                        instance_labels=[self._label_id_mapping(m.label)
                            for m in masks])

                    self.save_segm(
                        osp.join(self._segm_dir, item.id + VocPath.SEGM_EXT),
                        compiled_mask.class_mask)
                    self.save_segm(
                        osp.join(self._inst_dir, item.id + VocPath.SEGM_EXT),
                        compiled_mask.instance_mask,
                        colormap=VocInstColormap)

                    segm_list[item.id] = True

                # Annotation-less items are still listed (with None markers).
                if len(item.annotations) == 0:
                    clsdet_list[item.id] = None
                    layout_list[item.id] = None
                    action_list[item.id] = None
                    segm_list[item.id] = None

            if self._tasks & {VocTask.classification, VocTask.detection,
                    VocTask.action_classification, VocTask.person_layout}:
                self.save_clsdet_lists(subset_name, clsdet_list)
            if self._tasks & {VocTask.classification}:
                self.save_class_lists(subset_name, class_lists)
            if self._tasks & {VocTask.action_classification}:
                self.save_action_lists(subset_name, action_list)
            if self._tasks & {VocTask.person_layout}:
                self.save_layout_lists(subset_name, layout_list)
            if self._tasks & {VocTask.segmentation}:
                self.save_segm_lists(subset_name, segm_list)
346
+
347
    def save_action_lists(self, subset_name, action_list):
        """Writes the Action image list and per-action object lists."""
        if not action_list:
            return

        os.makedirs(self._action_subsets_dir, exist_ok=True)

        # Plain list of item ids for this subset.
        ann_file = osp.join(self._action_subsets_dir, subset_name + '.txt')
        with open(ann_file, 'w') as f:
            for item in action_list:
                f.write('%s\n' % item)

        if len(action_list) == 0:
            return

        # One '<action>_<subset>.txt' file per known action; each line is
        # '<item> <1-based object id> <1 or -1>' (presence flag).
        # NOTE(review): '% d' (space flag) pads positives with a space —
        # presumably to match the official VOC file format; confirm.
        all_actions = set(chain(*(self._get_actions(l)
            for l in self._label_map)))
        for action in all_actions:
            ann_file = osp.join(self._action_subsets_dir,
                '%s_%s.txt' % (action, subset_name))
            with open(ann_file, 'w') as f:
                for item, objs in action_list.items():
                    if not objs:
                        continue
                    for obj_id, obj_actions in objs.items():
                        presented = obj_actions[action]
                        f.write('%s %s % d\n' % \
                            (item, 1 + obj_id, 1 if presented else -1))
374
+
375
    def save_class_lists(self, subset_name, class_lists):
        """Writes one '<label>_<subset>.txt' file per label; each line is
        '<item> <1 or -1>' marking label presence on that item."""
        if not class_lists:
            return

        os.makedirs(self._cls_subsets_dir, exist_ok=True)

        for label in self._label_map:
            ann_file = osp.join(self._cls_subsets_dir,
                '%s_%s.txt' % (label, subset_name))
            with open(ann_file, 'w') as f:
                for item, item_labels in class_lists.items():
                    if not item_labels:
                        continue
                    # item_labels holds label ids; compare by name.
                    item_labels = [self.get_label(l) for l in item_labels]
                    presented = label in item_labels
                    f.write('%s % d\n' % (item, 1 if presented else -1))
391
+
392
    def save_clsdet_lists(self, subset_name, clsdet_list):
        """Write the Main task subset list file: one item id per line."""
        if not clsdet_list:
            return

        os.makedirs(self._cls_subsets_dir, exist_ok=True)

        ann_file = osp.join(self._cls_subsets_dir, subset_name + '.txt')
        with open(ann_file, 'w') as f:
            for item in clsdet_list:
                f.write('%s\n' % item)
402
+
403
    def save_segm_lists(self, subset_name, segm_list):
        """Write the Segmentation task subset list file: one item id per line."""
        if not segm_list:
            return

        os.makedirs(self._segm_subsets_dir, exist_ok=True)

        ann_file = osp.join(self._segm_subsets_dir, subset_name + '.txt')
        with open(ann_file, 'w') as f:
            for item in segm_list:
                f.write('%s\n' % item)
413
+
414
    def save_layout_lists(self, subset_name, layout_list):
        """Write the Layout task subset list: '<item> <1-based object id>'
        per layout object, or a bare '<item>' line when the item has no
        layout objects."""
        if not layout_list:
            return

        os.makedirs(self._layout_subsets_dir, exist_ok=True)

        ann_file = osp.join(self._layout_subsets_dir, subset_name + '.txt')
        with open(ann_file, 'w') as f:
            for item, item_layouts in layout_list.items():
                if item_layouts:
                    for obj_id in item_layouts:
                        f.write('%s % d\n' % (item, 1 + obj_id))
                else:
                    f.write('%s\n' % (item))
428
+
429
    def save_segm(self, path, mask, colormap=None):
        """Save a segmentation *mask* image to *path*; when colormap
        application is enabled, label indices are painted with *colormap*
        (defaults to the mask categories' colormap)."""
        if self._apply_colormap:
            if colormap is None:
                colormap = self._categories[AnnotationType.mask].colormap
            mask = paint_mask(mask, colormap)
        save_image(path, mask, create_dir=True)
435
+
436
    def save_label_map(self):
        """Dump the current label map to the labelmap file in the save dir."""
        path = osp.join(self._save_dir, VocPath.LABELMAP_FILE)
        write_label_map(path, self._label_map)
439
+
440
    def _load_categories(self, label_map_source):
        """Build output categories and the label map from *label_map_source*.

        The source may be a LabelmapType name ('voc' or 'source'), a dict
        of 'name -> [color, parts, actions]', or a path to a labelmap file.
        Ensures a background label with color (0, 0, 0) sits at index 0,
        then writes the finally assigned colors back into the label map.

        Raises:
            Exception: if the source matches none of the accepted forms.
        """
        if label_map_source == LabelmapType.voc.name:
            # use the default VOC colormap
            label_map = make_voc_label_map()

        elif label_map_source == LabelmapType.source.name and \
                AnnotationType.mask not in self._extractor.categories():
            # generate colormap for input labels
            labels = self._extractor.categories() \
                .get(AnnotationType.label, LabelCategories())
            label_map = OrderedDict((item.name, [None, [], []])
                for item in labels.items)

        elif label_map_source == LabelmapType.source.name and \
                AnnotationType.mask in self._extractor.categories():
            # use source colormap; labels without a color are dropped
            labels = self._extractor.categories()[AnnotationType.label]
            colors = self._extractor.categories()[AnnotationType.mask]
            label_map = OrderedDict()
            for idx, item in enumerate(labels.items):
                color = colors.colormap.get(idx)
                if color is not None:
                    label_map[item.name] = [color, [], []]

        elif isinstance(label_map_source, dict):
            # sort by name for a deterministic label order
            label_map = OrderedDict(
                sorted(label_map_source.items(), key=lambda e: e[0]))

        elif isinstance(label_map_source, str) and osp.isfile(label_map_source):
            label_map = parse_label_map(label_map_source)

        else:
            raise Exception("Wrong labelmap specified, "
                "expected one of %s or a file path" % \
                ', '.join(t.name for t in LabelmapType))

        # There must always be a label with color (0, 0, 0) at index 0
        bg_label = find(label_map.items(), lambda x: x[1][0] == (0, 0, 0))
        if bg_label is not None:
            bg_label = bg_label[0]
        else:
            bg_label = 'background'
            if bg_label not in label_map:
                # only give it a color when other labels have colors too
                has_colors = any(v[0] is not None for v in label_map.values())
                color = (0, 0, 0) if has_colors else None
                label_map[bg_label] = [color, [], []]
        label_map.move_to_end(bg_label, last=False)

        self._categories = make_voc_categories(label_map)

        # Update colors with assigned values
        colormap = self._categories[AnnotationType.mask].colormap
        for label_id, color in colormap.items():
            label_desc = label_map[
                self._categories[AnnotationType.label].items[label_id].name]
            label_desc[0] = color

        self._label_map = label_map
        self._label_id_mapping = self._make_label_id_map()
499
+
500
    def _is_label(self, s):
        """Check whether *s* is a label name known to the label map."""
        return self._label_map.get(s) is not None
502
+
503
+ def _is_part(self, s):
504
+ for label_desc in self._label_map.values():
505
+ if s in label_desc[1]:
506
+ return True
507
+ return False
508
+
509
    def _is_action(self, label, s):
        """Check whether *s* is one of *label*'s actions."""
        return s in self._get_actions(label)
511
+
512
    def _get_actions(self, label):
        """Return the action name list for *label* ([] for unknown labels)."""
        label_desc = self._label_map.get(label)
        if not label_desc:
            return []
        return label_desc[2]
517
+
518
    def _make_label_id_map(self):
        """Build a function mapping source label ids to output label ids.

        Source labels missing from the target label map are remapped to 0
        (background); the remapping is logged for inspection.
        """
        source_labels = {
            id: label.name for id, label in
            enumerate(self._extractor.categories().get(
                AnnotationType.label, LabelCategories()).items)
        }
        target_labels = {
            label.name: id for id, label in
            enumerate(self._categories[AnnotationType.label].items)
        }
        id_mapping = {
            src_id: target_labels.get(src_label, 0)
            for src_id, src_label in source_labels.items()
        }

        void_labels = [src_label for src_id, src_label in source_labels.items()
            if src_label not in target_labels]
        if void_labels:
            log.warning("The following labels are remapped to background: %s" %
                ', '.join(void_labels))
        log.debug("Saving segmentations with the following label mapping: \n%s" %
            '\n'.join(["#%s '%s' -> #%s '%s'" %
                (
                    src_id, src_label, id_mapping[src_id],
                    self._categories[AnnotationType.label] \
                        .items[id_mapping[src_id]].name
                )
                for src_id, src_label in source_labels.items()
            ])
        )

        def map_id(src_id):
            # ids never seen in the source also collapse to background
            return id_mapping.get(src_id, 0)
        return map_id
552
+
553
    def _remap_mask(self, mask):
        """Apply the label id mapping to a mask's label indices."""
        return remap_mask(mask, self._label_id_mapping)
555
+
556
class VocClassificationConverter(VocConverter):
    """VocConverter fixed to the classification task only."""
    def __init__(self, *args, **kwargs):
        kwargs['tasks'] = VocTask.classification
        super().__init__(*args, **kwargs)
560
+
561
class VocDetectionConverter(VocConverter):
    """VocConverter fixed to the detection task only."""
    def __init__(self, *args, **kwargs):
        kwargs['tasks'] = VocTask.detection
        super().__init__(*args, **kwargs)
565
+
566
class VocLayoutConverter(VocConverter):
    """VocConverter fixed to the person layout task only."""
    def __init__(self, *args, **kwargs):
        kwargs['tasks'] = VocTask.person_layout
        super().__init__(*args, **kwargs)
570
+
571
class VocActionConverter(VocConverter):
    """VocConverter fixed to the action classification task only."""
    def __init__(self, *args, **kwargs):
        kwargs['tasks'] = VocTask.action_classification
        super().__init__(*args, **kwargs)
575
+
576
class VocSegmentationConverter(VocConverter):
    """VocConverter fixed to the segmentation task only."""
    def __init__(self, *args, **kwargs):
        kwargs['tasks'] = VocTask.segmentation
        super().__init__(*args, **kwargs)
testbed/openvinotoolkit__datumaro/datumaro/plugins/voc_format/format.py ADDED
@@ -0,0 +1,206 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+
2
+ # Copyright (C) 2019-2020 Intel Corporation
3
+ #
4
+ # SPDX-License-Identifier: MIT
5
+
6
+ from collections import OrderedDict
7
+ from enum import Enum
8
+ from itertools import chain
9
+ import numpy as np
10
+
11
+ from datumaro.components.extractor import (AnnotationType,
12
+ LabelCategories, MaskCategories
13
+ )
14
+
15
+
16
+ VocTask = Enum('VocTask', [
17
+ 'classification',
18
+ 'detection',
19
+ 'segmentation',
20
+ 'action_classification',
21
+ 'person_layout',
22
+ ])
23
+
24
+ VocLabel = Enum('VocLabel', [
25
+ ('background', 0),
26
+ ('aeroplane', 1),
27
+ ('bicycle', 2),
28
+ ('bird', 3),
29
+ ('boat', 4),
30
+ ('bottle', 5),
31
+ ('bus', 6),
32
+ ('car', 7),
33
+ ('cat', 8),
34
+ ('chair', 9),
35
+ ('cow', 10),
36
+ ('diningtable', 11),
37
+ ('dog', 12),
38
+ ('horse', 13),
39
+ ('motorbike', 14),
40
+ ('person', 15),
41
+ ('pottedplant', 16),
42
+ ('sheep', 17),
43
+ ('sofa', 18),
44
+ ('train', 19),
45
+ ('tvmonitor', 20),
46
+ ('ignored', 255),
47
+ ])
48
+
49
+ VocPose = Enum('VocPose', [
50
+ 'Unspecified',
51
+ 'Left',
52
+ 'Right',
53
+ 'Frontal',
54
+ 'Rear',
55
+ ])
56
+
57
+ VocBodyPart = Enum('VocBodyPart', [
58
+ 'head',
59
+ 'hand',
60
+ 'foot',
61
+ ])
62
+
63
+ VocAction = Enum('VocAction', [
64
+ 'other',
65
+ 'jumping',
66
+ 'phoning',
67
+ 'playinginstrument',
68
+ 'reading',
69
+ 'ridingbike',
70
+ 'ridinghorse',
71
+ 'running',
72
+ 'takingphoto',
73
+ 'usingcomputer',
74
+ 'walking',
75
+ ])
76
+
77
def generate_colormap(length=256):
    """Build the standard PASCAL VOC label colormap.

    Color channel bits are taken from the label index three bits at a
    time, filling each channel from its most significant bit down.
    Returns an OrderedDict: label id -> (r, g, b).
    """
    def bit(values, pos):
        return (values >> pos) & 1

    ids = np.arange(length, dtype=int)
    colors = np.zeros((length, 3), dtype=int)

    for shift in range(7, -1, -1):
        for channel in range(3):
            colors[:, channel] |= bit(ids, channel) << shift
        ids >>= 3

    return OrderedDict(enumerate(map(tuple, colors)))
92
+
93
# class-segmentation colors, restricted to ids of the defined VOC labels
VocColormap = {id: color for id, color in generate_colormap(256).items()
    if id in [l.value for l in VocLabel]}
# instance-segmentation colors: full 256-entry map indexed by instance number
VocInstColormap = generate_colormap(256)
96
+
97
class VocPath:
    """Constant directory and file names of the VOC dataset layout."""
    IMAGES_DIR = 'JPEGImages'
    ANNOTATIONS_DIR = 'Annotations'
    SEGMENTATION_DIR = 'SegmentationClass'
    INSTANCES_DIR = 'SegmentationObject'
    SUBSETS_DIR = 'ImageSets'
    IMAGE_EXT = '.jpg'
    SEGM_EXT = '.png'
    LABELMAP_FILE = 'labelmap.txt'

    # per-task subdirectory of ImageSets
    TASK_DIR = {
        VocTask.classification: 'Main',
        VocTask.detection: 'Main',
        VocTask.segmentation: 'Segmentation',
        VocTask.action_classification: 'Action',
        VocTask.person_layout: 'Layout',
    }
114
+
115
+
116
def make_voc_label_map():
    """Default VOC label map: name -> [color, body parts, actions],
    with parts and actions filled in for the 'person' label."""
    label_map = OrderedDict()
    for label in sorted(VocLabel, key=lambda l: l.value):
        label_map[label.name] = [VocColormap[label.value], [], []]
    label_map[VocLabel.person.name][1] = [part.name for part in VocBodyPart]
    label_map[VocLabel.person.name][2] = [action.name for action in VocAction]
    return label_map
123
+
124
def parse_label_map(path):
    """Parse a labelmap file into an OrderedDict: name -> [color, parts, actions].

    Each non-empty, non-comment line has the form
    'name:r,g,b:part1,part2:action1,action2' where every field after the
    name is optional (missing color -> None, missing lists -> []).
    Returns None when *path* is empty.

    Raises:
        ValueError: if the same label is defined twice.
    """
    if not path:
        return None

    label_map = OrderedDict()
    with open(path, 'r') as f:
        for line in f:
            # skip empty and commented lines
            line = line.strip()
            if not line or line.startswith('#'):
                continue

            # fields: name, color, parts, actions
            label_desc = line.split(':')
            name = label_desc[0]

            if name in label_map:
                raise ValueError("Label '%s' is already defined" % name)

            if 1 < len(label_desc) and len(label_desc[1]) != 0:
                color = label_desc[1].split(',')
                assert len(color) == 3, \
                    "Label '%s' has wrong color, expected 'r,g,b', got '%s'" % \
                    (name, color)
                color = tuple(int(c) for c in color)
            else:
                color = None

            if 2 < len(label_desc) and len(label_desc[2]) != 0:
                parts = label_desc[2].split(',')
            else:
                parts = []

            if 3 < len(label_desc) and len(label_desc[3]) != 0:
                actions = label_desc[3].split(',')
            else:
                actions = []

            label_map[name] = [color, parts, actions]
    return label_map
164
+
165
def write_label_map(path, label_map):
    """Serialize *label_map* (name -> [color, parts, actions]) to *path*
    in the 'label:color_rgb:parts:actions' format read by parse_label_map."""
    lines = ['# label:color_rgb:parts:actions']
    for label_name, label_desc in label_map.items():
        color = label_desc[0]
        color_rgb = ','.join(str(c) for c in color) if color else ''
        parts = ','.join(str(p) for p in label_desc[1])
        actions = ','.join(str(a) for a in label_desc[2])
        lines.append(':'.join([label_name, color_rgb, parts, actions]))
    with open(path, 'w') as f:
        f.write('\n'.join(lines) + '\n')
178
+
179
def make_voc_categories(label_map=None):
    """Create Datumaro label and mask categories from a VOC label map
    (name -> [color, parts, actions]); uses the default map when None."""
    if label_map is None:
        label_map = make_voc_label_map()

    categories = {}

    label_categories = LabelCategories()
    label_categories.attributes.update(['difficult', 'truncated', 'occluded'])

    for label, desc in label_map.items():
        label_categories.add(label, attributes=desc[2])
    # body parts become extra labels, deduplicated but keeping first-seen order
    for part in OrderedDict((k, None) for k in chain(
            *(desc[1] for desc in label_map.values()))):
        label_categories.add(part)
    categories[AnnotationType.label] = label_categories

    has_colors = any(v[0] is not None for v in label_map.values())
    if not has_colors: # generate new colors
        colormap = generate_colormap(len(label_map))
    else: # only copy defined colors
        label_id = lambda label: label_categories.find(label)[0]
        colormap = { label_id(name): desc[0]
            for name, desc in label_map.items() if desc[0] is not None }
    mask_categories = MaskCategories(colormap)
    # NOTE(review): attribute access for its side effect - presumably to
    # precompute the inverse mapping eagerly; confirm against MaskCategories
    mask_categories.inverse_colormap # pylint: disable=pointless-statement
    categories[AnnotationType.mask] = mask_categories

    return categories
testbed/openvinotoolkit__datumaro/datumaro/plugins/voc_format/importer.py ADDED
@@ -0,0 +1,77 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+
2
+ # Copyright (C) 2019-2020 Intel Corporation
3
+ #
4
+ # SPDX-License-Identifier: MIT
5
+
6
+ from glob import glob
7
+ import os.path as osp
8
+
9
+ from datumaro.components.extractor import Importer
10
+
11
+ from .format import VocTask, VocPath
12
+
13
def find_path(root_path, path, depth=4):
    """Search for *path* (a glob pattern) at up to *depth* directory levels
    below *root_path*; returns the first directory match, or None."""
    found = None
    for _ in range(depth):
        matches = glob(osp.join(root_path, path))
        if matches:
            found = matches[0] # ignore all after the first one
            if osp.isdir(found):
                return found
        else:
            found = None
        # descend one level: prefix the pattern with another wildcard
        root_path = osp.join(root_path, '*')
    return found
29
+
30
class VocImporter(Importer):
    """Creates a project with one source per discovered VOC task subset."""

    # (task, extractor name, ImageSets subdirectory)
    _TASKS = [
        (VocTask.classification, 'voc_classification', 'Main'),
        (VocTask.detection, 'voc_detection', 'Main'),
        (VocTask.segmentation, 'voc_segmentation', 'Segmentation'),
        (VocTask.person_layout, 'voc_layout', 'Layout'),
        (VocTask.action_classification, 'voc_action', 'Action'),
    ]

    def __call__(self, path, **extra_params):
        """Build a Project with a '<task>-<subset>' source per subset file.

        Raises:
            Exception: if no VOC subsets are found under *path*.
        """
        from datumaro.components.project import Project # cyclic import
        project = Project()

        subset_paths = self.find_sources(path)
        if len(subset_paths) == 0:
            raise Exception("Failed to find 'voc' dataset at '%s'" % path)

        for task, extractor_type, subset_path in subset_paths:
            project.add_source('%s-%s' %
                (task.name, osp.splitext(osp.basename(subset_path))[0]),
                {
                    'url': subset_path,
                    'format': extractor_type,
                    'options': dict(extra_params),
                })

        return project

    @classmethod
    def find_sources(cls, path):
        """Locate subset list files for each task under *path*.

        Returns a list of (task, extractor name, subset file path) tuples.
        """
        # find root path for the dataset: the first task dir found anchors it
        root_path = path
        for task, extractor_type, task_dir in cls._TASKS:
            task_path = find_path(root_path, osp.join(VocPath.SUBSETS_DIR, task_dir))
            if task_path:
                root_path = osp.dirname(osp.dirname(task_path))
                break

        subset_paths = []
        for task, extractor_type, task_dir in cls._TASKS:
            task_path = osp.join(root_path, VocPath.SUBSETS_DIR, task_dir)

            if not osp.isdir(task_path):
                continue
            # files with '_' in the name are per-class lists, not subset lists
            task_subsets = [p for p in glob(osp.join(task_path, '*.txt'))
                if '_' not in osp.basename(p)]
            subset_paths += [(task, extractor_type, p) for p in task_subsets]
        return subset_paths
testbed/openvinotoolkit__datumaro/datumaro/plugins/widerface_format.py ADDED
@@ -0,0 +1,122 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+
2
+ # Copyright (C) 2020 Intel Corporation
3
+ #
4
+ # SPDX-License-Identifier: MIT
5
+
6
+ import os
7
+ import os.path as osp
8
+ import re
9
+
10
+ from datumaro.components.converter import Converter
11
+ from datumaro.components.extractor import (AnnotationType, Bbox, DatasetItem,
12
+ Importer, SourceExtractor)
13
+
14
+
15
class WiderFacePath:
    """Constants of the WIDER Face dataset layout."""
    IMAGE_EXT = '.jpg'
    ANNOTATIONS_DIR = 'wider_face_split'
    IMAGES_DIR = 'images'
    SUBSET_DIR = 'WIDER_'
    # per-bbox attribute flags, in their order within an annotation line
    BBOX_ATTRIBUTES = ['blur', 'expression', 'illumination',
        'occluded', 'pose', 'invalid']
22
+
23
class WiderFaceExtractor(SourceExtractor):
    """Reads a WIDER Face annotation file (wider_face_<subset>_bbx_gt.txt)."""

    def __init__(self, path):
        """*path* is the bbx_gt annotation file; the subset name is parsed
        from the file name when it matches the standard pattern.

        Raises:
            Exception: if *path* is not a readable file.
        """
        if not osp.isfile(path):
            raise Exception("Can't read annotation file '%s'" % path)
        self._path = path
        self._dataset_dir = osp.dirname(osp.dirname(path))

        subset = osp.splitext(osp.basename(path))[0]
        match = re.fullmatch(r'wider_face_\S+_bbx_gt', subset)
        if match:
            subset = subset.split('_')[2]
        super().__init__(subset=subset)

        self._items = list(self._load_items(path).values())

    def _load_items(self, path):
        """Parse the bbx_gt file: an image path line, a bbox-count line,
        then one 'x y w h [6 attribute flags]' line per box."""
        items = {}
        with open(path, 'r') as f:
            lines = f.readlines()

        # lines mentioning the image extension start a new record
        image_ids = [image_id for image_id, line in enumerate(lines)
            if WiderFacePath.IMAGE_EXT in line]

        for image_id in image_ids:
            image = lines[image_id]
            # NOTE(review): image[:-1] assumes each image line ends with a
            # newline - the last line of a file may lack one; confirm
            image_path = osp.join(self._dataset_dir, WiderFacePath.SUBSET_DIR
                + self._subset, WiderFacePath.IMAGES_DIR, image[:-1])
            # drop the extension and the trailing newline for the item id
            item_id = image[:-(len(WiderFacePath.IMAGE_EXT) + 1)]

            bbox_count = lines[image_id + 1]
            bbox_lines = lines[image_id + 2 : image_id + int(bbox_count) + 2]
            annotations = []
            for bbox in bbox_lines:
                bbox_list = bbox.split()
                if len(bbox_list) >= 4:
                    attributes = {}
                    if len(bbox_list) == 10:
                        # the 6 flags follow the 4 box coordinates;
                        # '-' marks an absent flag
                        i = 4
                        for attr in WiderFacePath.BBOX_ATTRIBUTES:
                            if bbox_list[i] != '-':
                                attributes[attr] = int(bbox_list[i])
                            i += 1
                    annotations.append(Bbox(
                        int(bbox_list[0]), int(bbox_list[1]),
                        int(bbox_list[2]), int(bbox_list[3]),
                        attributes = attributes
                    ))

            items[item_id] = DatasetItem(id=item_id, subset=self._subset,
                image=image_path, annotations=annotations)
        return items
74
+
75
class WiderFaceImporter(Importer):
    """Finds WIDER Face annotation files under the wider_face_split dir."""
    @classmethod
    def find_sources(cls, path):
        return cls._find_sources_recursive(osp.join(path,
            WiderFacePath.ANNOTATIONS_DIR), '.txt', 'wider_face')
80
+
81
class WiderFaceConverter(Converter):
    """Writes a dataset in the WIDER Face layout."""
    DEFAULT_IMAGE_EXT = '.jpg'

    def apply(self):
        """Write one bbx_gt annotation file per subset (and optionally the
        images), mirroring the format read by WiderFaceExtractor."""
        save_dir = self._save_dir

        os.makedirs(save_dir, exist_ok=True)

        for subset_name, subset in self._extractor.subsets().items():
            subset_dir = osp.join(save_dir, WiderFacePath.SUBSET_DIR + subset_name)

            # the whole subset annotation is accumulated into one string
            wider_annotation = ''
            for item in subset:
                wider_annotation += '%s\n' % (item.id + WiderFacePath.IMAGE_EXT)
                if item.has_image and self._save_images:
                    self._save_image(item, osp.join(save_dir, subset_dir,
                        WiderFacePath.IMAGES_DIR, item.id + WiderFacePath.IMAGE_EXT))

                bboxes = [a for a in item.annotations
                    if a.type == AnnotationType.bbox]

                # bbox count line, then 'x y w h [flags]' per box
                wider_annotation += '%s\n' % len(bboxes)
                for bbox in bboxes:
                    wider_bb = ' '.join('%d' % p for p in bbox.get_bbox())
                    wider_annotation += '%s ' % wider_bb
                    if bbox.attributes:
                        # '-' marks attributes the bbox does not define
                        wider_attr = ''
                        attr_counter = 0
                        for attr in WiderFacePath.BBOX_ATTRIBUTES:
                            if attr in bbox.attributes:
                                wider_attr += '%s ' % bbox.attributes[attr]
                                attr_counter += 1
                            else:
                                wider_attr += '- '
                        if attr_counter > 0:
                            wider_annotation += wider_attr
                    wider_annotation += '\n'
            annotation_path = osp.join(save_dir, WiderFacePath.ANNOTATIONS_DIR,
                'wider_face_' + subset_name + '_bbx_gt.txt')
            os.makedirs(osp.dirname(annotation_path), exist_ok=True)
            with open(annotation_path, 'w') as f:
                f.write(wider_annotation)
testbed/openvinotoolkit__datumaro/datumaro/plugins/yolo_format/__init__.py ADDED
File without changes
testbed/openvinotoolkit__datumaro/datumaro/plugins/yolo_format/extractor.py ADDED
@@ -0,0 +1,197 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+
2
+ # Copyright (C) 2019-2020 Intel Corporation
3
+ #
4
+ # SPDX-License-Identifier: MIT
5
+
6
+ from collections import OrderedDict
7
+ import os.path as osp
8
+ import re
9
+
10
+ from datumaro.components.extractor import (SourceExtractor, Extractor,
11
+ DatasetItem, AnnotationType, Bbox, LabelCategories, Importer
12
+ )
13
+ from datumaro.util import split_path
14
+ from datumaro.util.image import Image
15
+
16
+ from .format import YoloPath
17
+
18
+
19
class YoloExtractor(SourceExtractor):
    """Reads a YOLO/darknet dataset described by an obj.data-style config."""

    class Subset(Extractor):
        # lazy view over one subset; items are parsed on first access
        def __init__(self, name, parent):
            super().__init__()
            self._name = name
            self._parent = parent
            self.items = OrderedDict()

        def __iter__(self):
            for item_id in self.items:
                yield self._parent._get(item_id, self._name)

        def __len__(self):
            return len(self.items)

        def categories(self):
            return self._parent.categories()

    def __init__(self, config_path, image_info=None):
        """*config_path* is the .data descriptor; *image_info* optionally
        maps image names to (height, width), or is a path to a file with
        '<name> <height> <width>' lines.

        Raises:
            Exception: on a missing config, names, meta, or subset list file.
        """
        super().__init__()

        if not osp.isfile(config_path):
            raise Exception("Can't read dataset descriptor file '%s'" %
                config_path)

        rootpath = osp.dirname(config_path)
        self._path = rootpath

        assert image_info is None or isinstance(image_info, (str, dict))
        if image_info is None:
            # the meta file is optional when sizes can be read from images
            image_info = osp.join(rootpath, YoloPath.IMAGE_META_FILE)
            if not osp.isfile(image_info):
                image_info = {}
        if isinstance(image_info, str):
            if not osp.isfile(image_info):
                raise Exception("Can't read image meta file '%s'" % image_info)
            with open(image_info) as f:
                image_info = {}
                for line in f:
                    image_name, h, w = line.strip().split()
                    image_info[image_name] = (int(h), int(w))
        self._image_info = image_info

        with open(config_path, 'r') as f:
            config_lines = f.readlines()

        subsets = OrderedDict()
        names_path = None

        # the .data file holds 'key = value' lines; only 'names' and the
        # known subset keys are used
        for line in config_lines:
            match = re.match(r'(\w+)\s*=\s*(.+)$', line)
            if not match:
                continue

            key = match.group(1)
            value = match.group(2)
            if key == 'names':
                names_path = value
            elif key in YoloPath.SUBSET_NAMES:
                subsets[key] = value
            else:
                continue

        if not names_path:
            raise Exception("Failed to parse labels path from '%s'" % \
                config_path)

        for subset_name, list_path in subsets.items():
            list_path = osp.join(self._path, self.localize_path(list_path))
            if not osp.isfile(list_path):
                raise Exception("Not found '%s' subset list file" % subset_name)

            subset = YoloExtractor.Subset(subset_name, self)
            with open(list_path, 'r') as f:
                # store the raw path; it is replaced by a DatasetItem lazily
                subset.items = OrderedDict(
                    (self.name_from_path(p), self.localize_path(p))
                    for p in f
                )
            subsets[subset_name] = subset

        self._subsets = subsets

        self._categories = {
            AnnotationType.label:
                self._load_categories(
                    osp.join(self._path, self.localize_path(names_path)))
        }

    @staticmethod
    def localize_path(path):
        """Strip whitespace and the conventional 'data/' prefix."""
        path = path.strip()
        default_base = osp.join('data', '')
        if path.startswith(default_base): # default path
            path = path[len(default_base) : ]
        return path

    @classmethod
    def name_from_path(cls, path):
        """Derive an item id from a subset-list entry."""
        path = cls.localize_path(path)
        parts = split_path(path)
        if 1 < len(parts) and not osp.isabs(path):
            # NOTE: when path is like [data/]<subset_obj>/<image_name>
            # drop everything but <image name>
            # <image name> can be <a/b/c/filename.ext>, so no just basename()
            path = osp.join(*parts[1:])
        return osp.splitext(path)[0]

    def _get(self, item_id, subset_name):
        """Parse the item lazily, caching the DatasetItem in place."""
        subset = self._subsets[subset_name]
        item = subset.items[item_id]

        if isinstance(item, str):
            image_size = self._image_info.get(item_id)
            image = Image(path=osp.join(self._path, item), size=image_size)

            anno_path = osp.splitext(image.path)[0] + '.txt'
            annotations = self._parse_annotations(anno_path, image)

            item = DatasetItem(id=item_id, subset=subset_name,
                image=image, annotations=annotations)
            subset.items[item_id] = item

        return item

    @staticmethod
    def _parse_annotations(anno_path, image):
        """Convert normalized center-based YOLO boxes to pixel Bboxes.

        Raises:
            Exception: if boxes exist but the image size is unknown.
        """
        lines = []
        with open(anno_path, 'r') as f:
            for line in f:
                line = line.strip()
                if line:
                    lines.append(line)

        annotations = []
        if lines:
            size = image.size # use image info as late as possible
            if size is None:
                raise Exception("Can't find image info for '%s'" % image.path)
            image_height, image_width = size
            for line in lines:
                # '<label> <x center> <y center> <w> <h>', all normalized
                label_id, xc, yc, w, h = line.split()
                label_id = int(label_id)
                w = float(w)
                h = float(h)
                x = float(xc) - w * 0.5
                y = float(yc) - h * 0.5
                annotations.append(Bbox(
                    round(x * image_width, 1), round(y * image_height, 1),
                    round(w * image_width, 1), round(h * image_height, 1),
                    label=label_id
                ))

        return annotations

    @staticmethod
    def _load_categories(names_path):
        """Read label names, one per line."""
        label_categories = LabelCategories()

        with open(names_path, 'r') as f:
            for label in f:
                label_categories.add(label.strip())

        return label_categories

    def __iter__(self):
        for subset in self._subsets.values():
            for item in subset:
                yield item

    def __len__(self):
        return sum(len(s) for s in self._subsets.values())

    def get_subset(self, name):
        return self._subsets[name]
193
+
194
class YoloImporter(Importer):
    """Finds YOLO datasets by their .data descriptor files."""
    @classmethod
    def find_sources(cls, path):
        return cls._find_sources_recursive(path, '.data', 'yolo')
testbed/openvinotoolkit__datumaro/datumaro/plugins/yolo_format/format.py ADDED
@@ -0,0 +1,11 @@
 
 
 
 
 
 
 
 
 
 
 
 
1
+
2
+ # Copyright (C) 2019-2020 Intel Corporation
3
+ #
4
+ # SPDX-License-Identifier: MIT
5
+
6
+
7
class YoloPath:
    """Constants of the YOLO dataset layout."""
    DEFAULT_SUBSET_NAME = 'train'
    SUBSET_NAMES = ['train', 'valid']

    # optional file with '<image name> <height> <width>' lines
    IMAGE_META_FILE = 'images.meta'
testbed/openvinotoolkit__datumaro/datumaro/util/__init__.py ADDED
@@ -0,0 +1,93 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+
2
+ # Copyright (C) 2019-2020 Intel Corporation
3
+ #
4
+ # SPDX-License-Identifier: MIT
5
+
6
+ import os
7
+ import os.path as osp
8
+ from itertools import islice
9
+
10
+
11
def find(iterable, pred=lambda x: True, default=None):
    """Return the first element of *iterable* satisfying *pred*, or *default*."""
    for element in iterable:
        if pred(element):
            return element
    return default
13
+
14
def dir_items(path, ext, truncate_ext=False):
    """List entries of directory *path* whose names end with *ext*.

    When *truncate_ext* is True the extension is stripped from the
    returned names. The previous implementation used rfind(), which also
    matched the extension in the middle of a name (e.g. 'a.txt.bak' for
    '.txt') and truncated at the wrong position; this version requires a
    proper suffix match.
    """
    items = []
    for f in os.listdir(path):
        if f.endswith(ext):
            if truncate_ext:
                # len-based slice also handles ext == '' correctly
                f = f[:len(f) - len(ext)]
            items.append(f)
    return items
23
+
24
def split_path(path):
    """Split a normalized path into its components, e.g. 'a/b/c' -> ['a', 'b', 'c'];
    an absolute path keeps the root as its first component."""
    path = osp.normpath(path)
    parts = []
    while path:
        head, tail = osp.split(path)
        if tail:
            parts.append(tail)
            path = head
        else:
            # reached the filesystem root ('/', 'C:\\', ...)
            parts.append(head)
            break
    parts.reverse()
    return parts
39
+
40
def cast(value, type_conv, default=None):
    """Convert *value* with *type_conv*; return *default* when the value
    is None or the conversion fails."""
    if value is None:
        return default
    try:
        converted = type_conv(value)
    except Exception:
        return default
    return converted
47
+
48
def to_snake_case(s):
    """Convert a CamelCase identifier to snake_case, keeping acronym
    runs intact (e.g. 'HTMLParser' -> 'htmlparser', not 'h_t_m_l_...')."""
    if not s:
        return ''

    chars = [s[0].lower()]
    for pos in range(1, len(s)):
        ch = s[pos]
        if ch.isalpha() and ch.isupper():
            prev = s[pos - 1]
            # only break before the first capital of an uppercase run
            if not (prev.isalpha() and prev.isupper()):
                chars.append('_')
            chars.append(ch.lower())
        else:
            chars.append(ch)
    return ''.join(chars)
64
+
65
def pairs(iterable):
    """Iterate over consecutive non-overlapping pairs:
    [1, 2, 3, 4] -> (1, 2), (3, 4); a trailing odd element is dropped."""
    it = iter(iterable)
    for first in it:
        try:
            second = next(it)
        except StopIteration:
            break
        yield first, second
68
+
69
def take_by(iterable, count):
    """
    Returns elements from the input iterable by batches of N items.
    ('abcdefg', 3) -> ['a', 'b', 'c'], ['d', 'e', 'f'], ['g']
    """
    it = iter(iterable)
    batch = list(islice(it, count))
    while batch:
        yield batch
        batch = list(islice(it, count))
82
+
83
def str_to_bool(s):
    """Parse a textual boolean (case-insensitive).

    Raises:
        ValueError: for values not recognized as true or false.
    """
    normalized = s.lower()
    if normalized in ('true', '1', 'ok', 'yes', 'y'):
        return True
    if normalized in ('false', '0', 'no', 'n'):
        return False
    raise ValueError("Can't convert value '%s' to bool" % s)
91
+
92
def filter_dict(d, exclude_keys):
    """Return a copy of *d* without the keys listed in *exclude_keys*."""
    result = {}
    for key, value in d.items():
        if key not in exclude_keys:
            result[key] = value
    return result
testbed/openvinotoolkit__datumaro/datumaro/util/annotation_util.py ADDED
@@ -0,0 +1,212 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (C) 2020 Intel Corporation
2
+ #
3
+ # SPDX-License-Identifier: MIT
4
+
5
+ from itertools import groupby
6
+
7
+ import numpy as np
8
+
9
+ from datumaro.components.extractor import _Shape, Mask, AnnotationType, RleMask
10
+ from datumaro.util.mask_tools import mask_to_rle
11
+
12
+
13
def find_instances(instance_anns):
    """Group annotations into instances by their 'group' attribute.

    Annotations sharing a non-zero group id form one instance; each
    annotation with a falsy group id becomes its own instance.
    """
    by_group = sorted(instance_anns, key=lambda a: a.group)
    instances = []
    for group_id, anns in groupby(by_group, key=lambda a: a.group):
        if group_id:
            instances.append(list(anns))
        else:
            # ungrouped: one instance per annotation
            instances.extend([a] for a in anns)
    return instances
23
+
24
def find_group_leader(group):
    """Pick the annotation with the largest area as the group's leader."""
    def _area(ann):
        return ann.get_area()
    return max(group, key=_area)
26
+
27
def _get_bbox(ann):
    """Return [x, y, w, h] for a shape/mask annotation, or pass a raw
    bbox sequence through unchanged."""
    if isinstance(ann, (_Shape, Mask)):
        return ann.get_bbox()
    else:
        return ann
32
+
33
def max_bbox(annotations):
    """Smallest [x, y, w, h] rectangle covering all the input boxes
    ([0, 0, 0, 0] for an empty input)."""
    boxes = [_get_bbox(ann) for ann in annotations]
    left = min((b[0] for b in boxes), default=0)
    top = min((b[1] for b in boxes), default=0)
    right = max((b[0] + b[2] for b in boxes), default=0)
    bottom = max((b[1] + b[3] for b in boxes), default=0)
    return [left, top, right - left, bottom - top]
40
+
41
def mean_bbox(annotations):
    """Average [x, y, w, h] box over the inputs, computed corner-wise."""
    count = len(annotations)
    boxes = [_get_bbox(ann) for ann in annotations]
    left = sum(b[0] for b in boxes) / count
    top = sum(b[1] for b in boxes) / count
    right = sum(b[0] + b[2] for b in boxes) / count
    bottom = sum(b[1] + b[3] for b in boxes) / count
    return [left, top, right - left, bottom - top]
49
+
50
def softmax(x):
    """Numerically stable softmax: exp(x) normalized to sum to 1.

    Subtracting the maximum before exponentiation avoids overflow for
    large inputs; the result is mathematically identical to exp(x)/sum(exp(x)).
    """
    x = np.asarray(x)
    e = np.exp(x - np.max(x))
    return e / np.sum(e)
52
+
53
def nms(segments, iou_thresh=0.5):
    """
    Non-maxima suppression algorithm.

    Greedily keeps the highest-scoring segments, discarding any remaining
    segment whose IoU with an already kept one exceeds *iou_thresh*.
    Requires a 'score' attribute on every segment.
    """
    # NOTE(review): relies on a module-level 'iou' helper not visible here;
    # presumably the generic segment IoU - confirm
    # candidates sorted by ascending score; processed from the end (best first)
    indices = np.argsort([b.attributes['score'] for b in segments])
    ious = np.array([[iou(a, b) for b in segments] for a in segments])

    predictions = []
    while len(indices) != 0:
        i = len(indices) - 1
        pred_idx = indices[i]
        to_remove = [i]
        predictions.append(segments[pred_idx])
        # suppress remaining candidates overlapping the kept one;
        # to_remove stores positions within 'indices', not segment ids
        for i, box_idx in enumerate(indices[:i]):
            if iou_thresh < ious[pred_idx, box_idx]:
                to_remove.append(i)
        indices = np.delete(indices, to_remove)

    return predictions
73
+
74
def bbox_iou(a, b):
    """IoU for axis-aligned boxes (or annotations exposing get_bbox).

    Returns -1 when the boxes do not intersect, otherwise a value
    in (0; 1].
    """
    ax, ay, aw, ah = _get_bbox(a)
    bx, by, bw, bh = _get_bbox(b)

    overlap_w = max(0, min(ax + aw, bx + bw) - max(ax, bx))
    overlap_h = max(0, min(ay + ah, by + bh) - max(ay, by))
    intersection = overlap_w * overlap_h
    if not intersection:
        return -1

    union = aw * ah + bw * bh - intersection
    return intersection / union
98
+
99
def segment_iou(a, b):
    """
    Generic IoU computation with masks, polygons, and boxes.
    Returns -1 if no intersection, [0; 1] otherwise
    """
    from pycocotools import mask as mask_utils

    a_bbox = a.get_bbox()
    b_bbox = b.get_bbox()

    # If either annotation is a plain bbox, compare the two bboxes directly
    is_bbox = AnnotationType.bbox in [a.type, b.type]
    if is_bbox:
        a = [a_bbox]
        b = [b_bbox]
    else:
        # common canvas size large enough to hold both segments
        w = max(a_bbox[0] + a_bbox[2], b_bbox[0] + b_bbox[2])
        h = max(a_bbox[1] + a_bbox[3], b_bbox[1] + b_bbox[3])

        def _to_rle(ann):
            # convert any supported segment kind to a COCO RLE list
            if ann.type == AnnotationType.polygon:
                return mask_utils.frPyObjects([ann.points], h, w)
            elif isinstance(ann, RleMask):
                return [ann.rle]
            elif ann.type == AnnotationType.mask:
                return mask_utils.frPyObjects([mask_to_rle(ann.image)], h, w)
            else:
                raise TypeError("Unexpected arguments: %s, %s" % (a, b))
        a = _to_rle(a)
        b = _to_rle(b)
    # the third argument is pycocotools' per-gt "iscrowd" flag list;
    # NOTE(review): [not is_bbox] mirrors the original call -- confirm the
    # intended crowd semantics against the pycocotools mask API
    return float(mask_utils.iou(a, b, [not is_bbox]))
129
+
130
def PDJ(a, b, eps=None, ratio=0.05, bbox=None):
    """Percentage of Detected Joints metric.

    Counts the fraction of points of `a` lying within `eps` of the
    corresponding point of `b`. When eps is falsy it is derived as
    `ratio` of the diagonal of `bbox` (by default the mean bbox of both
    inputs). Returns 0 when the point sets have different sizes.
    """
    assert eps is not None or ratio is not None

    pts_a = np.array(a.points).reshape((-1, 2))
    pts_b = np.array(b.points).reshape((-1, 2))
    if len(pts_a) != len(pts_b):
        return 0

    if not eps:
        if bbox is None:
            bbox = mean_bbox([a, b])
        diag = (bbox[2] ** 2 + bbox[3] ** 2) ** 0.5
        eps = ratio * diag

    dists = np.linalg.norm(pts_a - pts_b, axis=1)
    return np.sum(dists < eps) / len(pts_a)
152
+
153
def OKS(a, b, sigma=None, bbox=None, scale=None):
    """Object Keypoint Similarity metric.
    https://cocodataset.org/#keypoints-eval

    Returns the mean per-keypoint similarity in [0; 1], or 0 when the
    two point sets have different sizes.

    Fix: the per-keypoint similarities are now averaged (divided by the
    number of keypoints) as the COCO definition requires; the previous
    plain sum exceeded 1 and grew with the keypoint count.
    """
    p1 = np.array(a.points).reshape((-1, 2))
    p2 = np.array(b.points).reshape((-1, 2))
    if len(p1) != len(p2):
        return 0

    if not sigma:
        sigma = 0.1
    else:
        assert len(sigma) == len(p1)

    if not scale:
        if bbox is None:
            bbox = mean_bbox([a, b])
        scale = bbox[2] * bbox[3]

    dists = np.linalg.norm(p1 - p2, axis=1)
    return np.sum(np.exp(-(dists ** 2) / (2 * scale * (2 * sigma) ** 2))) \
        / len(p1)
176
+
177
def smooth_line(points, segments):
    """Resample a polyline given as flat [x0, y0, x1, y1, ...] into
    `segments` equal-length pieces.

    Returns (new_points, step): a (segments + 1, 2) array of resampled
    vertices and the length of one piece.
    NOTE(review): when the input already has exactly `segments` points
    the original flat sequence is returned as-is (no tuple, no step) --
    callers appear to rely on this asymmetry; confirm before changing.
    """
    assert 2 <= len(points) // 2 and len(points) % 2 == 0

    if len(points) // 2 == segments:
        return points

    points = list(points)
    if len(points) == 2:
        # degenerate single-point input: duplicate it to form a segment
        points.extend(points)
    points = np.array(points).reshape((-1, 2))

    # cumulative arc length at every input vertex
    lengths = np.linalg.norm(points[1:] - points[:-1], axis=1)
    dists = [0]
    for l in lengths:
        dists.append(dists[-1] + l)

    step = dists[-1] / segments

    new_points = np.zeros((segments + 1, 2))
    new_points[0] = points[0]

    old_segment = 0
    for new_segment in range(1, segments + 1):
        pos = new_segment * step
        # advance to the source segment containing the target position
        while dists[old_segment + 1] < pos and old_segment + 2 < len(dists):
            old_segment += 1

        segment_start = dists[old_segment]
        segment_len = lengths[old_segment]
        prev_p = points[old_segment]
        next_p = points[old_segment + 1]
        r = (pos - segment_start) / segment_len

        # linear interpolation inside the source segment
        new_points[new_segment] = prev_p * (1 - r) + next_p * r

    return new_points, step
testbed/openvinotoolkit__datumaro/datumaro/util/attrs_util.py ADDED
@@ -0,0 +1,33 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (C) 2020 Intel Corporation
2
+ #
3
+ # SPDX-License-Identifier: MIT
4
+
5
+ import attr
6
+
7
+ _NOTSET = object()
8
+
9
def not_empty(inst, attribute, x):
    """attrs-style validator that rejects empty collections/strings."""
    assert 0 != len(x), x
11
+
12
def default_if_none(conv):
    """Build an attrs validator that replaces None with the attribute's
    declared default and coerces wrongly-typed values with `conv`.

    conv: a type/callable used both as the isinstance fallback target
        (when the attribute declares no type) and as the converter.
    """
    def validator(inst, attribute, value):
        default = attribute.default
        if value is None:
            # Resolve the declared default: plain callables are invoked,
            # attr.Factory wrappers are unwrapped, constants used as-is.
            if callable(default):
                value = default()
            elif isinstance(default, attr.Factory):
                value = default.factory()
            else:
                value = default
        elif not isinstance(value, attribute.type or conv):
            value = conv(value)
        # validators may not return a value, so store the coerced result
        setattr(inst, attribute.name, value)
    return validator
26
+
27
def ensure_cls(c):
    """Return a converter that passes instances of `c` through unchanged
    and constructs new ones from keyword mappings otherwise."""
    def converter(arg):
        if isinstance(arg, c):
            return arg
        return c(**arg)
    return converter
testbed/openvinotoolkit__datumaro/datumaro/util/command_targets.py ADDED
@@ -0,0 +1,113 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+
2
+ # Copyright (C) 2019-2020 Intel Corporation
3
+ #
4
+ # SPDX-License-Identifier: MIT
5
+
6
+ import argparse
7
+ from enum import Enum
8
+
9
+ from datumaro.components.project import Project
10
+ from datumaro.util.image import load_image
11
+
12
+
13
+ TargetKinds = Enum('TargetKinds',
14
+ ['project', 'source', 'external_dataset', 'inference', 'image'])
15
+
16
def is_project_name(value, project):
    """Check whether `value` matches the loaded project's name."""
    return project.config.project_name == value
18
+
19
def is_project_path(value):
    """Return True when `value` points to a loadable project directory."""
    if not value:
        return False
    try:
        Project.load(value)
        return True
    except Exception:
        # any load failure means this is not a project path
        return False
27
+
28
def is_project(value, project=None):
    """True when `value` is a project path or, given a loaded project,
    its name."""
    if is_project_path(value):
        return True
    if project is not None:
        return is_project_name(value, project)
    return False
35
+
36
def is_source(value, project=None):
    """True when `value` names a data source registered in `project`."""
    if project is None:
        return False
    try:
        project.get_source(value)
    except KeyError:
        return False
    return True
45
+
46
def is_external_source(value):
    """Placeholder: external dataset detection is not implemented."""
    return False

def is_inference_path(value):
    """Placeholder: inference path detection is not implemented."""
    return False

def is_image_path(value):
    """True when `value` is a readable image file."""
    try:
        return load_image(value) is not None
    except Exception:
        return False
57
+
58
+
59
class Target:
    """A (kind, test, is_default, name) descriptor of a CLI target.

    Supports len() and iteration so it can be unpacked exactly like a
    plain 4-tuple by target_selector().
    """
    def __init__(self, kind, test, is_default=False, name=None):
        self.kind = kind
        self.test = test
        self.is_default = is_default
        self.name = name

    def _get_fields(self):
        return [self.kind, self.test, self.is_default, self.name]

    def __str__(self):
        if self.name:
            return self.name
        return str(self.kind)

    def __len__(self):
        return len(self._get_fields())

    def __iter__(self):
        yield from self._get_fields()
77
+
78
def ProjectTarget(kind=TargetKinds.project, test=None,
        is_default=False, name='project name or path', project=None):
    """Target matching a project path or the loaded project's name."""
    if test is None:
        test = lambda v: is_project(v, project=project)
    return Target(kind, test, is_default, name)

def SourceTarget(kind=TargetKinds.source, test=None,
        is_default=False, name='source name', project=None):
    """Target matching a source registered in the project."""
    if test is None:
        test = lambda v: is_source(v, project=project)
    return Target(kind, test, is_default, name)

def ExternalDatasetTarget(kind=TargetKinds.external_dataset,
        test=is_external_source,
        is_default=False, name='external dataset path'):
    """Target for external dataset paths (detection not implemented)."""
    return Target(kind, test, is_default, name)

def InferenceTarget(kind=TargetKinds.inference, test=is_inference_path,
        is_default=False, name='inference path'):
    """Target for inference result paths (detection not implemented)."""
    return Target(kind, test, is_default, name)

def ImageTarget(kind=TargetKinds.image, test=is_image_path,
        is_default=False, name='image path'):
    """Target matching a readable image file path."""
    return Target(kind, test, is_default, name)
104
+
105
+
106
def target_selector(*targets):
    """Build an argparse `type=` callable that classifies a value
    against the given targets (tried in order) and returns
    (kind, value). Raises ArgumentTypeError when nothing matches."""
    def selector(value):
        for kind, test, is_default, _ in targets:
            empty = value is None or value == ''
            if (is_default and empty) or test(value):
                return (kind, value)
        names = ', '.join([str(t) for t in targets])
        raise argparse.ArgumentTypeError('Value should be one of: %s' \
            % names)
    return selector
testbed/openvinotoolkit__datumaro/datumaro/util/image.py ADDED
@@ -0,0 +1,295 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+
2
+ # Copyright (C) 2019-2020 Intel Corporation
3
+ #
4
+ # SPDX-License-Identifier: MIT
5
+
6
+ # pylint: disable=unused-import
7
+
8
+ from enum import Enum
9
+ from io import BytesIO
10
+ import numpy as np
11
+ import os
12
+ import os.path as osp
13
+
14
+ _IMAGE_BACKENDS = Enum('_IMAGE_BACKENDS', ['cv2', 'PIL'])
15
+ _IMAGE_BACKEND = None
16
+ try:
17
+ import cv2
18
+ _IMAGE_BACKEND = _IMAGE_BACKENDS.cv2
19
+ except ImportError:
20
+ import PIL
21
+ _IMAGE_BACKEND = _IMAGE_BACKENDS.PIL
22
+
23
+ from datumaro.util.image_cache import ImageCache as _ImageCache
24
+
25
+
26
def load_image(path, dtype=np.float32):
    """
    Reads an image in the HWC Grayscale/BGR(A) float [0; 255] format.

    Uses the cv2 backend when available, falling back to PIL; either
    way callers receive the channels in OpenCV's BGR order.
    """
    if _IMAGE_BACKEND == _IMAGE_BACKENDS.cv2:
        import cv2
        # cv2.imread signals failure by returning None (checked below)
        image = cv2.imread(path, cv2.IMREAD_UNCHANGED)
        image = image.astype(dtype)
    elif _IMAGE_BACKEND == _IMAGE_BACKENDS.PIL:
        from PIL import Image
        image = Image.open(path)
        image = np.asarray(image, dtype=dtype)
        if len(image.shape) == 3 and image.shape[2] in {3, 4}:
            # PIL decodes to RGB; swap to BGR for a uniform output format
            image[:, :, :3] = image[:, :, 2::-1] # RGB to BGR
    else:
        raise NotImplementedError()

    if image is None:
        raise ValueError("Can't open image '%s'" % path)
    # sanity checks: grayscale (HW) or 3/4-channel (HWC) only
    assert len(image.shape) in {2, 3}
    if len(image.shape) == 3:
        assert image.shape[2] in {3, 4}
    return image
50
+
51
def save_image(path, image, create_dir=False, dtype=np.uint8, **kwargs):
    """Write `image` (HWC, BGR(A) or grayscale) to `path`.

    create_dir: create the destination directory when it is missing.
    jpeg_quality (kwarg): JPEG quality (default 75 with the cv2 backend).
    Raises FileNotFoundError when the directory is missing and
    create_dir is False.
    """
    # NOTE: Check destination path for existence
    # OpenCV silently fails if target directory does not exist
    dst_dir = osp.dirname(path)
    if dst_dir:
        if create_dir:
            os.makedirs(dst_dir, exist_ok=True)
        elif not osp.isdir(dst_dir):
            raise FileNotFoundError("Directory does not exist: '%s'" % dst_dir)

    if not kwargs:
        kwargs = {}

    # Fix: detect the extension with splitext so both '.jpg' and '.jpeg'
    # (any case) receive the JPEG parameters; the previous path[-4:]
    # check missed '.jpeg'.
    ext = osp.splitext(path)[1].upper()

    if _IMAGE_BACKEND == _IMAGE_BACKENDS.cv2:
        import cv2

        params = []
        if ext in ('.JPG', '.JPEG'):
            params = [
                int(cv2.IMWRITE_JPEG_QUALITY), kwargs.get('jpeg_quality', 75)
            ]

        image = image.astype(dtype)
        cv2.imwrite(path, image, params=params)
    elif _IMAGE_BACKEND == _IMAGE_BACKENDS.PIL:
        from PIL import Image

        params = {}
        # Fix: only forward the quality option when it was supplied;
        # passing quality=None to PIL.Image.save is invalid.
        quality = kwargs.get('jpeg_quality')
        if quality is not None:
            params['quality'] = quality
        if quality == 100:
            params['subsampling'] = 0

        image = image.astype(dtype)
        if len(image.shape) == 3 and image.shape[2] in {3, 4}:
            image[:, :, :3] = image[:, :, 2::-1] # BGR to RGB
        image = Image.fromarray(image)
        image.save(path, **params)
    else:
        raise NotImplementedError()
92
+
93
def encode_image(image, ext, dtype=np.uint8, **kwargs):
    """Encode `image` into a byte string of the format given by `ext`.

    jpeg_quality (kwarg): JPEG quality (default 75 with the cv2 backend).
    Raises when the backend fails to encode; returns the encoded bytes.
    """
    if not kwargs:
        kwargs = {}

    if _IMAGE_BACKEND == _IMAGE_BACKENDS.cv2:
        import cv2

        if not ext.startswith('.'):
            ext = '.' + ext

        params = []
        # Fix: also recognize '.jpeg' (the old check handled '.jpg' only)
        if ext.upper() in ('.JPG', '.JPEG'):
            params = [
                int(cv2.IMWRITE_JPEG_QUALITY), kwargs.get('jpeg_quality', 75)
            ]

        image = image.astype(dtype)
        success, result = cv2.imencode(ext, image, params=params)
        if not success:
            raise Exception("Failed to encode image to '%s' format" % (ext))
        return result.tobytes()
    elif _IMAGE_BACKEND == _IMAGE_BACKENDS.PIL:
        from PIL import Image

        if ext.startswith('.'):
            ext = ext[1:]

        params = {}
        # Fix: only pass 'quality' when explicitly requested;
        # PIL.Image.save does not accept quality=None.
        quality = kwargs.get('jpeg_quality')
        if quality is not None:
            params['quality'] = quality
        if quality == 100:
            params['subsampling'] = 0

        image = image.astype(dtype)
        if len(image.shape) == 3 and image.shape[2] in {3, 4}:
            image[:, :, :3] = image[:, :, 2::-1] # BGR to RGB
        image = Image.fromarray(image)
        with BytesIO() as buffer:
            image.save(buffer, format=ext, **params)
            return buffer.getvalue()
    else:
        raise NotImplementedError()
135
+
136
def decode_image(image_bytes, dtype=np.float32):
    """Decode an encoded image byte string into an HWC
    Grayscale/BGR(A) array of the requested dtype."""
    if _IMAGE_BACKEND == _IMAGE_BACKENDS.cv2:
        import cv2
        image = np.frombuffer(image_bytes, dtype=np.uint8)
        image = cv2.imdecode(image, cv2.IMREAD_UNCHANGED)
        image = image.astype(dtype)
    elif _IMAGE_BACKEND == _IMAGE_BACKENDS.PIL:
        from PIL import Image
        image = Image.open(BytesIO(image_bytes))
        image = np.asarray(image, dtype=dtype)
        if len(image.shape) == 3 and image.shape[2] in {3, 4}:
            # PIL decodes to RGB; swap to BGR for a uniform output format
            image[:, :, :3] = image[:, :, 2::-1] # RGB to BGR
    else:
        raise NotImplementedError()

    # sanity checks: grayscale (HW) or 3/4-channel (HWC) only
    assert len(image.shape) in {2, 3}
    if len(image.shape) == 3:
        assert image.shape[2] in {3, 4}
    return image
155
+
156
+
157
class lazy_image:
    """Callable that loads an image on first use and optionally caches it.

    path: passed to `loader`; need not be an actual file path.
    loader: callable(path) -> image; defaults to load_image.
    cache: False disables caching, None uses the global ImageCache,
        any other object is used as the cache itself.
    """
    def __init__(self, path, loader=None, cache=None):
        if loader is None:
            loader = load_image
        self.path = path
        self.loader = loader

        # Cache:
        # - False: do not cache
        # - None: use the global cache
        # - object: an object to be used as cache
        assert cache in {None, False} or isinstance(cache, object)
        self.cache = cache

    def __call__(self):
        """Load (or fetch from the cache) and return the image."""
        image = None
        image_id = hash(self) # path is not necessary hashable or a file path

        cache = self._get_cache(self.cache)
        if cache is not None:
            image = cache.get(image_id)

        if image is None:
            image = self.loader(self.path)
            if cache is not None:
                cache.push(image_id, image)
        return image

    @staticmethod
    def _get_cache(cache):
        # resolve the cache setting to an actual cache object or None
        if cache is None:
            cache = _ImageCache.get_instance()
        elif cache == False:
            return None
        return cache

    def __hash__(self):
        # id(self) makes the hash unique per instance, so cache entries
        # are never shared between lazy_image objects
        return hash((id(self), self.path, self.loader))
195
+
196
class Image:
    """An image backed by an ndarray, a file path, or a custom loader;
    pixel data is produced lazily on first access to `data`."""

    def __init__(self, data=None, path=None, loader=None, cache=None,
            size=None):
        assert size is None or len(size) == 2
        if size is not None:
            assert len(size) == 2 and 0 < size[0] and 0 < size[1], size
            size = tuple(size)
        self._size = size # (H, W)

        assert path is None or isinstance(path, str)
        self._path = path if path is not None else ''

        assert data is not None or self._path or loader, \
            "Image can not be empty"
        if data is not None:
            assert callable(data) or isinstance(data, np.ndarray), type(data)
        if data is None and (self._path or loader):
            # defer reading until the pixels are actually requested
            if osp.isfile(self._path) or loader:
                data = lazy_image(self._path, loader=loader, cache=cache)
        self._data = data

    @property
    def path(self):
        """The source file path ('' when not file-backed)."""
        return self._path

    @property
    def ext(self):
        """The file extension of `path`, including the leading dot."""
        return osp.splitext(osp.basename(self._path))[1]

    @property
    def data(self):
        """The image pixels; triggers lazy loading when deferred."""
        data = self._data
        return data() if callable(data) else data

    @property
    def has_data(self):
        return self._data is not None

    @property
    def size(self):
        """(height, width), derived from the pixels on demand."""
        if self._size is None:
            data = self.data
            if data is not None:
                self._size = data.shape[:2]
        return self._size

    def __eq__(self, other):
        if isinstance(other, np.ndarray):
            return self.has_data and np.array_equal(self.data, other)

        if not isinstance(other, __class__):
            return False
        if not np.array_equal(self.size, other.size):
            return False
        if self.has_data != other.has_data:
            return False
        return not self.has_data or np.array_equal(self.data, other.data)
255
+
256
+ class ByteImage(Image):
257
+ def __init__(self, data=None, path=None, ext=None, cache=None, size=None):
258
+ loader = None
259
+ if data is not None:
260
+ if callable(data) and not isinstance(data, lazy_image):
261
+ data = lazy_image(path, loader=data, cache=cache)
262
+ loader = lambda _: decode_image(self.get_bytes())
263
+
264
+ super().__init__(path=path, size=size, loader=loader, cache=cache)
265
+ if data is None and loader is None:
266
+ # unset defaults for regular images
267
+ # to avoid random file reading to bytes
268
+ self._data = None
269
+
270
+ self._bytes_data = data
271
+ if ext:
272
+ ext = ext.lower()
273
+ if not ext.startswith('.'):
274
+ ext = '.' + ext
275
+ self._ext = ext
276
+
277
+ def get_bytes(self):
278
+ if callable(self._bytes_data):
279
+ return self._bytes_data()
280
+ return self._bytes_data
281
+
282
+ @property
283
+ def ext(self):
284
+ if self._ext:
285
+ return self._ext
286
+ return super().ext
287
+
288
+ def __eq__(self, other):
289
+ if not isinstance(other, __class__):
290
+ return super().__eq__(other)
291
+ return \
292
+ (np.array_equal(self.size, other.size)) and \
293
+ (self.has_data == other.has_data) and \
294
+ (self.has_data and self.get_bytes() == other.get_bytes() or \
295
+ not self.has_data)
testbed/openvinotoolkit__datumaro/datumaro/util/image_cache.py ADDED
@@ -0,0 +1,42 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (C) 2019-2020 Intel Corporation
2
+ #
3
+ # SPDX-License-Identifier: MIT
4
+
5
+ from collections import OrderedDict
6
+
7
+
8
_instance = None # lazily-created global cache shared via get_instance()

DEFAULT_CAPACITY = 2

class ImageCache:
    """A tiny LRU cache for decoded images.

    Recently-used entries are kept at the front of the ordered dict and
    eviction removes the entry at the back (the least recently used).
    Fix: push() now also files new entries at the front -- previously
    new items were appended to the back while get() moved items to the
    front, so a full cache evicted the most recently *pushed* image
    instead of the least recently used one.
    """
    @staticmethod
    def get_instance():
        """Return the process-wide shared cache, creating it on first use."""
        global _instance
        if _instance is None:
            _instance = ImageCache()
        return _instance

    def __init__(self, capacity=DEFAULT_CAPACITY):
        self.capacity = int(capacity)
        self.items = OrderedDict()

    def push(self, item_id, image):
        """Insert an entry, evicting the least recently used when full."""
        if self.capacity <= len(self.items):
            self.items.popitem(last=True) # back of the dict = LRU entry
        self.items[item_id] = image
        self.items.move_to_end(item_id, last=False) # front = most recent

    def get(self, item_id):
        """Return the cached image or None, refreshing its recency."""
        default = object()
        item = self.items.get(item_id, default)
        if item is default:
            return None

        self.items.move_to_end(item_id, last=False) # mark as recently used
        return item

    def size(self):
        return len(self.items)

    def clear(self):
        self.items.clear()
testbed/openvinotoolkit__datumaro/datumaro/util/log_utils.py ADDED
@@ -0,0 +1,16 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+
2
+ # Copyright (C) 2020 Intel Corporation
3
+ #
4
+ # SPDX-License-Identifier: MIT
5
+
6
+ from contextlib import contextmanager
7
+ import logging
8
+
9
@contextmanager
def logging_disabled(max_level=logging.CRITICAL):
    """Temporarily disable all logging up to `max_level`, restoring the
    previous disable threshold on exit (even on error)."""
    saved_level = logging.root.manager.disable
    logging.disable(max_level)
    try:
        yield
    finally:
        logging.disable(saved_level)
testbed/openvinotoolkit__datumaro/datumaro/util/mask_tools.py ADDED
@@ -0,0 +1,288 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+
2
+ # Copyright (C) 2019-2020 Intel Corporation
3
+ #
4
+ # SPDX-License-Identifier: MIT
5
+
6
+ import numpy as np
7
+
8
+ from datumaro.util.image import lazy_image, load_image
9
+
10
+
11
def generate_colormap(length=256):
    """
    Generates colors using PASCAL VOC algorithm.

    Returns index -> (R, G, B) mapping.
    """
    indices = np.arange(length, dtype=int)
    colormap = np.zeros((length, 3), dtype=int)

    # spread the low bits of every index over the RGB channels,
    # most significant destination bit first
    for shift in range(7, -1, -1):
        for channel in range(3):
            colormap[:, channel] |= ((indices >> channel) & 1) << shift
        indices >>= 3

    return dict(enumerate(map(tuple, colormap)))
32
+
33
def invert_colormap(colormap):
    """Swap an index -> color mapping into a color(tuple) -> index one."""
    inverted = {}
    for index, color in colormap.items():
        inverted[tuple(color)] = index
    return inverted
37
+
38
def check_is_mask(mask):
    """Assert that `mask` is HW- or HW1-shaped."""
    shape = mask.shape
    assert len(shape) in {2, 3}
    if len(shape) == 3:
        assert shape[2] == 1
42
+
43
# Default VOC palette and its inverse, shared by the paint/unpaint
# helpers below when no explicit colormap is supplied
_default_colormap = generate_colormap()
_default_unpaint_colormap = invert_colormap(_default_colormap)
45
+
46
def unpaint_mask(painted_mask, inverse_colormap=None):
    """Convert a color (BGR, HWC [0; 255]) mask into an index mask.

    inverse_colormap: either a mapping (R, G, B) -> index or a callable
        f(r, g, b) -> index; defaults to the inverted VOC palette.
    Returns a float32 HW index mask.
    """
    assert len(painted_mask.shape) == 3
    if inverse_colormap is None:
        inverse_colormap = _default_unpaint_colormap

    if callable(inverse_colormap):
        decode = lambda v: inverse_colormap(
            (v >> 16) & 255, (v >> 8) & 255, v & 255)
    else:
        decode = lambda v: inverse_colormap[
            ((v >> 16) & 255, (v >> 8) & 255, v & 255)]

    # pack B, G, R channels into one integer per pixel
    packed = painted_mask.astype(int)
    packed = packed[:, :, 0] + (packed[:, :, 1] << 8) + (packed[:, :, 2] << 16)

    # decode each distinct color only once
    unique_vals, inverse = np.unique(packed, return_inverse=True)
    palette = np.array([decode(v) for v in unique_vals], dtype=np.float32)
    return palette[inverse].reshape(packed.shape[:2])
73
+
74
def paint_mask(mask, colormap=None):
    """Apply a colormap to an index mask, producing a BGR float image.

    colormap: index -> (R, G, B) mapping or callable; defaults to the
        VOC palette. Unknown indices map to (-1, -1, -1).
    """
    check_is_mask(mask)

    if colormap is None:
        colormap = _default_colormap
    lookup = colormap if callable(colormap) else \
        (lambda c: colormap.get(c, (-1, -1, -1)))
    # build a 256-entry BGR palette once, then index it with the mask
    palette = np.array([lookup(c)[::-1] for c in range(256)],
        dtype=np.float32)

    mask = mask.astype(np.uint8)
    return palette[mask].reshape((*mask.shape[:2], 3))
92
+
93
def remap_mask(mask, map_fn):
    """Translate every index of an index mask through `map_fn`, using a
    precomputed 256-entry lookup table for speed."""
    check_is_mask(mask)

    table = np.array([map_fn(c) for c in range(256)], dtype=np.uint8)
    return table[mask]
100
+
101
def make_index_mask(binary_mask, index):
    """Turn a 0/1 mask into a uint8 mask holding `index` where set."""
    lut = np.array([0, index], dtype=np.uint8)
    return np.choose(binary_mask, lut)
103
+
104
def make_binary_mask(mask):
    """Return a boolean mask that is True where `mask` is non-zero.

    Fix: the previous implementation returned np.nonzero(mask) -- a
    tuple of coordinate index arrays, not a mask at all; callers asking
    for a "binary mask" now receive a same-shaped bool array.
    """
    if mask.dtype.kind == 'b':
        return mask
    return mask.astype(bool)
106
+
107
+
108
def load_mask(path, inverse_colormap=None):
    """Load a mask file as uint8; when an inverse colormap is given and
    the file is a color image, convert it to an index mask."""
    mask = load_image(path, dtype=np.uint8)
    is_color = len(mask.shape) == 3 and mask.shape[2] != 1
    if inverse_colormap is not None and is_color:
        mask = unpaint_mask(mask, inverse_colormap)
    return mask
114
+
115
def lazy_mask(path, inverse_colormap=None):
    """Deferred version of load_mask bound to `path`."""
    loader = lambda p: load_mask(p, inverse_colormap)
    return lazy_image(path, loader)
117
+
118
def mask_to_rle(binary_mask):
    """Encode a binary mask as COCO-style uncompressed RLE.

    Runs are counted in column-major (Fortran) order and the encoding
    always starts with the count of zeros, as COCO requires.
    Returns {'counts': run lengths, 'size': [H, W]}.
    """
    flat = binary_mask.ravel(order='F')

    # positions where the value changes, with sentinels at both ends
    edges = np.diff(flat, prepend=[1 - flat[0]], append=[1 - flat[-1]])
    boundaries, = np.where(edges != 0)

    if flat[0] != 0:
        # prepend a zero-length zero-run to stay COCO-compliant
        run_lengths = np.diff(boundaries, prepend=[0])
    else:
        run_lengths = np.diff(boundaries)

    return {
        'counts': run_lengths,
        'size': list(binary_mask.shape)
    }
137
+
138
def mask_to_polygons(mask, tolerance=1.0, area_threshold=1):
    """
    Convert an instance mask to polygons

    Args:
        mask: a 2d binary mask
        tolerance: maximum distance from original points of
            a polygon to the approximated ones
        area_threshold: minimal area of generated polygons

    Returns:
        A list of polygons like [[x1,y1, x2,y2 ...], [...]]
    """
    from pycocotools import mask as mask_utils
    from skimage import measure

    polygons = []

    # pad mask with 0 around borders
    padded_mask = np.pad(mask, pad_width=1, mode='constant', constant_values=0)
    contours = measure.find_contours(padded_mask, 0.5)

    for contour in contours:
        # Fix coordinates after padding. Done per contour:
        # find_contours returns arrays of differing lengths, and
        # np.subtract over such a ragged list is rejected by NumPy.
        contour = np.subtract(contour, 1)

        if not np.array_equal(contour[0], contour[-1]):
            contour = np.vstack((contour, contour[0])) # make polygon closed

        contour = measure.approximate_polygon(contour, tolerance)
        if len(contour) <= 2:
            continue

        contour = np.flip(contour, axis=1).flatten().clip(0) # [x0, y0, ...]

        # Check if the polygon is big enough
        rle = mask_utils.frPyObjects([contour], mask.shape[0], mask.shape[1])
        area = sum(mask_utils.area(rle))
        if area_threshold <= area:
            polygons.append(contour)
    return polygons
178
+
179
def crop_covered_segments(segments, width, height,
        iou_threshold=0.0, ratio_tolerance=0.001, area_threshold=1,
        return_masks=False):
    """
    Find all segments occluded by others and crop them to the visible part only.
    Input segments are expected to be sorted from background to foreground.

    Args:
        segments: 1d list of segment RLEs (in COCO format)
        width: width of the image
        height: height of the image
        iou_threshold: IoU threshold for objects to be counted as intersected
            By default is set to 0 to process any intersected objects
        ratio_tolerance: an IoU "handicap" value for a situation
            when an object is (almost) fully covered by another one and we
            don't want make a "hole" in the background object
        area_threshold: minimal area of included segments
        return_masks: when True, cropped results stay as masks instead of
            being converted back to polygons

    Returns:
        A list of input segments' parts (in the same order as input):
        [
            [[x1,y1, x2,y2 ...], ...], # input segment #0 parts
            mask1, # input segment #1 mask (if source segment is mask)
            [], # when source segment is too small
            ...
        ]
    """
    from pycocotools import mask as mask_utils

    segments = [[s] for s in segments]
    input_rles = [mask_utils.frPyObjects(s, height, width) for s in segments]

    for i, rle_bottom in enumerate(input_rles):
        area_bottom = sum(mask_utils.area(rle_bottom))
        if area_bottom < area_threshold:
            # too small to keep at all
            segments[i] = [] if not return_masks else None
            continue

        # collect all foreground segments that overlap this one
        rles_top = []
        for j in range(i + 1, len(input_rles)):
            rle_top = input_rles[j]
            iou = sum(mask_utils.iou(rle_bottom, rle_top, [0, 0]))[0]

            if iou <= iou_threshold:
                continue

            area_top = sum(mask_utils.area(rle_top))
            area_ratio = area_top / area_bottom

            # If a segment is fully inside another one, skip this segment
            if abs(area_ratio - iou) < ratio_tolerance:
                continue

            # Check if the bottom segment is fully covered by the top one.
            # There is a mistake in the annotation, keep the background one
            if abs(1 / area_ratio - iou) < ratio_tolerance:
                rles_top = []
                break

            rles_top += rle_top

        if not rles_top and not isinstance(segments[i][0], dict) \
                and not return_masks:
            # nothing occludes this polygon segment - keep it unchanged
            continue

        rle_bottom = rle_bottom[0]
        bottom_mask = mask_utils.decode(rle_bottom).astype(np.uint8)

        if rles_top:
            rle_top = mask_utils.merge(rles_top)
            top_mask = mask_utils.decode(rle_top).astype(np.uint8)

            # subtract the union of the covering segments from this one
            bottom_mask -= top_mask
            bottom_mask[bottom_mask != 1] = 0

        if not return_masks and not isinstance(segments[i][0], dict):
            segments[i] = mask_to_polygons(bottom_mask,
                area_threshold=area_threshold)
        else:
            segments[i] = bottom_mask

    return segments
261
+
262
def rles_to_mask(rles, width, height):
    """Merge COCO RLEs/polygons into one binary mask of (height, width)."""
    from pycocotools import mask as mask_utils

    encoded = mask_utils.frPyObjects(rles, height, width)
    merged = mask_utils.merge(encoded)
    return mask_utils.decode(merged)
269
+
270
def find_mask_bbox(mask):
    """Return the tight [x, y, w, h] box around the mask's non-zero area."""
    occupied_cols = np.any(mask, axis=0)
    occupied_rows = np.any(mask, axis=1)
    x0, x1 = np.where(occupied_cols)[0][[0, -1]]
    y0, y1 = np.where(occupied_rows)[0][[0, -1]]
    return [x0, y0, x1 - x0, y1 - y0]
276
+
277
def merge_masks(masks):
    """
    Merges masks into one, mask order is responsible for z order.
    """
    if not masks:
        return None

    merged = masks[0]
    for overlay in masks[1:]:
        # non-zero pixels of later masks overwrite earlier ones
        merged = np.where(overlay != 0, overlay, merged)
    return merged
testbed/openvinotoolkit__datumaro/datumaro/util/os_util.py ADDED
@@ -0,0 +1,17 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+
2
+ # Copyright (C) 2020 Intel Corporation
3
+ #
4
+ # SPDX-License-Identifier: MIT
5
+
6
+ import subprocess
7
+
8
+
9
def check_instruction_set(instruction):
    """Check whether the CPU reported by `lscpu` advertises the given
    instruction-set flag (e.g. 'avx2').

    Linux-only: shells out to `lscpu` and greps its output for an exact
    occurrence of the flag.
    """
    return instruction == str.strip(
        # Let's ignore a warning from bandit about using shell=True.
        # In this case it isn't a security issue and we use some
        # shell features like pipes.
        subprocess.check_output(
            'lscpu | grep -o "%s" | head -1' % instruction,
            shell=True).decode('utf-8') # nosec
    )